reponame (string, 2–39 chars) | files (list) | median_score (float64, 0–11.5)
matteotenca
[ { "content": "\"\"\"Contains the OneDrive object class to interact through the Graph API using the Python package Graph-OneDrive.\n\"\"\"\nfrom __future__ import annotations\n\nimport asyncio\nimport logging\nimport os\nimport re\nimport secrets\nimport shutil\nimport tempfile\nimport urllib.parse\nimport warnings\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom datetime import timezone\nfrom json.decoder import JSONDecodeError\nfrom pathlib import Path\nfrom time import sleep\nfrom typing import Any\nfrom typing import Optional\n\nimport aiofiles\nimport httpx\n\nfrom graph_onedrive.__init__ import __version__\nfrom graph_onedrive._config import dump_config\nfrom graph_onedrive._config import load_config\nfrom graph_onedrive._decorators import token_required\n\n\n# Set logger\nlogger = logging.getLogger(__name__)\n\n\nclass GraphAPIError(Exception):\n \"\"\"Exception raised when Graph API returns an error status.\"\"\"\n\n pass\n\n\nclass OneDrive:\n \"\"\"Creates an instance to interact with Microsoft's OneDrive platform through their Graph API.\n Positional arguments:\n client_id (str) -- Azure app client id\n client_secret (str) -- Azure app client secret\n Keyword arguments:\n tenant (str) -- Azure app org tenant id number, use default if multi-tenant (default = \"common\")\n redirect_url (str) -- Authentication redirection url (default = \"http://localhost:8080\")\n refresh_token (str) -- optional token from previous session (default = None)\n Attributes:\n refresh_token (str) -- single-use token to supply when recreating the instance to skip authorization\n Constructor methods:\n from_dict -- create an instance from a dictionary\n from_file -- create an instance from a config file\n to_file -- export an instance configuration to a config file\n Methods:\n get_usage -- account current usage and total capacity\n list_directory -- lists all of the items and their attributes within a directory\n search -- list items matching a seearch query\n detail_item -- get item details by item id\n detail_item_path -- get item details by drive path\n item_type -- get item type, folder or file\n is_folder -- check if an item is a folder\n is_file -- check if an item is a file\n create_share_link -- create a sharing link for a file or folder\n make_folder -- creates a folder\n move_item -- moves an item\n copy_item -- copies an item\n rename_item -- renames an item\n delete_item -- deletes an item\n download_file -- downloads a file to the working directory\n upload_file -- uploads a file\n \"\"\"\n\n # Set class constants for the Graph API\n _API_VERSION = \"v1.0\"\n _API_URL = \"https://graph.microsoft.com/\" + _API_VERSION + \"/\"\n _AUTH_BASE_URL = \"https://login.microsoftonline.com/\"\n _AUTH_ENDPOINT = \"/oauth2/v2.0/\"\n\n def __init__(\n self,\n client_id: str,\n client_secret: str,\n tenant: str = \"common\",\n redirect_url: str = \"http://localhost:8080\",\n refresh_token: str | None = None,\n ) -> None:\n # Set private attributes after checking types\n if not isinstance(client_id, str):\n raise TypeError(\n f\"client_id expected 'str', got {type(client_id).__name__!r}\"\n )\n self._client_id = client_id\n if not isinstance(client_secret, str):\n raise TypeError(\n f\"client_secret expected 'str', got {type(client_secret).__name__!r}\"\n )\n self._client_secret = client_secret\n if not isinstance(tenant, str):\n raise TypeError(f\"tenant expected 'str', got {type(tenant).__name__!r}\")\n self._tenant_id = tenant\n self._auth_url = self._AUTH_BASE_URL + self._tenant_id 
+ self._AUTH_ENDPOINT\n self._scope = \"offline_access files.readwrite\"\n if not isinstance(redirect_url, str):\n raise TypeError(\n f\"redirect_url expected 'str', got {type(redirect_url).__name__!r}\"\n )\n self._redirect = redirect_url\n self._drive_path = \"me/drive/\"\n self._api_drive_url = self._API_URL + self._drive_path\n self._access_token = \"\"\n self._access_expires = 0.0\n # Set public attributes\n if refresh_token:\n if not isinstance(refresh_token, str):\n raise TypeError(\n f\"refresh_token expected 'str', got {type(refresh_token).__name__!r}\"\n )\n self.refresh_token: str = refresh_token\n else:\n self.refresh_token = \"\"\n # Initiate generation of authorization tokens\n self._get_token()\n self._create_headers()\n # Set additional attributes from the server\n self._get_drive_details()\n logger.debug(\n f\"Graph-OneDrive version={__version__}, client_id={client_id}, tenant={tenant}\"\n )\n\n def __repr__(self) -> str:\n return f\"<OneDrive {self._drive_type} {self._drive_name} {self._owner_name}>\"\n\n @classmethod\n def from_dict(cls, config: dict[str, Any]) -> OneDrive:\n \"\"\"Create an instance of the OneDrive class from a dictionary.\n Keyword arguments:\n config (dict) -- dictionary containing at minimum tenant_id, client_id, client_secret_value\n Returns:\n onedrive_instance (OneDrive) -- OneDrive object instance\n \"\"\"\n # Check config contents\n try:\n tenant_id = config[\"tenant_id\"]\n except KeyError:\n raise KeyError(\"expected tenant_id in first level of dictionary\")\n try:\n client_id = config[\"client_id\"]\n except KeyError:\n raise KeyError(\"expected client_id in first level of dictionary\")\n try:\n client_secret = config[\"client_secret_value\"]\n except KeyError:\n raise KeyError(\"expected client_secret_value in first level of dictionary\")\n try:\n redirect_url = config[\"redirect_url\"]\n except KeyError:\n redirect_url = \"http://localhost:8080\"\n try:\n refresh_token = config[\"refresh_token\"]\n except KeyError:\n refresh_token = None\n # Create OneDrive object instance\n return cls(\n client_id=client_id,\n client_secret=client_secret,\n tenant=tenant_id,\n redirect_url=redirect_url,\n refresh_token=refresh_token,\n )\n\n @classmethod\n def from_file(\n cls,\n file_path: str | Path = \"config.json\",\n config_key: str = \"onedrive\",\n save_refresh_token: bool = False,\n ) -> OneDrive:\n \"\"\"Create an instance of the OneDrive class from a config file.\n Keyword arguments:\n file_path (str|Path) -- path to configuration file (default = \"config.json\")\n config_key (str) -- key of the item storing the configuration (default = \"onedrive\")\n save_refresh_token (bool) -- save the refresh token back to the config file during instance initiation (default = False)\n Returns:\n onedrive_instance (OneDrive) -- OneDrive object instance\n \"\"\"\n # Check types\n if not isinstance(file_path, str) and not isinstance(file_path, Path):\n raise TypeError(\n f\"config_path expected 'str' or 'Path', got {type(file_path).__name__!r}\"\n )\n if not isinstance(config_key, str):\n raise TypeError(\n f\"config_key expected 'str', got {type(config_key).__name__!r}\"\n )\n # Read configuration from config file\n config_path = Path(file_path)\n logger.info(f\"reading OneDrive configuration from {config_path.name}\")\n config = load_config(config_path, config_key)\n # Create the instance\n onedrive_instance = cls.from_dict(config)\n # Get refresh token from instance and update config file\n if save_refresh_token:\n logger.info(\"saving refresh token\")\n 
config[\"refresh_token\"] = onedrive_instance.refresh_token\n dump_config(config, config_path, config_key)\n\n # Return the OneDrive instance\n return onedrive_instance\n\n def to_file(\n self, file_path: str | Path = \"config.json\", config_key: str = \"onedrive\"\n ) -> None:\n \"\"\"Save the configuration to a config file.\n Keyword arguments:\n file_path (str|Path) -- path to configuration file (default = \"config.json\")\n config_key (str) -- key of the item storing the configuration (default = \"onedrive\")\n \"\"\"\n # Check types\n if not isinstance(file_path, str) and not isinstance(file_path, Path):\n raise TypeError(\n f\"file_path expected 'str' or 'Path', got {type(file_path).__name__!r}\"\n )\n if not isinstance(config_key, str):\n raise TypeError(\n f\"config_key expected 'str', got {type(config_key).__name__!r}\"\n )\n # Set the new configuration\n config = {\n \"tenant_id\": self._tenant_id,\n \"client_id\": self._client_id,\n \"client_secret_value\": self._client_secret,\n \"redirect_url\": self._redirect,\n \"refresh_token\": self.refresh_token,\n }\n # Save the configuration to config file\n dump_config(config, file_path, config_key)\n logger.info(f\"saved config to '{file_path}' with key '{config_key}'\")\n\n @staticmethod\n def _raise_unexpected_response(\n response: httpx.Response,\n expected: int | list[int] = 200,\n message: str = \"could not complete request\",\n has_json: bool = False,\n ) -> None:\n \"\"\"INTERNAL: Checks a API HTTPX Response object status and raises an exception if not as expected.\n Positional arguments:\n response (Response) -- HTTPX response object\n Keyword arguments:\n expected (int|[int]) -- valid status checks expected (default = 200)\n message (str) -- exception message to display (default = \"could not complete request\")\n has_json (bool) -- check the response has json (default = False)\n \"\"\"\n # Ensure expected is a list\n expected = expected if isinstance(expected, list) else [expected]\n # Check the status code\n if response.status_code not in expected:\n # Try get the api error message and raise an exception\n graph_error = \"no error message returned\"\n if response.headers.get(\"content-type\") == \"application/json\":\n api_error = response.json().get(\"error\", {}).get(\"message\")\n auth_error = response.json().get(\"error_description\")\n if api_error:\n graph_error = api_error\n elif auth_error:\n graph_error = auth_error\n logger.debug(f\"response_json={response.json()}\")\n logger.debug(\n f\"expected_codes={expected}, response_code={response.status_code}, package_error={message}\"\n )\n raise GraphAPIError(f\"{message} ({graph_error})\")\n # Check response has json\n if has_json:\n try:\n response.json()\n except JSONDecodeError:\n graph_error = \"response did not contain json\"\n raise GraphAPIError(f\"{message} ({graph_error})\")\n\n def _get_token(self) -> None:\n \"\"\"INTERNAL: Get access and refresh tokens from the Graph API.\n Calls get_authorization function if an existing refresh token (from a previous session) is not provided.\n \"\"\"\n request_url = self._auth_url + \"token\"\n # Generate request body as an url encoded query\n query = {\n \"client_id\": self._client_id,\n \"scope\": self._scope,\n \"redirect_uri\": self._redirect,\n \"client_secret\": self._client_secret,\n }\n\n # Set grant type\n # If no refresh token provided, get new authorization code\n if self.refresh_token != \"\":\n query[\"grant_type\"] = \"refresh_token\"\n query[\"refresh_token\"] = self.refresh_token\n else:\n 
query[\"grant_type\"] = \"authorization_code\"\n query[\"code\"] = self._get_authorization()\n\n # Set the header and encode the query\n headers = {\"content-type\": \"application/x-www-form-urlencoded\"}\n query_encoded = urllib.parse.urlencode(query, encoding=\"utf-8\")\n logger.debug(f\"token request query={query_encoded}\")\n\n # Make the request\n logger.info(f\"requesting access and refresh tokens from {request_url}\")\n response = httpx.post(request_url, headers=headers, content=query_encoded)\n\n # Check and parse the response\n self._raise_unexpected_response(\n response, 200, \"could not get access token\", has_json=True\n )\n response_data = response.json()\n\n # Set the access and refresh tokens to the instance attributes\n if not response_data.get(\"access_token\"):\n logger.error(\"response did not return an access token\")\n raise GraphAPIError(\"response did not return an access token\")\n self._access_token = response_data[\"access_token\"]\n\n if response_data.get(\"refresh_token\"):\n self.refresh_token = response_data[\"refresh_token\"]\n else:\n logger.warning(\n \"token request did not return a refresh token, existing config not updated\"\n )\n\n # Set an expiry time, removing 60 seconds assumed for processing\n expires = response_data.get(\"expires_in\", 660) - 60\n expires = datetime.now() + timedelta(seconds=expires)\n logger.info(f\"access token expires: {expires}\")\n self._access_expires = datetime.timestamp(expires)\n\n def _get_authorization(self) -> str:\n \"\"\"INTERNAL: Get authorization code by generating a url for the user to authenticate and authorize the app with.\n The user then return the response manually for the function to then extract the authorization code from.\n The authorization code has a short life and should only be used to generate access and refresh tokens.\n Returns:\n authorization_code (str) -- Graph API authorization code valid once for about 10 mins\n \"\"\"\n # Create state used for check\n alphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\" # = string.ascii_letters + string.digits\n state = \"\".join(secrets.choice(alphabet) for _ in range(10))\n # Generate request url\n request_url = self._auth_url + \"authorize\"\n request_url += \"?client_id=\" + self._client_id\n request_url += \"&response_type=code\"\n request_url += \"&redirect_uri=\" + urllib.parse.quote(self._redirect, safe=\"\")\n request_url += \"&response_mode=query\"\n request_url += \"&scope=\" + urllib.parse.quote(self._scope)\n request_url += \"&state=\" + state\n # Make request (manually)\n print(\"Manual app authorization required.\")\n print(\"Step 1: Copy the below URL and paste into a web browser.\")\n print(\"AUTHORIZATION URL --------------\")\n print(request_url)\n print(\"--------------------------------\")\n print(\"Step 2: Authorize the app using your account.\")\n print(\"You will be redirected (potentially to an error page - this is normal).\")\n print(\"Step 3: Copy the entire response URL address.\")\n response = input(\"Step 4: paste the response here: \").strip()\n # Verify the state which ensures the response is for this request\n return_state = re.search(\"[?|&]state=([^&]+)\", response)\n if return_state:\n if return_state.group(1) != state:\n error_message = \"response 'state' not for this request, occurs when reusing an old authorization url\"\n logger.error(error_message)\n raise GraphAPIError(error_message)\n else:\n logger.warning(\n \"response 'state' was not in returned url, response not confirmed\"\n )\n # Extract 
the code from the response\n authorization_code_re = re.search(\"[?|&]code=([^&]+)\", response)\n if authorization_code_re is None:\n error_message = \"response did not contain an authorization code\"\n logger.error(error_message)\n raise GraphAPIError(error_message)\n authorization_code = authorization_code_re.group(1)\n # Return the authorization code to be used to get tokens\n return authorization_code\n\n def _create_headers(self) -> None:\n \"\"\"INTERNAL: Create headers for the http request to the Graph API.\"\"\"\n if self._access_token == \"\":\n raise ValueError(\"expected self._access_token to be set, got empty string\")\n self._headers = {\n \"Accept\": \"*/*\",\n \"Authorization\": \"Bearer \" + self._access_token,\n }\n\n @token_required\n def _get_drive_details(self) -> None:\n \"\"\"INTERNAL: Gets the drive details\"\"\"\n # Generate request url\n request_url = self._api_drive_url\n response = httpx.get(request_url, headers=self._headers)\n self._raise_unexpected_response(\n response, 200, \"could not get drive details\", has_json=True\n )\n response_data = response.json()\n # Set drive details\n self._drive_id = response_data.get(\"id\")\n self._drive_name = response_data.get(\"name\")\n self._drive_type = response_data.get(\"driveType\")\n response_data_user = response_data.get(\"owner\", {}).get(\"user\", {})\n self._owner_id = response_data_user.get(\"id\")\n self._owner_email = response_data_user.get(\"email\")\n self._owner_name = response_data_user.get(\"displayName\")\n response_data_quota = response_data.get(\"quota\", {})\n self._quota_used = response_data_quota.get(\"used\")\n self._quota_remaining = response_data_quota.get(\"remaining\")\n self._quota_total = response_data_quota.get(\"total\")\n\n @token_required\n def get_usage(\n self, unit: str = \"gb\", refresh: bool = False, verbose: bool = False\n ) -> tuple[float, float, str]:\n \"\"\"Get the current usage and capacity of the connected OneDrive.\n Keyword arguments:\n unit (str) -- unit to return value [\"b\", \"kb\", \"mb\", \"gb\"] (default = \"gb\")\n refresh (bool) -- refresh the usage data (default = False)\n verbose (bool) -- print the usage (default = False)\n Returns:\n used (float) -- storage used in unit requested\n capacity (float) -- storage capacity in unit requested\n units (str) -- unit of usage\n \"\"\"\n # Validate unit\n if not isinstance(unit, str):\n raise TypeError(f\"unit expected 'str', got {type(unit).__name__!r}\")\n unit = unit.lower()\n if unit not in (\"b\", \"kb\", \"mb\", \"gb\"):\n raise ValueError(f\"{unit!r} is not a supported unit\")\n # Refresh drive details\n if refresh:\n self._get_drive_details()\n # Read usage values\n used = self._quota_used\n capacity = self._quota_total\n # Convert to requested unit unit\n if unit == \"gb\":\n used = round(used / (1024 * 1024 * 1024), 1)\n capacity = round(capacity / (1024 * 1024 * 1024), 1)\n elif unit == \"mb\":\n used = round(used / (1024 * 1024), 1)\n capacity = round(capacity / (1024 * 1024), 1)\n elif unit == \"kb\":\n used = round(used / (1024), 1)\n capacity = round(capacity / (1024), 1)\n else:\n unit = \"b\"\n # Print usage\n if verbose:\n print(\n f\"Using {used} {unit} ({used*100/capacity:.2f}%) of total {capacity} {unit}.\"\n )\n # Return usage and capacity in requested units\n return used, capacity, unit\n\n @token_required\n def list_directory(\n self, folder_id: str | None = None, verbose: bool = False\n ) -> list[dict[str, object]]:\n \"\"\"List the files and folders within the input folder/root of the connected 
OneDrive.\n Keyword arguments:\n folder_id (str) -- the item id of the folder to look into, None being the root directory (default = None)\n verbose (bool) -- print the items along with their ids (default = False)\n Returns:\n items (dict) -- details of all the items within the requested directory\n \"\"\"\n # Check if folder id was provided and create the request url\n if folder_id:\n if not isinstance(folder_id, str):\n raise TypeError(\n f\"folder_id expected 'str', got {type(folder_id).__name__!r}\"\n )\n request_url = self._api_drive_url + \"items/\" + folder_id + \"/children\"\n else:\n request_url = self._api_drive_url + \"root/children\"\n # Make the Graph API request\n items_list = []\n while True:\n response = httpx.get(request_url, headers=self._headers)\n # Validate request response and parse\n self._raise_unexpected_response(\n response, 200, \"directory could not be listed\", has_json=True\n )\n response_data = response.json()\n # Add the items to the item list\n items_list += response_data.get(\"value\", {})\n # Break if these is no next link, else set the request link\n if response_data.get(\"@odata.nextLink\") is None:\n break\n else:\n request_url = response_data[\"@odata.nextLink\"]\n # Print the items in the directory along with their item ids\n if verbose:\n for item in items_list:\n print(item[\"id\"], item[\"name\"])\n # Return the items dictionary\n return items_list\n\n @token_required\n def search(\n self, query: str, top: int = -1, verbose: bool = False\n ) -> list[dict[str, object]]:\n \"\"\"List files and folders matching a search query.\n Positional arguments:\n query (str) -- search query string\n Keyword arguments:\n top (int) -- limits the results list length, use -1 to not limit (default = -1)\n verbose (bool) -- print the items along with their ids (default = False)\n Returns:\n items (dict) -- details of all the top items matching the search query\n \"\"\"\n # Validate attributes\n if not isinstance(query, str):\n raise TypeError(f\"query expected 'str', got {type(query).__name__!r}\")\n if not isinstance(top, int):\n raise TypeError(f\"top expected 'int', got {type(top).__name__!r}\")\n if query in (\"\", \" \", \"%20\"):\n raise ValueError(\n \"cannot search for blank string. 
Did you mean list_directory(folder_id=None)?\"\n )\n # Build and make the request\n request_url = self._api_drive_url + f\"root/search(q='{query}')\"\n if top >= 1:\n request_url += f\"?$top={top}\"\n # Make the Graph API request\n items_list = []\n while True:\n response = httpx.get(request_url, headers=self._headers)\n # Validate request response and parse\n self._raise_unexpected_response(\n response, 200, \"search could not complete\", has_json=True\n )\n response_data = response.json()\n # Add the items to the item list\n items_list += response_data.get(\"value\", {})\n # Break if these is no next link, else set the request link\n if len(items_list) >= top or response_data.get(\"@odata.nextLink\") is None:\n break\n else:\n request_url = response_data[\"@odata.nextLink\"]\n # Print the items in the directory along with their item ids\n if verbose:\n for item in items_list:\n if \"folder\" in item:\n print(item[\"id\"], \"folder\", item[\"name\"])\n else:\n print(item[\"id\"], \"file \", item[\"name\"])\n # Return the items dictionary\n return items_list\n\n @token_required\n def detail_item(self, item_id: str, verbose: bool = False) -> dict[str, Any]:\n \"\"\"Retrieves the metadata for an item.\n Positional arguments:\n item_id (str) -- item id of the folder or file\n Keyword arguments:\n verbose (bool) -- print the main parts of the item metadata (default = False)\n Returns:\n item_details (dict) -- metadata of the requested item\n \"\"\"\n # Validate item id\n if not isinstance(item_id, str):\n raise TypeError(f\"item_id expected 'str', got {type(item_id).__name__!r}\")\n # Create request url based on input item id\n request_url = self._api_drive_url + \"items/\" + item_id\n # Make the Graph API request\n response = httpx.get(request_url, headers=self._headers)\n # Validate request response and parse\n self._raise_unexpected_response(\n response, 200, \"item could not be detailed\", has_json=True\n )\n response_data = response.json()\n # Print the item details\n if verbose:\n self._print_item_details(response_data)\n # Return the item details\n return response_data\n\n @token_required\n def detail_item_path(self, item_path: str, verbose: bool = False) -> dict[str, Any]:\n \"\"\"Retrieves the metadata for an item from a web path.\n Positional arguments:\n item_path (str) -- web path of the drive folder or file\n Keyword arguments:\n verbose (bool) -- print the main parts of the item metadata (default = False)\n Returns:\n item_details (dict) -- metadata of the requested item\n \"\"\"\n # Validate item id\n if not isinstance(item_path, str):\n raise TypeError(\n f\"item_path expected 'str', got {type(item_path).__name__!r}\"\n )\n # Create request url based on input item id\n if item_path[0] != \"/\":\n item_path = \"/\" + item_path\n request_url = self._api_drive_url + \"root:\" + item_path\n # Make the Graph API request\n response = httpx.get(request_url, headers=self._headers)\n # Validate request response and parse\n self._raise_unexpected_response(\n response, 200, \"item could not be detailed\", has_json=True\n )\n response_data = response.json()\n # Print the item details\n if verbose:\n self._print_item_details(response_data)\n # Return the item details\n return response_data\n\n def _print_item_details(self, item_details: dict[str, Any]) -> None:\n \"\"\"INTERNAL: Prints the details of an item.\n Positional arguments:\n item_details (dict) -- item details in a dictionary format, typically from detail_item method\n \"\"\"\n print(\"item id:\", item_details.get(\"id\"))\n 
print(\"name:\", item_details.get(\"name\"))\n if \"folder\" in item_details:\n print(\"type:\", \"folder\")\n elif \"file\" in item_details:\n print(\"type:\", \"file\")\n print(\n \"created:\",\n item_details.get(\"createdDateTime\"),\n \"by:\",\n item_details.get(\"createdBy\", {}).get(\"user\", {}).get(\"displayName\"),\n )\n print(\n \"last modified:\",\n item_details.get(\"lastModifiedDateTime\"),\n \"by:\",\n item_details.get(\"lastModifiedBy\", {}).get(\"user\", {}).get(\"displayName\"),\n )\n print(\"size:\", item_details.get(\"size\"))\n print(\"web url:\", item_details.get(\"webUrl\"))\n file_system_info = item_details.get(\"fileSystemInfo\", {})\n print(\"file system created:\", file_system_info.get(\"createdDateTime\"))\n print(\n \"file system last modified:\",\n file_system_info.get(\"lastModifiedDateTime\"),\n )\n if \"file\" in item_details.keys():\n hashes = item_details[\"file\"].get(\"hashes\")\n if isinstance(hashes, dict):\n for key, value in hashes.items():\n print(f\"file {key.replace('Hash', '')} hash:\", value)\n if \"folder\" in item_details.keys():\n print(\"child count:\", item_details[\"folder\"].get(\"childCount\"))\n\n def item_type(self, item_id: str) -> str:\n \"\"\"Returns the item type in str format.\n Positional arguments:\n item_id (str) -- item id of the folder or file\n Returns:\n type (str) -- \"folder\" or \"file\"\n \"\"\"\n item_details = self.detail_item(item_id)\n if \"folder\" in item_details:\n return \"folder\"\n else:\n return \"file\"\n\n def is_folder(self, item_id: str) -> bool:\n \"\"\"Checks if an item is a folder.\n Positional arguments:\n item_id (str) -- item id of the folder or file\n Returns:\n folder (bool) -- True if folder, else false.\n \"\"\"\n item_type = self.item_type(item_id)\n if item_type == \"folder\":\n return True\n else:\n return False\n\n def is_file(self, item_id: str) -> bool:\n \"\"\"Checks if an item is a file.\n Positional arguments:\n item_id (str) -- item id of the folder or file\n Returns:\n file (bool) -- True if file, else false.\n \"\"\"\n item_type = self.item_type(item_id)\n if item_type == \"file\":\n return True\n else:\n return False\n\n @token_required\n def create_share_link(\n self,\n item_id: str,\n link_type: str = \"view\",\n password: str | None = None,\n expiration: datetime | None = None,\n scope: str = \"anonymous\",\n ) -> str:\n \"\"\"Creates a basic sharing link for an item.\n Positional arguments:\n item_id (str) -- item id of the folder or file\n Keyword arguments:\n link_type (str) -- type of sharing link to create, either \"view\", \"edit\", or (\"embed\" for OneDrive personal only) (default = \"view\")\n password (str) -- password for the sharing link (OneDrive personal only) (default = None)\n expiration (datetime) -- expiration of the sharing link, computer local timezone assumed for 'native' datetime objects (default = None)\n scope (str) -- \"anonymous\" for anyone with the link, or (\"organization\" to limit to the tenant for OneDrive Business) (default = \"anonymous\")\n Returns:\n link (str) -- typically a web link, html iframe if link_type=\"embed\"\n \"\"\"\n # Verify type\n if not isinstance(link_type, str):\n raise TypeError(\n f\"link_type expected 'str', got {type(link_type).__name__!r}\"\n )\n elif link_type not in (\"view\", \"edit\", \"embed\"):\n raise ValueError(\n f\"link_type expected 'view', 'edit', or 'embed', got '{link_type}'\"\n )\n elif link_type == \"embed\" and self._drive_type != \"personal\":\n raise ValueError(\n f\"link_type='embed' is not available 
for {self._drive_type} OneDrive accounts\"\n )\n # Verify password\n if password is not None and not isinstance(password, str):\n raise TypeError(\n f\"password expected type 'str', got {type(password)}.__name__!r\"\n )\n elif password is not None and self._drive_type != \"personal\":\n raise ValueError(\n f\"password is not available for {self._drive_type} OneDrive accounts\"\n )\n # Verify expiration\n if expiration is not None and not isinstance(expiration, datetime):\n raise TypeError(\n f\"expiration expected type 'datetime.datetime', got {type(expiration).__name__!r}\"\n )\n elif expiration is not None and datetime.now(\n timezone.utc\n ) > expiration.astimezone(timezone.utc):\n raise ValueError(\"expiration can not be in the past\")\n # Verify scope\n if not isinstance(scope, str):\n raise TypeError(f\"scope expected type 'str', got {type(scope).__name__!r}\")\n elif scope not in (\"anonymous\", \"organization\"):\n raise ValueError(\n f\"scope expected 'anonymous' or 'organization', got {scope}\"\n )\n elif scope == \"organization\" and self._drive_type not in (\n \"business\",\n \"sharepoint\",\n ):\n raise ValueError(\n f\"scope='organization' is not available for {self._drive_type} OneDrive accounts\"\n )\n # Create the request url\n request_url = self._api_drive_url + \"items/\" + item_id + \"/createLink\"\n # Create the body\n body = {\"type\": link_type, \"scope\": scope}\n # Add link password to body if it exists\n if password is not None and password != \"\":\n body[\"password\"] = password\n # Add link expiration to body if it exists\n if expiration is not None:\n expiration_iso = (\n expiration.astimezone(timezone.utc)\n .isoformat(timespec=\"seconds\")\n .replace(\"+00:00\", \"Z\")\n )\n body[\"expirationDateTime\"] = expiration_iso\n # Make the request\n response = httpx.post(request_url, headers=self._headers, json=body)\n # Verify and parse the response\n self._raise_unexpected_response(\n response, [200, 201], \"share link could not be created\", has_json=True\n )\n response_data = response.json()\n # Extract the html iframe or link and return it\n if link_type == \"embed\":\n html_iframe = response_data.get(\"link\", {}).get(\"webHtml\")\n return html_iframe\n else:\n share_link = response_data.get(\"link\", {}).get(\"webUrl\")\n return share_link\n\n @token_required\n def make_folder(\n self,\n folder_name: str,\n parent_folder_id: str | None = None,\n check_existing: bool = True,\n if_exists: str = \"rename\",\n ) -> str:\n \"\"\"Creates a new folder within the input folder/root of the connected OneDrive.\n Positional arguments:\n folder_name (str) -- the name of the new folder\n Keyword arguments:\n parent_folder_id (str) -- the item id of the parent folder, None being the root directory (default = None)\n check_existing (bool) -- checks parent and returns folder_id if a matching folder already exists (default = True)\n if_exists (str) -- if check_existing is set to False; action to take if the new folder already exists [fail, replace, rename] (default = \"rename\")\n Returns:\n folder_id (str) -- newly created folder item id\n \"\"\"\n # Validate folder_name\n if not isinstance(folder_name, str):\n raise TypeError(\n f\"folder_name expected 'str', got {type(folder_name).__name__!r}\"\n )\n # Validate parent_folder_id\n if parent_folder_id and not isinstance(parent_folder_id, str):\n raise TypeError(\n f\"parent_folder_id expected 'str', got {type(parent_folder_id).__name__!r}\"\n )\n # Set conflict behavior\n conflict_behavior = if_exists\n if conflict_behavior 
not in (\"fail\", \"replace\", \"rename\"):\n raise ValueError(\n f\"if_exists expected 'fail', 'replace', or 'rename', got {if_exists!r}\"\n )\n # Create request url based on input parent folder\n if parent_folder_id:\n request_url = (\n self._api_drive_url + \"items/\" + parent_folder_id + \"/children\"\n )\n else:\n request_url = self._api_drive_url + \"root/children\"\n # Check if folder already exists\n if check_existing:\n items = self.list_directory(parent_folder_id)\n for item in items:\n if item.get(\"name\") == folder_name and \"folder\" in item:\n return item[\"id\"]\n # Create the request body\n body = {\n \"name\": folder_name,\n \"folder\": {},\n \"@microsoft.graph.conflictBehavior\": conflict_behavior,\n }\n # Make the Graph API request\n response = httpx.post(request_url, headers=self._headers, json=body)\n # Validate request response and parse\n self._raise_unexpected_response(\n response, 201, \"folder not created\", has_json=True\n )\n response_data = response.json()\n folder_id = response_data[\"id\"]\n # Return the folder item id\n return folder_id\n\n @token_required\n def move_item(\n self, item_id: str, new_folder_id: str, new_name: str | None = None\n ) -> tuple[str, str]:\n \"\"\"Moves an item (folder/file) within the connected OneDrive. Optionally rename an item at the same time.\n Positional arguments:\n item_id (str) -- item id of the folder or file to move\n new_folder_id (str) -- item id of the folder to shift the item to\n Keyword arguments:\n new_name (str) -- optional new item name with extension (default = None)\n Returns:\n item_id (str) -- item id of the folder or file that was moved, should match input item id\n folder_id (str) -- item id of the new parent folder, should match input folder id\n \"\"\"\n # Validate item_id\n if not isinstance(item_id, str):\n raise TypeError(f\"item_id expected 'str', got {type(item_id).__name__!r}\")\n # Validate new_folder_id\n if not isinstance(new_folder_id, str):\n raise TypeError(\n f\"new_folder_id expected 'str', got {type(new_folder_id).__name__!r}\"\n )\n # Validate new_name\n if new_name and not isinstance(new_name, str):\n raise TypeError(f\"new_name expected 'str', got {type(new_name).__name__!r}\")\n # Create request url based on input item id that should be moved\n request_url = self._api_drive_url + \"items/\" + item_id\n # Create the request body\n body: dict[str, Any] = {\"parentReference\": {\"id\": new_folder_id}}\n if new_name:\n body[\"name\"] = new_name\n # Make the Graph API request\n response = httpx.patch(request_url, headers=self._headers, json=body)\n # Validate request response and parse\n self._raise_unexpected_response(response, 200, \"item not moved\", has_json=True)\n response_data = response.json()\n item_id = response_data[\"id\"]\n parent_folder_id = response_data[\"parentReference\"][\"id\"]\n # Return the item id and parent folder id\n return item_id, parent_folder_id\n\n @token_required\n def copy_item(\n self,\n item_id: str,\n new_folder_id: str,\n new_name: str | None = None,\n confirm_complete: bool = True,\n verbose: bool = False,\n ) -> str | None:\n \"\"\"Copies an item (folder/file) within the connected OneDrive server-side.\n Positional arguments:\n item_id (str) -- item id of the folder or file to copy\n new_folder_id (str) -- item id of the folder to copy the item to\n Keyword arguments:\n new_name (str) -- optional new item name with extension (default = None)\n confirm_complete (bool) -- waits for the copy operation to finish before returning (default = True)\n verbose 
(bool) -- prints status message during the download process (default = False)\n Returns:\n item_id (str | None) -- item id of the new item (None returned if confirm_complete set to False)\n \"\"\"\n # Validate item_id\n if not isinstance(item_id, str):\n raise TypeError(f\"item_id expected 'str', got {type(item_id).__name__!r}\")\n # Validate new_folder_id\n if not isinstance(new_folder_id, str):\n raise TypeError(\n f\"new_folder_id expected 'str', got {type(new_folder_id).__name__!r}\"\n )\n # Validate new_name\n if new_name and not isinstance(new_name, str):\n raise TypeError(f\"new_name expected 'str', got {type(new_name).__name__!r}\")\n # Create request url based on input item id that should be moved\n request_url = self._api_drive_url + \"items/\" + item_id + \"/copy\"\n # Create the request body\n body: dict[str, Any] = {\n \"parentReference\": {\"driveId\": self._drive_id, \"id\": new_folder_id}\n }\n if new_name:\n body[\"name\"] = new_name\n # Make the Graph API request\n response = httpx.post(request_url, headers=self._headers, json=body)\n # Validate request response and parse\n self._raise_unexpected_response(response, 202, \"item not copied\")\n if verbose:\n print(\"Copy request sent.\")\n if confirm_complete:\n monitor_url = response.headers.get(\"Location\")\n wait_duration = 1\n previous_complete = 0\n while True:\n if verbose:\n print(f\"Waiting {wait_duration:.0f}s before checking progress\")\n sleep(wait_duration)\n response = httpx.get(monitor_url, follow_redirects=True)\n response_data = response.json()\n if response_data[\"status\"] == \"completed\":\n if verbose:\n print(\"Copy confirmed complete.\")\n break\n percentage_complete = response_data[\"percentageComplete\"]\n if verbose:\n print(f\"Percentage complete = {percentage_complete}%\")\n try:\n wait_duration = (\n 100.0\n / (percentage_complete - previous_complete)\n * wait_duration\n - wait_duration\n )\n except ZeroDivisionError:\n wait_duration = 10\n if wait_duration > 10:\n wait_duration = 10\n elif wait_duration < 1:\n wait_duration = 1\n previous_complete = percentage_complete\n new_item_id = response_data[\"resourceId\"]\n # Return the item id\n return new_item_id\n else:\n return None\n\n @token_required\n def rename_item(self, item_id: str, new_name: str) -> str:\n \"\"\"Renames an item (folder/file) without moving it within the connected OneDrive.\n Positional arguments:\n item_id (str) -- item id of the folder or file to rename\n new_name (str) -- new item name with extension\n Returns:\n item_name (str) -- new name of the folder or file that was renamed\n \"\"\"\n # Validate item_id\n if not isinstance(item_id, str):\n raise TypeError(f\"item_id expected 'str', got {type(item_id).__name__!r}\")\n # Validate new_name\n if not isinstance(new_name, str):\n raise TypeError(f\"new_name expected 'str', got {type(new_name).__name__!r}\")\n # Create request url based on input item id that should be renamed\n request_url = self._api_drive_url + \"items/\" + item_id\n # Create the request body\n body = {\"name\": new_name}\n # Make the Graph API request\n response = httpx.patch(request_url, headers=self._headers, json=body)\n # Validate request response and parse\n self._raise_unexpected_response(\n response, 200, \"item not renamed\", has_json=True\n )\n response_data = response.json()\n item_name = response_data[\"name\"]\n # Return the item name\n return item_name\n\n @token_required\n def delete_item(self, item_id: str, pre_confirm: bool = False) -> bool:\n \"\"\"Deletes an item (folder/file) within 
the connected OneDrive. Potentially restorable in the OneDrive web browser client.\n Positional arguments:\n item_id (str) -- item id of the folder or file to be deleted\n Keyword arguments:\n pre_confirm (bool) -- confirm that you want to delete the file and not show the warning (default = False)\n Returns:\n confirmation (bool) -- True if item was deleted successfully\n \"\"\"\n # Validate item_id\n if not isinstance(item_id, str):\n raise TypeError(f\"item_id expected 'str', got {type(item_id).__name__!r}\")\n # Validate pre_confirm\n if not isinstance(pre_confirm, bool):\n raise TypeError(\n f\"pre_confirm expected 'bool', got {type(item_id).__name__!r}\"\n )\n # Get the user to confirm that they want to delete\n if not pre_confirm:\n confirm = (\n input(\"Deleted files may not be restorable. Type 'delete' to confirm: \")\n .strip()\n .lower()\n )\n if confirm != \"delete\":\n print(\"Aborted.\")\n return False\n # Create request url based on input item id that should be deleted\n request_url = self._api_drive_url + \"items/\" + item_id\n # Make the Graph API request\n response = httpx.delete(request_url, headers=self._headers)\n # Validate request response\n self._raise_unexpected_response(response, 204, \"item not deleted\")\n # Return confirmation of deletion\n return True\n\n @token_required\n def download_file(\n self,\n item_id: str,\n max_connections: int = 8,\n dest_dir: str | Path | None = None,\n verbose: bool = False,\n ) -> str:\n \"\"\"Downloads a file to the current working directory asynchronously.\n Note folders cannot be downloaded, you need to use a loop instead.\n Positional arguments:\n item_id (str) -- item id of the file to be deleted\n Keyword arguments:\n max_connections (int) -- max concurrent open http requests, refer Docs regarding throttling limits\n dest_dir (str | Path) -- destination directory for the downloaded file, default is current working directory (default = None)\n verbose (bool) -- prints status message during the download process (default = False)\n Returns:\n file_path (Path) -- returns the path of the file including extension\n \"\"\"\n # Validate item_id\n if not isinstance(item_id, str):\n raise TypeError(f\"item_id expected 'str', got {type(item_id).__name__!r}\")\n # Validate max_connections\n if not isinstance(max_connections, int):\n raise TypeError(\n f\"max_connections expected 'int', got {type(max_connections).__name__!r}\"\n )\n # Validate dest_dir\n if dest_dir is None:\n dest_dir = Path.cwd()\n elif not isinstance(dest_dir, str) and not isinstance(dest_dir, Path):\n raise TypeError(\n f\"dest_dir expected 'str' or 'Path', got {type(dest_dir).__name__!r}\"\n )\n dest_dir = Path(dest_dir)\n if dest_dir.is_dir() is False:\n raise ValueError(f\"dest_dir {dest_dir} is not a directory\")\n # Check max connections is not excessive\n if max_connections > 16:\n warnings.warn(\n f\"max_connections={max_connections} could result in throttling and enforced cool-down period\",\n stacklevel=2,\n )\n # Get item details\n file_details = self.detail_item(item_id)\n # Check that it is not a folder\n if \"folder\" in file_details:\n raise ValueError(\"item_id provided is for a folder, expected file item id\")\n file_name = file_details[\"name\"]\n file_path = dest_dir / file_name\n size = file_details[\"size\"]\n # If the file is empty, just create it and return\n if size == 0:\n file_path.touch()\n logger.warning(f\"downloaded file size=0, empty file '{file_name}' created.\")\n return file_name\n # Create request url based on input item id to be 
downloaded\n request_url = self._api_drive_url + \"items/\" + item_id + \"/content\"\n # Make the Graph API request\n if verbose:\n print(\"Getting the file download url\")\n response = httpx.get(request_url, headers=self._headers)\n # Validate request response and parse\n self._raise_unexpected_response(response, 302, \"could not get download url\")\n download_url = response.headers[\"Location\"]\n logger.debug(f\"download_url={download_url}\")\n # Download the file asynchronously\n asyncio.run(\n self._download_async(\n download_url, file_path, size, max_connections, verbose\n )\n )\n # Return the file name\n return file_path\n\n async def _download_async(\n self,\n download_url: str,\n file_path: Path,\n file_size: int,\n max_connections: int = 8,\n verbose: bool = False,\n ) -> None:\n \"\"\"INTERNAL: Creates a list of co-routines each downloading one part of the file, and starts them.\n Positional arguments:\n download_url (str) -- url of the file to download\n file_path (Path) -- path of the final file\n file_size (int) -- size of the file being downloaded\n Keyword arguments:\n max_connections (int) -- max concurrent open http requests\n verbose (bool) -- prints status message during the download process (default = False)\n \"\"\"\n # Assert rather then check as this is an internal method\n assert isinstance(download_url, str)\n assert isinstance(file_path, Path)\n assert isinstance(file_size, int)\n assert isinstance(max_connections, int)\n tasks = list()\n file_part_names = list()\n # This httpx.AsyncClient instance will be shared among the co-routines, passed as an argument\n timeout = httpx.Timeout(10.0, read=180.0)\n client = httpx.AsyncClient(timeout=timeout)\n # Creates a new temp directory via tempfile.TemporaryDirectory()\n with tempfile.TemporaryDirectory() as tmp_dir:\n # Min chunk size, used to calculate the number of concurrent connections based on file size\n min_typ_chunk_size = 1 * 1024 * 1024 # 1 MiB\n # Effective number of concurrent connections\n num_coroutines = file_size // (2 * min_typ_chunk_size) + 1\n # Assures the max number of co-routines/concurrent connections is equal to the provided one\n if num_coroutines > max_connections:\n num_coroutines = max_connections\n # Calculates the final size of the chunk that each co-routine will download\n typ_chunk_size = file_size // num_coroutines\n if verbose:\n pretty_size = round(file_size / 1000000, 1)\n print(\n f\"File {file_path.name} ({pretty_size}mb) will be downloaded in {num_coroutines} segments.\"\n )\n logger.debug(\n f\"file_size={file_size}B, min_typ_chunk_size={min_typ_chunk_size}B, num_coroutines={num_coroutines}, typ_chunk_size={typ_chunk_size}\"\n )\n for i in range(num_coroutines):\n # Get the file part Path, placed in the temp directory\n part_file_path = Path(tmp_dir).joinpath(\n file_path.name + \".\" + str(i + 1)\n )\n # We save the file part Path for later use\n file_part_names.append(part_file_path)\n # On first iteration will be 0\n start = typ_chunk_size * i\n # If this is the last part, the `end` will be set to the file size minus one\n # This is needed to be sure we download the entire file.\n if i == num_coroutines - 1:\n end = file_size - 1\n else:\n end = start + typ_chunk_size - 1\n # We create a task and append it to the `task` list.\n tasks.append(\n asyncio.create_task(\n self._download_async_part(\n client, download_url, part_file_path, start, end, verbose\n )\n )\n )\n # This awaits all the tasks in the `task` list to return\n await asyncio.gather(*tasks)\n # Closing the 
httpx.AsyncClient instance\n await client.aclose()\n # Join the downloaded file parts\n if verbose:\n print(\"Joining individual segments into single file\")\n with open(file_path, \"wb\") as fw:\n for file_part in file_part_names:\n with open(file_part, \"rb\") as fr:\n shutil.copyfileobj(fr, fw)\n file_part.unlink()\n\n async def _download_async_part(\n self,\n client: httpx.AsyncClient,\n download_url: str,\n part_file_path: Path,\n start: int,\n end: int,\n verbose: bool = False,\n ) -> None:\n \"\"\"INTERNAL: Co-routine to download a part of a file asynchronously.\n Positional arguments:\n client (httpx) -- client object to use to make request\n download_url (str) -- url of the file to download\n part_file_path (str) -- path to the temporary part file\n start (int) -- byte range to start the download at\n end (int) -- byte range to end the download at\n Keyword arguments:\n verbose (bool) -- prints status message during the download process (default = False)\n \"\"\"\n # Each co-routine opens its own file part to write into\n async with aiofiles.open(part_file_path, \"wb\") as fw:\n # Build the Range HTTP header and add the auth header\n headers = {\"Range\": f\"bytes={start}-{end}\"}\n headers.update(self._headers)\n part_name = part_file_path.suffix.lstrip(\".\")\n if verbose:\n print(\n f\"Starting download of file segment {part_name} (bytes {start}-{end})\"\n )\n logger.debug(\n f\"starting download segment={part_name} start={start} end={end}\"\n )\n # Create an AsyncIterator over our GET request\n async with client.stream(\"GET\", download_url, headers=headers) as response:\n # Iterates over incoming bytes in chunks and saves them to file\n self._raise_unexpected_response(\n response, [200, 206], \"item not downloaded\"\n )\n write_chunk_size = 64 * 1024 # 64 KiB\n async for chunk in response.aiter_bytes(write_chunk_size):\n await fw.write(chunk)\n if verbose:\n print(f\"Finished download of file segment {part_name}\")\n logger.debug(f\"finished download segment={part_name}\")\n\n @token_required\n def upload_file(\n self,\n file_path: str | Path,\n new_file_name: str | None = None,\n parent_folder_id: str | None = None,\n if_exists: str = \"rename\",\n verbose: bool = False,\n ) -> str:\n \"\"\"Uploads a file to a particular folder with a provided file name.\n Positional arguments:\n file_path (str|Path) -- path of the local source file to upload\n Keyword arguments:\n new_file_name (str) -- new name of the file as it should appear on OneDrive, with extension (default = None)\n parent_folder_id (str) -- item id of the folder to put the file within, if None then root (default = None)\n if_exists (str) -- action to take if the new folder already exists [fail, replace, rename] (default = \"rename\")\n verbose (bool) -- prints status message during the download process (default = False)\n Returns:\n item_id (str) -- item id of the newly uploaded file\n \"\"\"\n # Validate file_path\n if not isinstance(file_path, str) and not isinstance(file_path, Path):\n raise TypeError(\n f\"file_path expected 'str' or 'Path', got {type(file_path).__name__!r}\"\n )\n # Validate new_file_name\n if new_file_name and not isinstance(new_file_name, str):\n raise TypeError(\n f\"new_file_name expected 'str', got {type(new_file_name).__name__!r}\"\n )\n # Validate parent_folder_id\n if parent_folder_id and not isinstance(parent_folder_id, str):\n raise TypeError(\n f\"parent_folder_id expected 'str', got {type(parent_folder_id).__name__!r}\"\n )\n # Set conflict behavior\n conflict_behavior = if_exists\n 
if conflict_behavior not in (\"fail\", \"replace\", \"rename\"):\n raise ValueError(\n f\"if_exists expected 'fail', 'replace', or 'rename', got {if_exists!r}\"\n )\n # Clean file path by removing escape slashes and converting to Path object\n # To-do: avoid the pathlib as it is a resource hog\n if os.name == \"nt\": # Windows\n file_path = str(file_path).replace(\"/\", \"\")\n else: # Other systems including Mac, Linux\n file_path = str(file_path).replace(\"\\\\\", \"\")\n file_path = Path(file_path)\n logger.debug(f\"file_path={file_path}\")\n # Set file name\n if new_file_name:\n destination_file_name = new_file_name\n else:\n destination_file_name = file_path.name\n # Check the path is valid and points to a file\n if not os.path.isfile(file_path):\n raise ValueError(\n f\"file_path expected a path to an existing file, got '{file_path!s}'\"\n )\n # Get file metadata\n file_size, file_created, file_modified = self._get_local_file_metadata(\n file_path\n )\n # Create request url for the upload session\n if parent_folder_id:\n request_url = self._api_drive_url + \"items/\" + parent_folder_id + \":/\"\n else:\n request_url = self._api_drive_url + \"root:/\"\n request_url += (\n urllib.parse.quote(destination_file_name) + \":/createUploadSession\"\n )\n logger.debug(f\"upload session request_url={request_url}\")\n # Create request body for the upload session\n body = {\n \"item\": {\n \"@microsoft.graph.conflictBehavior\": conflict_behavior,\n \"name\": destination_file_name,\n \"fileSystemInfo\": {\n \"createdDateTime\": file_created,\n \"lastModifiedDateTime\": file_modified,\n },\n }\n }\n logger.debug(f\"upload session body={body}\")\n # Make the Graph API request for the upload session\n if verbose:\n print(f\"Requesting upload session\")\n response = httpx.post(request_url, headers=self._headers, json=body)\n # Validate upload session request response and parse\n self._raise_unexpected_response(\n response, 200, \"upload session could not be created\", has_json=True\n )\n upload_url = response.json()[\"uploadUrl\"]\n logger.debug(f\"upload_url={upload_url}\")\n # Determine the upload file chunk size\n chunk_size: int = (\n 1024 * 320 * 16\n ) # = 5MiB. 
Docs: Must be multiple of 320KiB, recommend 5-10MiB, max 60MiB\n no_of_uploads: int = -(-file_size // chunk_size)\n logger.debug(\n f\"chunk_size={chunk_size}B, file_size={file_size}, no_of_uploads={no_of_uploads}\"\n )\n if verbose and no_of_uploads > 1:\n print(\n f\"File {destination_file_name} will be uploaded in {no_of_uploads} segments\"\n )\n logger.info(\n f\"file {destination_file_name} will be uploaded in {no_of_uploads} segments\"\n )\n # Create an upload connection client\n timeout = httpx.Timeout(10.0, read=180.0, write=180.0)\n client = httpx.Client(timeout=timeout)\n # Run in a try block to capture user cancellation request\n try:\n # Open the file pointer\n if verbose:\n print(\"Loading file\")\n logger.info(f\"opening file '{file_path}'\")\n data = open(file_path, \"rb\")\n # Start the upload in a loop for as long as there is data left to upload\n n = 0\n while data.tell() < file_size:\n # Print the upload status\n n += 1\n if n == 1:\n content_range_start = 0\n content_range_end = chunk_size - 1\n if verbose:\n print(f\"Uploading segment {n}/{no_of_uploads}\")\n else:\n if verbose:\n print(\n f\"Uploading segment {n}/{no_of_uploads} (~{int((n-1)/no_of_uploads*100)}% complete)\"\n )\n logger.debug(\n f\"uploading file segment={n}, content_range_start={content_range_start}, content_range_end={content_range_end}\"\n )\n # Upload chunks\n if (file_size - data.tell()) > chunk_size:\n # Typical chunk upload\n headers = {\n \"Content-Range\": f\"bytes {content_range_start}-{content_range_end}/{file_size}\"\n }\n content = data.read(chunk_size)\n response = client.put(\n upload_url,\n headers=headers,\n content=content,\n )\n # Validate request response\n self._raise_unexpected_response(\n response, 202, f\"could not upload chuck {n} of {no_of_uploads}\"\n )\n # Calculate next chunk range\n content_range_start = data.tell()\n content_range_end = data.tell() + chunk_size - 1\n else:\n # Final chunk upload\n content_range_end = file_size - 1\n headers = {\n \"Content-Range\": f\"bytes {content_range_start}-{content_range_end}/{file_size}\"\n }\n content = data.read(chunk_size)\n response = client.put(\n upload_url,\n headers=headers,\n content=content,\n )\n except KeyboardInterrupt:\n httpx.delete(upload_url)\n if verbose:\n print(\"Upload cancelled by user.\")\n raise\n except Exception:\n httpx.delete(upload_url)\n raise\n finally:\n data.close()\n client.close()\n # Validate request response\n self._raise_unexpected_response(\n response, [200, 201], \"item not uploaded\", has_json=True\n )\n if verbose:\n print(\"Upload complete\")\n # Return the file item id\n response_data = response.json()\n item_id = response_data[\"id\"]\n return item_id\n\n def _get_local_file_metadata(self, file_path: str | Path) -> tuple[int, str, str]:\n \"\"\"Retrieves local file metadata (size, dates).\n Note results differ based on platform, with creation date not available on Linux.\n Positional arguments:\n file_path (str|Path) -- path of the local source file to upload\n Returns:\n file_size (str) -- file size in bytes (B)\n creation_date (str) -- UTC ISO format file creation timestamp\n modified_date (str) -- UTC ISO format file last modified timestamp\n \"\"\"\n # Validate file_path type and that the file exists\n if not isinstance(file_path, str) and not isinstance(file_path, Path):\n raise TypeError(\n f\"file_path expected 'str' or 'Path', got {type(file_path).__name__!r}\"\n )\n if not os.path.isfile(file_path):\n raise ValueError(\n f\"file_path expected a path to an existing file, got 
'{file_path!s}'\"\n )\n # Get the file size\n file_size = os.path.getsize(file_path)\n # Get the file modified time\n file_modified = os.path.getmtime(file_path)\n # Get the file creation time (platform specific)\n if os.name == \"nt\":\n # Windows OS\n file_created = os.path.getctime(file_path)\n else:\n # Other OS\n stat = os.stat(file_path)\n try:\n # Likely Mac OS\n file_created = stat.st_birthtime\n except AttributeError:\n # Likely Linux OS, fall back to last modified.\n file_created = stat.st_mtime\n # Convert the seconds to UTC ISO timestamps\n file_created_str = (\n datetime.fromtimestamp(file_created)\n .astimezone(timezone.utc)\n .isoformat(timespec=\"seconds\")\n .replace(\"+00:00\", \"Z\")\n )\n file_modified_str = (\n datetime.fromtimestamp(file_modified)\n .astimezone(timezone.utc)\n .isoformat(timespec=\"seconds\")\n .replace(\"+00:00\", \"Z\")\n )\n logger.debug(\n f\"platform={os.name}, file_created_str={file_created_str}, file_modified_str={file_modified_str}\"\n )\n return file_size, file_created_str, file_modified_str\n", "id": "11653541", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "src/graph_onedrive/_onedrive.py" } ]
0
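A minimal usage sketch of the OneDrive class defined in src/graph_onedrive/_onedrive.py above. This is not part of the dataset row; it only illustrates the constructor and methods shown in that file. Assumptions: the package re-exports OneDrive from graph_onedrive, and a local config.json holds an "onedrive" section with tenant_id, client_id and client_secret_value. Signatures follow the source above.

# Hedged usage sketch for the OneDrive class shown above (graph_onedrive/_onedrive.py).
# Assumptions: OneDrive is importable from graph_onedrive, and config.json contains an
# "onedrive" key with tenant_id, client_id and client_secret_value.
from graph_onedrive import OneDrive

onedrive = OneDrive.from_file("config.json", config_key="onedrive", save_refresh_token=True)
used, capacity, unit = onedrive.get_usage(unit="gb", verbose=True)   # current storage usage
for item in onedrive.list_directory():                               # items in the drive root
    print(item["id"], item["name"])
folder_id = onedrive.make_folder("reports")                          # reuses an existing folder id if present
item_id = onedrive.upload_file("report.pdf", parent_folder_id=folder_id, verbose=True)
onedrive.to_file("config.json", config_key="onedrive")               # persist the refresh token for next session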
msniveau
[ { "content": "from lib.util.banner_message import banner_message\nfrom lib.util.parameter import parameter\nfrom lib.util.config import config\nfrom lib.util.queryli import queryli", "id": "7633304", "language": "Python", "matching_score": 1.5152838230133057, "max_stars_count": 3, "path": "lib/util/__init__.py" }, { "content": "def banner_message(length, message):\n l2=length-int(round((len(message)/2)))\n response=''\n while l2 != 0:\n response+=' '\n l2=l2-1\n response+=message\n return response\n\n", "id": "8328070", "language": "Python", "matching_score": 0.02569696493446827, "max_stars_count": 3, "path": "lib/util/banner_message.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.message.set_nickname import set_nickname\nfrom lib.message.add_server import add_server\nfrom lib.message.del_server import del_server\nfrom lib.message.query_server import query_server\nfrom lib.message.print_server import print_server\nfrom lib.message.list_tags import list_tags\nfrom lib.message.monitoring import monitoring\nfrom lib.message.format_edit import format_edit\n\ndef getClass(type):\n if type == \"!nick\":\n return set_nickname\n if type == \"!addserver\":\n return add_server\n if type == \"!delserver\":\n return del_server\n if type == \"!status\":\n return query_server\n if type == \"!servers\":\n return list_tags\n if type == \"!print\":\n return print_server\n if type == \"!monitoring\":\n return monitoring\n if type == \"!format\":\n return format_edit\n return base_message\n\ndef handle( message):\n command='unknown'\n if len(message.content.split()) > 0:\n command=message.content.split()[0]\n return getClass(command)(message)\n", "id": "4422806", "language": "Python", "matching_score": 1.8459604978561401, "max_stars_count": 3, "path": "lib/message/__init__.py" }, { "content": "from lib.message.base_message import base_message\n\nclass set_nickname(base_message):\n validationRules = {\n 1: '^[a-zA-Z0-9-_~!+\\.]+$'\n }\n length = {\n 'min': 1,\n 'max': 1\n }\n\n def run(self, client):\n self.assert_permission()\n self.assert_length()\n self.assert_valid()\n current_nick=self.message.server.me.display_name\n yield from client.change_nickname(self.message.server.me, self.getMessageParts()[1])\n yield from client.send_message(self.message.channel, 'Nickname was changed from %s to %s' % (current_nick, self.getMessageParts()[1]))\n", "id": "8858831", "language": "Python", "matching_score": 1.2410777807235718, "max_stars_count": 3, "path": "lib/message/set_nickname.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.util.error import DiscordError\nfrom lib.util.parameter import parameter\nimport re\n\nclass add_server(base_message):\n validationRules = {\n 1: '^[a-z0-9-_~!+\\.]+$',\n 2: '^[a-z0-9]+$',\n 3: '^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$',\n 4: '^[0-9]+$',\n }\n length = {\n 'min': 4,\n 'max': 4\n }\n\n def getMessageParts(self):\n ip=re.compile('^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$')\n message_parts = self.message.content.split()\n if len(message_parts) not in [4,5]:\n return message_parts\n if bool(ip.match(message_parts[2])):\n l = list()\n l.append(message_parts[0])\n l.append(message_parts[1])\n l.append('steam')\n l.append(message_parts[2])\n l.append(message_parts[3])\n message_parts = l\n return message_parts\n\n\n def run(self, client):\n self.assert_permission()\n self.assert_length()\n self.assert_valid()\n message_parts = self.getMessageParts()\n p = parameter.getInstance()\n cfg = p.__config__\n if not 
self.message.server.id in cfg.sections():\n cfg.add_section(self.message.server.id)\n cfg.set(self.message.server.id, message_parts[1], 'false')\n server_uid=message_parts[1] + ':' + self.message.server.id\n if server_uid in cfg.sections():\n raise DiscordError(\"Server with the tag \\\"%s\\\" is already configured, delete server first: !delserver <tag>\" % (message_parts[1]))\n else:\n cfg.add_section(server_uid)\n cfg.set(server_uid, 'game', message_parts[2])\n cfg.set(server_uid, 'ip', message_parts[3])\n cfg.set(server_uid, 'port', message_parts[4])\n with open(p.getConfig(), 'w+') as f:\n cfg.write(f)\n yield from client.send_message(self.message.channel, 'Added %s server %s with address %s:%s' % (message_parts[2], message_parts[1], message_parts[3], message_parts[4]))\n", "id": "10612462", "language": "Python", "matching_score": 3.7035443782806396, "max_stars_count": 3, "path": "lib/message/add_server.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.util.parameter import parameter\n\nclass del_server(base_message):\n validationRules = {\n 1: '^[a-z0-9-_~!+\\.]+$',\n }\n length = {\n 'min': 1,\n 'max': 1\n }\n\n def run(self, client):\n self.assert_permission()\n self.assert_length()\n self.assert_valid()\n message_parts = self.getMessageParts()\n p = parameter.getInstance()\n cfg = p.__config__\n if not self.message.server.id in cfg.sections():\n cfg.add_section(self.message.server.id)\n cfg.remove_option(self.message.server.id, message_parts[1])\n server_uid=message_parts[1] + ':' + self.message.server.id\n if server_uid in cfg.sections():\n cfg.remove_section(server_uid)\n with open(p.getConfig(), 'w+') as f:\n cfg.write(f)\n yield from client.send_message(self.message.channel, 'Deleted server %s' % (message_parts[1]))", "id": "2408987", "language": "Python", "matching_score": 1.30832040309906, "max_stars_count": 3, "path": "lib/message/del_server.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport os, sys, time, socket, argparse, asyncio, discord\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))\n\nfrom lib.util import *\nfrom lib.message import handle\nfrom bin import __version__, __bundledir__\nfrom lib.util.error import DiscordError\nfrom lib.util.queryli import queryli, format_message\n\n\nprint(\" _ _ _ _\")\nprint(\" ___| |_ __ _| |_ _ _ ___| |__ ___ | |_\")\nprint(\"/ __| __/ _` | __| | | / __| '_ \\ / _ \\| __|\")\nprint(\"\\__ \\ || (_| | |_| |_| \\__ \\ |_) | (_) | |_\")\nprint(\"|___/\\__\\__,_|\\__|\\__,_|___/_.__/ \\___/ \\__|\")\nprint(banner_message(22, \"discord status bot v\" + __version__))\nparam = parameter(argparse.ArgumentParser())\ncfg = param.getInstance().__config__\nprint(banner_message(22, \"config path: \" + param.getConfig()))\nif param.isVerbose() and not param.isDebugMode():\n print(banner_message(22, \"running in verbose mode (debugging only!)\"))\nif param.isDebugMode():\n print(banner_message(22, \"running in debug mode!\"))\nif param.getDSN():\n print(banner_message(22, \"sentry logging enabled\"))\n\nclient = discord.Client()\n\nif param.isDebugMode():\n print('[info ] Bundle dir is: %s' % (__bundledir__))\n\[email protected]\ndef monitoring():\n yield from client.wait_until_ready()\n while True:\n for section in cfg.sections():\n if not ':' in section and section.isdigit():\n for tag in cfg.options(section):\n if cfg.get(section, tag).isdigit():\n if param.isDebugMode():\n print('[info ] Monitoring run for %s %s' % (section, tag))\n yield from monitoring_run(section, tag)\n 
yield from asyncio.sleep(param.getMonitoringInterval())\n\[email protected]\ndef monitoring_run(section, tag):\n sct = tag + ':' + section\n game=cfg.get(sct, 'game')\n gs = queryli(game, cfg.get(sct, 'ip'), int(cfg.get(sct, 'port')))\n if not cfg.has_option(sct, 'status'):\n cfg.set(sct, 'status', 'false')\n old = cfg.get(sct, 'status')\n changed=False\n if gs.status.error:\n if old == 'false':\n pass\n else:\n cfg.set(sct, 'status', 'false')\n changed=True\n with open(param.getConfig(), 'w+') as f:\n cfg.write(f)\n else:\n if old == 'false':\n cfg.set(sct, 'status', 'true')\n changed=True\n with open(param.getConfig(), 'w+') as f:\n cfg.write(f)\n else:\n pass\n if changed:\n if param.isDebugMode():\n print('[info ] Monitoring status changed! (%s %s)' % (section, tag))\n try:\n yield from client.send_message(discord.Object(id=cfg.get(section, tag)), format_message(section, gs, game, False, True))\n except (discord.client.Forbidden, discord.errors.NotFound):\n print('[error ] Monitoring wasn\\'t able to send a message for (%s %s)' % (section, tag))\n yield from dict() \n\[email protected]\[email protected]\ndef on_message(message):\n if (param.isVerbose()):\n server_id='direct'\n if message.server:\n server_id='guild '\n print(\"[%s][%s] %s:\\t %s\" % (server_id, message.channel.id, message.author.name, message.content))\n cmd = handle(message)\n try:\n yield from cmd.run(client)\n except DiscordError as e:\n if param.isDebugMode():\n print(\"[error ] \" + str(e))\n yield from client.send_message(discord.Object(id=message.channel.id), \"Error: \" + str(e))\n\[email protected]\[email protected]\ndef on_ready():\n print(\"[info ] Logging in as %s (%s)\" % (client.user.name, client.user.id))\n\nclient.loop.create_task(monitoring())\nclient.run(param.getToken())\n", "id": "2037937", "language": "Python", "matching_score": 4.009134769439697, "max_stars_count": 3, "path": "bin/bot.py" }, { "content": "import os.path, configparser\nfrom bin import __bundledir__\nfrom shutil import copyfile\nfrom lib.util.error import ConfigurationError\nfrom lib.util.config import config\nfrom os.path import expanduser\n\nclass parameter():\n __instance = None\n\n @staticmethod\n def getInstance():\n return parameter.__instance\n\n def __init__(self, parser):\n self.parser = parser\n self.parser.add_argument('--config', dest='config', help='Defines the config path (default: ~/config.ini)')\n self.parser.add_argument('--token', dest='token', help='Discord token')\n self.parser.add_argument('--monitoring-interval', dest='monitoring_interval', help='Interval of the monitoring in seconds (default: 60)')\n self.parser.add_argument('--dsn', dest='dsn', help='Sentry DSN for error logging')\n self.parser.add_argument('-v', action='store_true', dest='verbose', help='Print every message the bot is receiving (debug only)')\n self.parser.add_argument('-vv', action='store_true', dest='verbose_extended', help='More detailed version of -v')\n self.args=parser.parse_args()\n self.__config__ = config.getInstance(self.getConfig())\n parameter.__instance = self\n\n def getConfig(self):\n if self.args.config == None:\n config = expanduser('~') + '/config.ini'\n else:\n config=self.args.config\n if not os.path.isfile(config):\n if self.isDebugMode():\n print('[info ] Config file not found, creating it.')\n copyfile(__bundledir__ + '/config.ini.dist', config)\n return config\n\n def getToken(self):\n if not self.args.token:\n if os.path.isfile(self.getConfig()):\n try:\n return self.__config__.get('discord', 'token')\n except 
configparser.NoSectionError:\n raise ConfigurationError('Unable to get discord token')\n else:\n raise AttributeError(\"Config file not found\")\n return self.args.token\n\n def getDSN(self):\n if not self.args.dsn:\n if os.path.isfile(self.getConfig()):\n try:\n dsn = self.__config__.get('sentry', 'dsn')\n if len(dsn.strip()) > 0:\n return dsn\n except configparser.NoSectionError:\n return False\n return False\n else:\n raise AttributeError(\"Config file not found\")\n return self.args.dsn\n\n def getMonitoringInterval(self):\n if not self.args.monitoring_interval:\n return 60\n return int(self.args.monitoring_interval)\n\n def isVerbose(self):\n return self.args.verbose != False or self.args.verbose_extended != False\n\n def isDebugMode(self):\n return self.args.verbose_extended != False\n\n def printHelp(self):\n self.parser.print_help()\n", "id": "10291796", "language": "Python", "matching_score": 1.8054676055908203, "max_stars_count": 3, "path": "lib/util/parameter.py" }, { "content": "from configparser import ConfigParser\n\nclass config:\n __instance = None\n\n @staticmethod\n def getInstance(path = None):\n if config.__instance == None:\n config(path)\n return config.__instance\n\n def __init__(self, path):\n if config.__instance != None:\n raise Exception(\"Cannot load config file again, use getInstance instead\")\n else:\n config.__instance = ConfigParser()\n config.__instance.read(path)", "id": "87951", "language": "Python", "matching_score": 0.2148800939321518, "max_stars_count": 3, "path": "lib/util/config.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2014-2017 <NAME>\n\nfrom __future__ import (absolute_import,\n unicode_literals, print_function, division)\n\nimport sys\n\nimport six\n\n\nclass Platform(object):\n \"\"\"A Source server platform identifier\n\n This class provides utilities for representing Source server platforms\n as returned from a A2S_INFO request. Each platform is ultimately\n represented by one of the following integers:\n\n +-----+----------+\n | ID | Platform |\n +=====+==========+\n | 76 | Linux |\n +-----+----------+\n | 87 | Windows |\n +-----+----------+\n | 108 | Linux |\n +-----+----------+\n | 109 | Mac OS X |\n +-----+----------+\n | 111 | Mac OS X |\n +-----+----------+\n | 119 | Windows |\n +-----+----------+\n\n .. note::\n Starbound uses 76 instead of 108 for Linux in the old GoldSource\n style.\n \"\"\"\n\n def __init__(self, value):\n \"\"\"Initialise the platform identifier\n\n The given ``value`` will be mapped to a numeric identifier. If the\n value is already an integer it must then it must exist in the table\n above else ValueError is returned.\n\n If ``value`` is a one character long string then it's ordinal value\n as given by ``ord()`` is used. 
Alternately the string can be either\n of the following:\n\n * Linux\n * Mac OS X\n * Windows\n \"\"\"\n if isinstance(value, six.text_type):\n if len(value) == 1:\n value = ord(value)\n else:\n value = {\n \"linux\": 108,\n \"mac os x\": 111,\n \"windows\": 119,\n }.get(value.lower())\n if value is None:\n raise ValueError(\"Couldn't convert string {!r} to valid \"\n \"platform identifier\".format(value))\n if value not in {76, 87, 108, 109, 111, 119}:\n raise ValueError(\"Invalid platform identifier {!r}\".format(value))\n self.value = value\n\n def __repr__(self):\n return \"<{self.__class__.__name__} \" \\\n \"{self.value} '{self}'>\".format(self=self)\n\n def __unicode__(self):\n return {\n 76: \"Linux\",\n 87: \"Windows\",\n 108: \"Linux\",\n 109: \"Mac OS X\",\n 111: \"Mac OS X\",\n 119: \"Windows\",\n }[self.value]\n\n if six.PY3:\n def __str__(self):\n return self.__unicode__()\n\n def __bytes__(self):\n return self.__unicode__().encode(sys.getdefaultencoding())\n else:\n def __str__(self):\n return self.__unicode__().encode(sys.getdefaultencoding())\n\n def __int__(self):\n return self.value\n\n def __eq__(self, other):\n \"\"\"Check for equality between two platforms\n\n If ``other`` is not a Platform instance then an attempt is made to\n convert it to one using same approach as :meth:`__init__`. This means\n platforms can be compared against integers and strings. For example:\n\n .. code:: pycon\n\n >>>Platform(108) == \"linux\"\n True\n >>>Platform(109) == 109\n True\n >>>Platform(119) == \"w\"\n True\n\n Despite the fact there are two numerical identifers for Mac (109 and\n 111) comparing either of them together will yield ``True``.\n\n .. code:: pycon\n\n >>>Platform(109) == Platform(111)\n True\n \"\"\"\n if not isinstance(other, Platform):\n other = Platform(other)\n if self.value == 76 or self.value == 108:\n return other.value == 76 or other.value == 108\n elif self.value == 109 or self.value == 111:\n return other.value == 109 or other.value == 111\n else:\n return self.value == other.value\n\n @property\n def os_name(self):\n \"\"\"Convenience mapping to names returned by ``os.name``\"\"\"\n return {\n 76: \"posix\",\n 108: \"posix\",\n 109: \"posix\",\n 111: \"posix\",\n 119: \"nt\",\n }[self.value]\n\n\nPlatform.LINUX = Platform(108)\nPlatform.MAC_OS_X = Platform(111)\nPlatform.WINDOWS = Platform(119)\n\n\nclass ServerType(object):\n \"\"\"A Source server platform identifier\n\n This class provides utilities for representing Source server types\n as returned from a A2S_INFO request. Each server type is ultimately\n represented by one of the following integers:\n\n +-----+---------------+\n | ID | Server type |\n +=====+===============+\n | 68 | Dedicated |\n +-----+---------------+\n | 100 | Dedicated |\n +-----+---------------+\n | 108 | Non-dedicated |\n +-----+---------------+\n | 112 | SourceTV |\n +-----+---------------+\n\n .. note::\n Starbound uses 68 instead of 100 for a dedicated server in the old\n GoldSource style.\n \"\"\"\n\n def __init__(self, value):\n \"\"\"Initialise the server type identifier\n\n The given ``value`` will be mapped to a numeric identifier. If the\n value is already an integer it must then it must exist in the table\n above else ValueError is returned.\n\n If ``value`` is a one character long string then it's ordinal value\n as given by ``ord()`` is used. 
Alternately the string can be either\n of the following:\n\n * Dedicated\n * Non-Dedicated\n * SourceTV\n \"\"\"\n if isinstance(value, six.text_type):\n if len(value) == 1:\n value = ord(value)\n else:\n value = {\n \"dedicated\": 100,\n \"non-dedicated\": 108,\n \"sourcetv\": 112,\n }.get(value.lower())\n if value is None:\n raise ValueError(\"Couldn't convert string {!r} to valid \"\n \"server type identifier\".format(value))\n if value not in {68, 100, 108, 112}:\n raise ValueError(\n \"Invalid server type identifier {!r}\".format(value))\n self.value = value\n\n def __repr__(self):\n return \"<{self.__class__.__name__} \" \\\n \"{self.value} '{self}'>\".format(self=self)\n\n def __unicode__(self):\n return {\n 68: \"Dedicated\",\n 100: \"Dedicated\",\n 108: \"Non-Dedicated\",\n 112: \"SourceTV\",\n }[self.value]\n\n if six.PY3:\n def __str__(self):\n return self.__unicode__()\n\n def __bytes__(self):\n return self.__unicode__().encode(sys.getdefaultencoding())\n else:\n def __str__(self):\n return self.__unicode__().encode(sys.getdefaultencoding())\n\n def __int__(self):\n return self.value\n\n def __eq__(self, other):\n \"\"\"Check for equality between two server types\n\n If ``other`` is not a ServerType instance then an attempt is made to\n convert it to one using same approach as :meth:`.__init__`. This means\n server types can be compared against integers and strings. For example:\n\n .. code:: pycon\n\n >>>Server(100) == \"dedicated\"\n True\n >>>Platform(108) == 108\n True\n >>>Platform(112) == \"p\"\n True\n \"\"\"\n if not isinstance(other, ServerType):\n other = ServerType(other)\n if self.value == 68 or self.value == 100:\n return other.value == 68 or other.value == 100\n else:\n return self.value == other.value\n\n @property\n def char(self):\n return chr(self.value)\n\n\nServerType.DEDICATED = ServerType(100)\nServerType.NON_DEDICATED = ServerType(108)\nServerType.SOURCETV = ServerType(112)\n", "id": "10038272", "language": "Python", "matching_score": 1.0774264335632324, "max_stars_count": 0, "path": "valve/source/util.py" }, { "content": "import json, requests, re\nfrom lib.util.parameter import parameter\n\nclass queryli:\n def __init__(self, game, ip, port):\n self._game = game\n self.ip = ip\n self.port = port\n self.refresh()\n\n\n def refresh(self):\n response = requests.get('https://query.li/api/%s/%s/%d' % (self._game, self.ip, self.port), timeout=5)\n self.data = json.loads(response.text)\n return self\n\n @property\n def game(self):\n return QueryResult(self.data['game'])\n\n @property\n def whois(self):\n return QueryResult(self.data['whois'])\n\n @property\n def status(self):\n return QueryResult(self.data['status'])\n\n @property\n def cached(self):\n return self.data['cached']\n\n def toJson(self):\n return json.dumps(self.data)\n \nclass QueryResult:\n def __init__(self, data):\n self.data = data\n\n def __getattr__(self, name):\n if name in self.data:\n if type(self.data[name]) == dict:\n return QueryResult(self.data[name])\n return self.data[name]\n\n @property\n def __dict__(self):\n d = self.data\n for name in d:\n if type(d[name]) == dict:\n d[name] = QueryResult(self.data[name])\n return d\n\n def toJson(self):\n return json.dumps(self.data)\n\n\ndef format_message(server, gs, tag, printAll=False, monitoring=False):\n p = parameter.getInstance()\n cfg = p.__config__\n section_name = server + ':format'\n prefix='status'\n if monitoring:\n prefix='monitoring'\n if not section_name in cfg.sections():\n section_name='default:format'\n type='online'\n if 
gs.status.error:\n type='offline'\n key_name = '%s_%s_%s' % (prefix, type, tag)\n if not key_name in cfg.options(section_name):\n key_name='%s_%s' % (prefix, type)\n message = cfg.get(section_name, key_name)\n for key, value in gs.game.info.__dict__.items():\n if key == 'server_name':\n value = re.sub(r'<.+?>', '', value)\n if printAll:\n message+='%s = %s\\n' % (key, value)\n message = message.replace('{' + key + '}', str(value))\n\n for key, value in gs.whois.addr.__dict__.items():\n if printAll:\n message+='%s = %s\\n' % (key, value)\n message = message.replace('{' + key + '}', str(value))\n\n if printAll:\n message+='%s = %s\\n' % ('organization', gs.whois.organization)\n message+='%s = %s\\n' % ('tag', tag)\n message+='%s = %s\\n' % ('break', 'linebreak')\n message = message.replace('{organization}', gs.whois.organization)\n message = message.replace('{tag}', tag)\n message = message.replace('{break}', '\\r\\n')\n return message", "id": "4855703", "language": "Python", "matching_score": 3.3833837509155273, "max_stars_count": 3, "path": "lib/util/queryli.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.util.parameter import parameter\nfrom lib.util.queryli import queryli\nfrom lib.util.error import DiscordError\nfrom lib.util.queryli import format_message\n\nclass query_server(base_message):\n validationRules = {\n 1: '^[a-z0-9-_~!+\\.]+$',\n 2: '^(all)$',\n }\n length = {\n 'min': 0,\n 'max': 2\n }\n\n def run(self, client):\n self.assert_length()\n self.assert_valid()\n message_parts = self.getMessageParts()\n tag='main'\n if len(message_parts) > 1:\n tag=message_parts[1]\n p = parameter.getInstance()\n cfg = p.__config__\n tags = []\n if tag == 'all':\n for section in cfg.sections():\n if section.endswith(':' + self.message.server.id):\n tags.append(section.split(':')[0])\n else:\n tags.append(tag)\n message=''\n for tag in tags:\n keyindex=tag + ':' + self.message.server.id\n if not keyindex in cfg.sections():\n raise DiscordError('No server with the tag \"%s\" found.' % (tag))\n game = cfg.get(keyindex, 'game')\n ip = cfg.get(keyindex, 'ip')\n port = cfg.get(keyindex, 'port')\n gs = queryli(game, ip, int(port))\n printAll = False\n if len(self.getMessageParts()) > 2:\n printAll=True\n message+=format_message(self.message.server.id, gs, game, printAll, False)\n yield from client.send_message(self.message.channel, message)\n", "id": "2703689", "language": "Python", "matching_score": 3.436553478240967, "max_stars_count": 3, "path": "lib/message/query_server.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.util.parameter import parameter\nfrom lib.util.error import DiscordError\n\nclass print_server(base_message):\n validationRules = {\n 1: '^[a-z0-9-_~!+\\.]+$',\n }\n length = {\n 'min': 1,\n 'max': 1\n }\n\n def run(self, client):\n self.assert_length()\n self.assert_valid()\n message_parts = self.getMessageParts()\n tag=message_parts[1]\n p = parameter.getInstance()\n cfg = p.__config__\n keyindex=tag + ':' + self.message.server.id\n if not keyindex in cfg.sections():\n raise DiscordError('No server with the tag \"%s\" found.' 
% (tag))\n game = cfg.get(keyindex, 'game')\n ip = cfg.get(keyindex, 'ip')\n port = cfg.get(keyindex, 'port')\n yield from client.send_message(self.message.channel, '```ini\\nip = %s\\nport = %s\\ngame = %s```' % (ip, port, game))", "id": "9745853", "language": "Python", "matching_score": 3.613877773284912, "max_stars_count": 3, "path": "lib/message/print_server.py" }, { "content": "from lib.util.error import DiscordError\nimport re\n\nclass base_message():\n validationRules = {}\n length = {\n 'min': False,\n 'max': False\n }\n\n def __init__(self, message):\n self.message = message\n\n def getMessageParts(self):\n return self.message.content.split()\n\n def run(self, client):\n return dict()\n\n def assert_permission(self):\n if not self.message.author.server_permissions.manage_server:\n raise DiscordError('Missing permissions')\n\n def assert_length(self):\n message_length = len(self.getMessageParts())-1\n if self.length['min'] != False:\n if message_length < self.length['min']:\n raise DiscordError('Missing arguments!')\n if self.length['max'] != False:\n if message_length > self.length['max']:\n raise DiscordError('Too many arguments!')\n\n def assert_valid(self):\n message_parts = self.getMessageParts()\n for key in range(1, len(message_parts)):\n if key in self.validationRules:\n reg=re.compile(self.validationRules[key])\n if not bool(reg.match(message_parts[key])):\n raise DiscordError('Invalid format for the ' + self.ndarg(key-1) + ' argument, only ' + self.validationRules[key] + ' is allowed!')\n\n def ndarg(self, pos):\n vals=[\n 'first',\n 'second',\n 'third',\n 'fourth',\n 'fifth',\n 'sixth'\n ]\n return vals[pos]", "id": "12082131", "language": "Python", "matching_score": 1.6472225189208984, "max_stars_count": 3, "path": "lib/message/base_message.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.util.error import DiscordError\nfrom lib.util.parameter import parameter\n\nclass format_edit(base_message):\n validationRules = {\n 1: '^(?i)(monitoring|status)$',\n 2: '^(?i)(offline|online)$',\n 3: '^[a-zA-Z0-9]+$',\n }\n length = {\n 'min': 4,\n 'max': False\n }\n\n\n def run(self, client):\n self.assert_permission()\n self.assert_length()\n self.assert_valid()\n message_parts = self.getMessageParts()\n p = parameter.getInstance()\n cfg = p.__config__\n section_name = self.message.server.id + ':format'\n if not section_name in cfg.sections():\n cfg.add_section(section_name)\n for default in cfg.options('default:format'):\n cfg.set(section_name, default, cfg.get('default:format', default))\n with open(p.getConfig(), 'w+') as f:\n cfg.write(f)\n if message_parts[3] != 'general':\n keyname=message_parts[1] + '_' + message_parts[2] + '_' + message_parts[3]\n else:\n keyname=message_parts[1] + '_' + message_parts[2]\n cfg.set(section_name, keyname, ' '.join(message_parts[4:]))\n with open(p.getConfig(), 'w+') as f:\n cfg.write(f)\n yield from client.send_message(self.message.channel, 'Formatting for ' + message_parts[1] + ':' + message_parts[2] + ' changed to \"' + ' '.join(message_parts[4:]) + '\"')\n", "id": "6671417", "language": "Python", "matching_score": 4.662894248962402, "max_stars_count": 3, "path": "lib/message/format_edit.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.util.error import DiscordError\nfrom lib.util.parameter import parameter\n\nclass monitoring(base_message):\n validationRules = {\n 1: '^[a-z0-9-_~!+\\.]+$',\n 2: '^(?i)(true|false)$',\n }\n length = {\n 'min': 1,\n 'max': 2\n }\n\n def run(self, 
client):\n self.assert_permission()\n self.assert_length()\n self.assert_valid()\n message_parts = self.getMessageParts()\n p = parameter.getInstance()\n cfg = p.__config__\n if not self.message.server.id in cfg.sections():\n cfg.add_section(self.message.server.id)\n if not cfg.has_option(self.message.server.id, message_parts[1]):\n raise DiscordError('No server configured with this tag')\n if len(message_parts) > 2:\n option=message_parts[2]\n if message_parts[2] == 'true':\n option=self.message.channel.id\n cfg.set(self.message.server.id, message_parts[1], option)\n with open(p.getConfig(), 'w+') as f:\n cfg.write(f)\n yield from client.send_message(self.message.channel, 'Monitoring was set to %s' % (message_parts[2]))\n else:\n yield from client.send_message(self.message.channel, 'Monitoring is currenty %s' % (cfg.get(self.message.server.id, message_parts[1])))", "id": "118268", "language": "Python", "matching_score": 4.107400894165039, "max_stars_count": 3, "path": "lib/message/monitoring.py" }, { "content": "from lib.message.base_message import base_message\nfrom lib.util.error import DiscordError\nfrom lib.util.parameter import parameter\n\nclass list_tags(base_message):\n length = {\n 'min': 0,\n 'max': 0\n }\n\n\n def run(self, client):\n self.assert_length()\n p = parameter.getInstance()\n cfg = p.__config__\n if not self.message.server.id in cfg.sections():\n cfg.add_section(self.message.server.id)\n tags = []\n for section in cfg.sections():\n if section.endswith(':' + self.message.server.id):\n tags.append(section.split(':')[0])\n if len(tags) == 0:\n raise DiscordError('No servers configured!')\n yield from client.send_message(self.message.channel, 'Currently the following tags are configured:' + self.format_list(tags))\n\n def format_list(self,parts):\n return '\\n\\u2022 ' + ('\\n\\u2022 ').join(parts)", "id": "9420761", "language": "Python", "matching_score": 1.3994641304016113, "max_stars_count": 3, "path": "lib/message/list_tags.py" }, { "content": "class ConfigurationError(Exception):\n pass\n\nclass DiscordError(Exception):\n pass", "id": "489417", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "lib/util/error.py" }, { "content": "import os, sys\n__version__=\"0.0.1\"\n\n\nfrozen = 'not'\nif getattr(sys, 'frozen', False):\n __bundledir__ = sys._MEIPASS\nelse:\n __bundledir__ = os.path.dirname(os.path.abspath(__file__ + '/../'))", "id": "12357593", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "bin/__init__.py" } ]
1.647223
richardf
[ { "content": "import unittest\nimport os\nfrom binp import *\n\nTEST_DIR = os.path.join(os.curdir, \"tests\")\nDATA_DIR = os.path.join(TEST_DIR, \"data\")\n\nclass ORLibraryInstanceReaderTest(unittest.TestCase):\n def test_get_instance_definition_with_non_numeric_data_should_raise_valueerror(self):\n with self.assertRaises(ValueError):\n ORLibraryInstanceReader._get_instance_definition(\"10 AA 20\")\n\n def test_get_instance_definition_with_invalid_data_should_raise_valueerror(self):\n with self.assertRaises(ValueError):\n ORLibraryInstanceReader._get_instance_definition(\"invalid line\")\n\n def test_get_instance_definition_with_ivalid_data_should_return_1_10_20(self):\n cap, num, best = ORLibraryInstanceReader._get_instance_definition(\"1 10 20\")\n self.assertEqual(1, cap)\n self.assertEqual(10, num)\n self.assertEqual(20, best)\n\n def test_get_number_of_instances_with_valid_data_should_return_4(self):\n data = [\"20\", \"1 2 3\", \"22\"]\n ret = ORLibraryInstanceReader._get_number_of_instances(data)\n self.assertEqual(20, ret)\n \n def test_get_number_of_instances_with_invalid_data_should_raise_valueerror(self):\n data = [\"A\", \"1 2 3\", \"22\"]\n with self.assertRaises(ValueError):\n ORLibraryInstanceReader._get_number_of_instances(data)\n\n def test_get_instances_with_inst1_should_return_1_instance(self):\n ret = ORLibraryInstanceReader.get_instances(os.path.join(DATA_DIR, \"inst1.txt\"))\n\n self.assertIsInstance(ret[0], Instance)\n self.assertEqual(\"inst_01\", ret[0].instance_name)\n self.assertEqual(90, ret[0].bin_capacity)\n self.assertEqual(2, ret[0].best_known_sol)\n self.assertEqual([42, 69, 30], ret[0].objects)\n\n def test_get_instances_with_inst2_should_return_2_instances(self):\n ret = ORLibraryInstanceReader.get_instances(os.path.join(DATA_DIR, \"inst2.txt\"))\n\n self.assertIsInstance(ret[0], Instance)\n self.assertEqual(\"inst_01\", ret[0].instance_name)\n self.assertEqual(90, ret[0].bin_capacity)\n self.assertEqual(2, ret[0].best_known_sol)\n self.assertEqual([42, 69, 30], ret[0].objects)\n\n self.assertIsInstance(ret[1], Instance)\n self.assertEqual(\"inst_02\", ret[1].instance_name)\n self.assertEqual(50, ret[1].bin_capacity)\n self.assertEqual(2, ret[1].best_known_sol)\n self.assertEqual([10, 20, 30, 40], ret[1].objects)\n\n\nclass InstanceTest(unittest.TestCase):\n def test_Instance_object_should_receive_data_in_init_method(self):\n inst = Instance(\"name\", 100, [30, 20], 1)\n \n self.assertIsInstance(inst, Instance)\n self.assertEqual(\"name\", inst.instance_name)\n self.assertEqual(100, inst.bin_capacity)\n self.assertEqual([30, 20], inst.objects)\n\n\nclass SolutionTest(unittest.TestCase):\n def test_solution_of_size_3_should_have_weights_with_zeros(self):\n solution = Solution(1, 3)\n self.assertEqual(3, len(solution.weights))\n self.assertEqual([0, 0, 0], solution.weights)\n \n def test_solution_of_invalid_size_should_raise_error(self):\n with self.assertRaises(ValueError):\n solution = Solution(1, -1)\n\n def test_has_space_box_with_weight_bigger_than_box_should_return_false(self):\n solution = Solution(5, 2)\n self.assertFalse(solution.has_space_box(0, 10))\n \n def test_has_space_box_with_weight_smaller_than_box_should_return_true(self):\n solution = Solution(5, 2)\n self.assertTrue(solution.has_space_box(0, 4))\n\n def test_has_space_box_with_weight_smaller_than_space_available_in_box_should_return_false(self):\n solution = Solution(5, 2)\n solution.create_box()\n solution.add_object(0, 3, 0)\n self.assertFalse(solution.has_space_box(0, 4))\n \n def 
test_add_solution_with_weight_bigger_than_box_size_should_return_false(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n self.assertFalse(solution.add_object(0, 10, 0))\n \n def test_add_solution_with_weight_smaller_than_box_size_should_return_true(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n self.assertTrue(solution.add_object(0, 3, 0)) \n \n def test_add_solution_with_invalid_object_should_raise_error(self):\n solution = Solution(5, 2)\n with self.assertRaises(ValueError):\n solution.add_object(-1, 3, 0)\n \n def test_add_solution_with_invalid_object_size_should_raise_error(self):\n solution = Solution(5, 2)\n with self.assertRaises(ValueError):\n solution.add_object(0, -3, 0) \n \n def test_add_solution_with_invalid_box_should_raise_error(self):\n solution = Solution(5, 2)\n with self.assertRaises(ValueError):\n solution.add_object(0, 3, -1) \n\n def test_add_solution_in_a_full_box_should_return_false(self):\n solution = Solution(5, 2)\n solution.add_object(0, 5, 0)\n self.assertFalse(solution.add_object(0, 3, 0))\n\n def test_add_solution_in_a_box_that_can_hold_it_should_return_true(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n solution.add_object(0, 3, 0)\n self.assertTrue(solution.add_object(0, 2, 0))\n\n def test_boxes_dict_should_have_one_box_with_one_object(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n solution.add_object(0, 3, 0)\n self.assertEqual(1, len(solution.boxes))\n self.assertEqual(1, len(solution.boxes[0]))\n self.assertEqual(0, solution.boxes[0][0])\n \n def test_boxes_dict_should_have_two_boxes_with_one_object_each(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n solution.boxes[1] = []\n solution.add_object(1, 3, 0)\n solution.add_object(0, 3, 1)\n self.assertEqual(2, len(solution.boxes))\n self.assertEqual(1, len(solution.boxes[0]))\n self.assertEqual(1, solution.boxes[0][0]) \n self.assertEqual(1, len(solution.boxes[1]))\n self.assertEqual(0, solution.boxes[1][0]) \n \n def test_boxes_dict_should_have_one_box_with_two_objects(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n solution.add_object(0, 3, 0)\n solution.add_object(1, 2, 0)\n self.assertEqual(1, len(solution.boxes))\n self.assertEqual(0, solution.boxes[0][0])\n self.assertEqual(2, len(solution.boxes[0]))\n self.assertEqual(1, solution.boxes[0][1])\n\n def test_weights_list_should_have_one_element_with_weight_3(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n solution.add_object(0, 3, 0)\n self.assertEqual(3, solution.weights[0])\n \n def test_weights_list_should_have_two_elements_with_weights_3_and_2(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n solution.boxes[1] = []\n solution.add_object(0, 3, 0)\n solution.add_object(1, 2, 0)\n self.assertEqual(3, solution.weights[0])\n self.assertEqual(2, solution.weights[1])\n \n def test_create_box_with_an_empty_solution_should_return_box_0(self):\n solution = Solution(5, 2)\n self.assertEqual(0, solution.create_box())\n self.assertEqual(1, len(solution.boxes))\n\n def test_create_box_with_an_solution__with_one_box_should_return_box_1(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n self.assertEqual(1, solution.create_box())\n self.assertEqual(2, len(solution.boxes))\n\n def test_amount_space_available_box_with_full_box_should_return_0(self):\n solution = Solution(5, 2)\n solution.boxes[0] = [0]\n solution.weights[0] = 5\n self.assertEqual(0, solution.amount_space_available_box(0))\n\n def 
test_amount_space_available_box_with_empty_box_should_return_box_capacity(self):\n solution = Solution(5, 2)\n solution.boxes[0] = []\n self.assertEqual(5, solution.amount_space_available_box(0))\n\n def test_amount_space_available_box_with_box_space_equal_3_should_return_3(self):\n solution = Solution(5, 2)\n solution.boxes[0] = [0]\n solution.weights[0] = 2\n self.assertEqual(3, solution.amount_space_available_box(0))\n\n \nclass FirstFitConstructorTest(unittest.TestCase):\n def test_find_box_that_fits_5_with_empty_solution_should_return_box_0(self):\n instance = Instance(\"inst_name\", 20, [5, 10, 15, 20], 3)\n solution = Solution(10, 4)\n constructor = FirstFitConstructor(instance)\n self.assertEqual(0, constructor._find_box_that_fits(5, solution))\n \n def test_find_box_that_fits_20_with_one_full_box_should_return_box_1(self):\n instance = Instance(\"inst_name\", 20, [5, 10, 15, 20], 3)\n solution = Solution(10, 4)\n solution.boxes[0] = [0, 1, 2]\n solution.weights = [5, 10, 15, 20]\n constructor = FirstFitConstructor(instance)\n self.assertEqual(1, constructor._find_box_that_fits(5, solution))\n \n def test_generate_solution_valid_should_return_a_solution(self):\n instance = Instance(\"inst_name\", 10, [6, 10, 4, 5], 3)\n constructor = FirstFitConstructor(instance)\n solution = constructor.generate_solution()\n self.assertIsInstance(solution, Solution)\n self.assertEqual(3, len(solution.boxes))\n self.assertEqual([0, 2], solution.boxes[0])\n self.assertEqual([1], solution.boxes[1])\n self.assertEqual([3], solution.boxes[2])\n \n def test_generate_solution_with_weight_bigger_than_box_capacity_should_raise_error(self):\n instance = Instance(\"inst_name\", 5, [6, 10, 4, 5], 3)\n constructor = FirstFitConstructor(instance)\n with self.assertRaises(ValueError):\n solution = constructor.generate_solution()\n\n\nclass DescendingFirstFitConstructorTest(unittest.TestCase):\n def test_get_objects_in_order_should_return_ordered_list(self):\n instance = Instance(\"inst_name\", 10, [6, 10, 4, 5], 3)\n constructor = DescendingFirstFitConstructor(instance)\n self.assertEqual([8, 5, 3, 1], constructor._get_objects_in_order([8, 1, 5, 3]))\n\n\nclass BestFitConstructorTest(unittest.TestCase):\n def test_find_box_that_fits_5_with_empty_solution_should_return_box_0(self):\n instance = Instance(\"inst_name\", 20, [5, 10, 15, 20], 3)\n solution = Solution(20, 4)\n constructor = BestFitConstructor(instance)\n self.assertEqual(0, constructor._find_box_that_fits(5, solution))\n \n def test_find_box_that_fits_5_with_one_full_box_should_return_box_1(self):\n instance = Instance(\"inst_name\", 20, [5, 10, 15, 20], 3)\n solution = Solution(20, 4)\n solution.boxes[0] = [3]\n solution.weights = [5, 10, 15, 20]\n constructor = BestFitConstructor(instance)\n self.assertEqual(1, constructor._find_box_that_fits(5, solution))\n \n def test_find_box_that_fits_5_with_two_boxes_that_fits_should_return_fullst_box(self):\n instance = Instance(\"inst_name\", 20, [5, 10, 15, 20], 3)\n solution = Solution(20, 4)\n solution.boxes[0] = [3]\n solution.boxes[1] = [0]\n solution.boxes[2] = [2]\n solution.weights = [5, 10, 15, 20]\n constructor = BestFitConstructor(instance)\n self.assertEqual(2, constructor._find_box_that_fits(5, solution))\n \n def test_generate_solution_valid_should_return_a_solution(self):\n instance = Instance(\"inst_name\", 10, [5, 6, 4, 5], 2)\n constructor = BestFitConstructor(instance)\n solution = constructor.generate_solution()\n self.assertIsInstance(solution, Solution)\n self.assertEqual(2, 
len(solution.boxes))\n self.assertEqual([0, 3], solution.boxes[0])\n self.assertEqual([1, 2], solution.boxes[1])\n \n def test_generate_solution_with_weight_bigger_than_box_capacity_should_raise_error(self):\n instance = Instance(\"inst_name\", 5, [6, 10, 4, 5], 3)\n constructor = BestFitConstructor(instance)\n with self.assertRaises(ValueError):\n solution = constructor.generate_solution() \n\n\nclass DescendingBestFitConstructorTest(unittest.TestCase):\n def test_get_objects_in_order_should_return_ordered_list(self):\n instance = Instance(\"inst_name\", 10, [6, 10, 4, 5], 3)\n constructor = DescendingBestFitConstructor(instance)\n self.assertEqual([8, 5, 3, 1], constructor._get_objects_in_order([8, 1, 5, 3]))\n", "id": "1355108", "language": "Python", "matching_score": 3.598768711090088, "max_stars_count": 1, "path": "tests/test_binp.py" }, { "content": "class ORLibraryInstanceReader(object):\n \"\"\"Class that knows how to load the ORLibrary instances for the 1-D bin\n packing instances\"\"\"\n\n @classmethod\n def get_instances(cls, file):\n \"\"\"Returns a list of Instance objects with the instances found in file param\"\"\"\n file_data = cls._read_file(file)\n number_of_instances = cls._get_number_of_instances(file_data)\n \n instances = []\n idx = 1\n for x in range(0, number_of_instances):\n instance_name = file_data[idx].strip()\n bin_cap, n_itens, best_sol = cls._get_instance_definition(file_data[idx+1])\n \n objects = []\n for line in file_data[idx+2:idx+2+n_itens]:\n objects.append(float(line))\n \n inst = Instance(instance_name, bin_cap, objects, best_sol)\n instances.append(inst)\n idx = idx+n_itens+2\n \n return instances\n\n @classmethod\n def _get_instance_definition(cls, line):\n \"\"\"It returns the bin capacity, number of itens in instance and the number of bins used \n in the best known solution\"\"\"\n bin_capacity, number_of_itens, bins_in_best_sol = line.split()\n return float(bin_capacity), int(number_of_itens), int(bins_in_best_sol)\n\n @classmethod\n def _get_number_of_instances(cls, data):\n \"\"\"Returns the number of instances in the data file\"\"\"\n return int(data[0])\n\n @classmethod\n def _read_file(cls, file):\n \"\"\"Reads a file, returning a list with its contents\"\"\"\n input_file = open(file, 'r')\n data = list(input_file)\n input_file.close()\n return data\n\n\nclass Instance(object):\n \"\"\"Class that represents an 1-D bin packing problem instance\"\"\"\n \n def __init__(self, instance_name, bin_cap, objects, best_sol):\n self.instance_name = instance_name\n self.bin_capacity = bin_cap\n self.objects = objects\n self.best_known_sol = best_sol\n\n\nclass Constructor(object):\n \"\"\"Base class of constructive algorithms\"\"\"\n \n def __init__(self, instance):\n self.instance = instance\n\n def generate_solution(self):\n \"\"\"Generates a new solution for a given instance\"\"\"\n solution = Solution(self.instance.bin_capacity, len(self.instance.objects))\n for obj, weight in enumerate(self._get_objects_in_order(self.instance.objects)):\n box_number = self._find_box_that_fits(weight, solution)\n isAdded = solution.add_object(obj, weight, box_number)\n if not isAdded:\n raise ValueError(\"Impossible to add object to box.\")\n \n return solution\n\n def _get_objects_in_order(self, objects):\n \"\"\"Return the objects in the order that they need to be processed by\n the algorithm.\"\"\"\n return objects\n\n\nclass FirstFitConstructor(Constructor):\n \"\"\"Constructor algorithm that inserts each object in the first box that\n can hold it.\"\"\"\n \n 
def _find_box_that_fits(self, weight, solution):\n \"\"\"Return a box that have enough space to hold the given weight. \n If none, it opens a new box.\"\"\"\n for box_number, box in enumerate(solution.boxes):\n if solution.has_space_box(box_number, weight):\n return box_number\n \n return solution.create_box()\n\n\nclass DescendingFirstFitConstructor(FirstFitConstructor):\n \"\"\"Constructor algorithm based on First Fit. It sorts the objects descending\n by its weight prior to processing them.\"\"\"\n \n def _get_objects_in_order(self, objects):\n return sorted(objects, reverse=True)\n\n\nclass BestFitConstructor(Constructor):\n \"\"\"Constructor algorithm that searches for boxes that can store the object, and puts it\n in the fullest one.\"\"\"\n \n def _find_box_that_fits(self, weight, solution):\n \"\"\"Return a box that have enough space to hold the given weight. \n If none, it opens a new box.\"\"\"\n boxes_that_fit = {}\n for box_number, box in enumerate(solution.boxes):\n space_available = solution.amount_space_available_box(box_number)\n if space_available >= weight:\n if space_available not in boxes_that_fit:\n boxes_that_fit[space_available] = box_number\n\n if(len(boxes_that_fit) == 0):\n return solution.create_box()\n else:\n dict_key = sorted(boxes_that_fit.keys())[0]\n return boxes_that_fit[dict_key]\n\n\nclass DescendingBestFitConstructor(BestFitConstructor):\n \"\"\"Constructor algorithm based on Best Fit. It sorts the objects descending\n by its weight prior to processing them.\"\"\"\n \n def _get_objects_in_order(self, objects):\n return sorted(objects, reverse=True)\n\n\nclass Solution(object):\n \"\"\"A solution for the bin packing problem.\"\"\"\n \n def __init__(self, box_size, size=1):\n if size <= 0:\n raise ValueError(\"The solution size should be greater than zero.\")\n \n self.weights = [0] * size\n self.box_size = box_size\n self.boxes = {}\n\n def add_object(self, obj, weight, box):\n \"\"\"Adds an object obj with a weight in a given box, if possible.\n Returns true if added, false otherwise.\"\"\"\n self._validate(obj, weight, box)\n \n if self.has_space_box(box, weight):\n if not box in self.boxes:\n return False\n \n self.boxes[box].append(obj)\n self.weights[obj] = weight\n return True\n return False\n \n def create_box(self):\n \"\"\"Create a new box, returning its box number\"\"\"\n next_box_number = len(self.boxes)\n self.boxes[next_box_number] = []\n return next_box_number\n\n def has_space_box(self, box, weight):\n \"\"\"True if the box can hold a given weight\"\"\"\n free_weight = self.amount_space_available_box(box)\n if free_weight >= weight:\n return True\n return False\n \n def amount_space_available_box(self, box):\n \"\"\"Returns the amount of space available in a given box.\"\"\"\n used_weight = 0\n \n if box in self.boxes:\n for obj in self.boxes[box]:\n used_weight = used_weight + self.weights[obj]\n \n free_weight = self.box_size - used_weight\n return free_weight\n\n def _validate(self, obj, weight, box):\n \"\"\"Validate the object, weight and box parameters\"\"\"\n if obj < 0 or weight <=0 or box < 0:\n raise ValueError(\"Invalid data passed as argument.\")\n", "id": "11946151", "language": "Python", "matching_score": 2.9607691764831543, "max_stars_count": 1, "path": "binp.py" }, { "content": "# Script to solve the 1-D bin packing instances from OR-Library \n# utilizing the algorithms implemented in binp.py file.\nfrom binp import *\nimport os\nimport functools\nimport time\n\nINSTANCE_PATH = os.path.join(os.curdir, 
\"instances\")\n\nINSTANCE_FILES = [\"binpack1.txt\", \"binpack2.txt\", \"binpack3.txt\"\n ,\"binpack4.txt\", \"binpack5.txt\", \"binpack6.txt\"\n ,\"binpack7.txt\", \"binpack8.txt\"]\n\nCONSTRUCTOR_ALGORITHMS = [\"FirstFitConstructor\", \"DescendingFirstFitConstructor\"\n ,\"BestFitConstructor\", \"DescendingBestFitConstructor\"]\n\ndef timed(f):\n \"\"\"This function is used as a decorator to measure time spent by each algorithm\"\"\"\n @functools.wraps(f)\n def wrapper(*args, **kwds):\n start = time.clock()\n result = f(*args, **kwds)\n end = time.clock()\n elapsed = \"%.2f\" % (end - start)\n return elapsed, result\n return wrapper\n\ndef execute_exp():\n \"\"\"Execution of the experiment reading the OR-Library instances and running them with\n all implemented algorithms\"\"\"\n for instance_file in INSTANCE_FILES:\n instances = ORLibraryInstanceReader.get_instances(os.path.join(INSTANCE_PATH, instance_file))\n for constructor_name in CONSTRUCTOR_ALGORITHMS:\n run_algorithm(constructor_name, instances)\n\ndef run_algorithm(algorithm, instances):\n \"\"\"Executes the algorithm for the given instances. The algorithm parameter should be the class \n name that implements the algorithm.\"\"\"\n print(algorithm)\n for instance in instances:\n constructor = globals()[algorithm](instance)\n time_elapsed, solution = solve_instance(constructor)\n print(generate_result_string(instance, solution, time_elapsed))\n\n@timed\ndef solve_instance(constructor):\n \"\"\"It simply calls the generate_solution() method. Defined in a function to be timed alone.\"\"\"\n return constructor.generate_solution()\n\ndef generate_result_string(instance, solution, time_elapsed):\n \"\"\"Returns a string representing the results of a algorithm.\n It is in format: instance_name boxes_in_solution boxes_in_best_known_solution time_spent\"\"\"\n return \"{0}\\t{1}\\t{2}\\t{3}\".format(instance.instance_name, len(solution.boxes), \n instance.best_known_sol, time_elapsed)\n\nif __name__ == \"__main__\":\n execute_exp()", "id": "6924958", "language": "Python", "matching_score": 0.8495720028877258, "max_stars_count": 1, "path": "run_instances.py" }, { "content": "from optparse import OptionParser\nfrom languages import *\n\n\ndef main():\n\tpath, language = parse_command_line()\n\tcounter = get_counter_for_language(language)\n\tresult = counter.get_packages_size(path)\n\tprint_result(result)\n\n\ndef parse_command_line():\n\t\"\"\"Do the command line parsing, returning (the language, base path)\"\"\"\n\n\tparser = OptionParser(usage=\"Usage: %prog [options] base_path\")\n\tparser.add_option(\"-l\", \"--language\", help=\"Source code language\")\n\n\t(options, args) = parser.parse_args()\n\terror_message = check_parse_errors(options, args)\n\n\tif error_message != None:\n\t\tparser.error(error_message)\n\n\treturn (args[0], options.language.lower())\n\n\ndef check_parse_errors(options, args):\n\t\"\"\"Do validations on command line options, returning error messages, if any.\"\"\"\n\n\tif not options.language:\n\t\treturn \"language parameter not informed.\"\n\telif not args:\n\t\treturn \"base path not informed.\"\n\telse:\n\t\treturn None\n\n\ndef print_result(result):\n\t\"\"\"Prints to stdout the dictionary of package counts in format\n\t[package_name][count]\"\"\"\n\n\tfor pack, count in result.items():\n\t\tprint(\"{:s}\\t{:d}\".format(pack, count))\n\n\tprint(\"\\nAverage:\\t{:.2f}\".format(calculate_average(result)))\n\n\ndef calculate_average(result):\n\t\"\"\"Calculates the average package size\"\"\"\n\n\tvals = 
result.values()\n\tif len(vals) == 0:\n\t\traise ValueError(\"Cannot calculate average on empty dictionary.\")\n\n\treturn sum(vals)/float(len(vals))\n\n\nif __name__ == '__main__':\n\tmain()", "id": "2850525", "language": "Python", "matching_score": 1.45124351978302, "max_stars_count": 0, "path": "packageitems.py" }, { "content": "import unittest\nfrom packageitems import *\n\n\nclass PackageItemsTestCase(unittest.TestCase):\n\n def test_calculate_average_with_empty_argument_should_raise_exception(self):\n dic = {}\n with self.assertRaises(ValueError):\n \tcalculate_average(dic)\n\n def test_calculate_average_with_valid_argument_should_return_1(self):\n dic = {'one.package' : 1}\n \tresult = calculate_average(dic)\n \tself.assertEquals(1, result)\n\n def test_calculate_average_with_valid_argument_should_return_4_5(self):\n dic = {'one.package' : 7, 'other.package' : 2}\n \tresult = calculate_average(dic)\n \tself.assertEquals(4.5, result)", "id": "5627411", "language": "Python", "matching_score": 1.9119534492492676, "max_stars_count": 0, "path": "tests/test_packageitems.py" }, { "content": "import unittest\nimport os\nfrom languages import *\n\n\nTEST_DIR = os.path.join(os.curdir, \"tests\")\nPYTHON_DATA_DIR = os.path.join(TEST_DIR, \"data/python\")\nJAVA_DATA_DIR = os.path.join(TEST_DIR, \"data/java\")\n\nclass LanguagesTestCase(unittest.TestCase):\n\n def test_get_counter_for_language_with_java_param_should_return_java_counter(self):\n ret = get_counter_for_language(\"java\")\n self.assertIsInstance(ret, Java)\n\n def test_get_counter_for_language_with_unknown_param_should_raise_exception(self):\n with self.assertRaises(ValueError):\n get_counter_for_language(\"UnknownLanguage\")\n\n\nclass JavaLanguageTestCase(unittest.TestCase):\n def setUp(self):\n self.counter = Java()\n\n def test_java_process_data_with_only_default_package_should_return_one_package(self):\n data = [\"//no package\", \"class Foo() {}\"]\n self.counter._process_data(data)\n self.assertEquals(1, len(self.counter.package_stats))\n self.assertEquals(1, self.counter.package_stats[self.counter.DEFAULT_PACKAGE])\n\n def test_java_process_data_twice_with_only_default_package_should_return_one_package(self):\n data = [\"//no package\", \"class Foo() {}\"]\n self.counter._process_data(data)\n self.counter._process_data(data)\n self.assertEquals(1, len(self.counter.package_stats))\n self.assertEquals(2, self.counter.package_stats[self.counter.DEFAULT_PACKAGE])\n\n def test_java_process_data_with_test_package_should_return_one_package(self):\n data = [\"//comment\" ,\"package a.test.package;\", \"class Foo() {}\"]\n self.counter._process_data(data)\n self.assertEquals(1, len(self.counter.package_stats))\n self.assertEquals(1, self.counter.package_stats['a.test.package'])\n\n def test_java_process_data_with_two_test_packages_should_return_two_packages(self):\n data = [\"//comment\" ,\"package a.test.package;\", \"class Foo() {}\"]\n another_data = [\"package another.test;\"]\n self.counter._process_data(data)\n self.counter._process_data(another_data)\n self.assertEquals(2, len(self.counter.package_stats))\n self.assertEquals(1, self.counter.package_stats['a.test.package'])\n self.assertEquals(1, self.counter.package_stats['another.test'])\n\n def test_java_get_package_name_with_valid_data_should_return_package_name(self):\n data = \"package a.test.package;\"\n self.assertEquals(\"a.test.package\", self.counter._get_package_name(data))\n\n def 
test_java_get_package_name_with_valid_data_with_linebreaks_should_return_package_name(self):\n data = \"//comment\\n int foo = 3; \\npackage a.test.package;\"\n self.assertEquals(\"a.test.package\", self.counter._get_package_name(data))\n\n def test_java_get_package_name_without_package_should_return_default_package(self):\n data = \"//comment\\n int foo = 3; \\na.test.package;\"\n self.assertEquals(self.counter.DEFAULT_PACKAGE, self.counter._get_package_name(data))\n\n def test_java_get_files_in_test_dir_should_return_list_with_1_element(self):\n expected = [os.path.join('.', 'tests', 'data', 'java', 'javaclass.java')]\n self.assertEquals(expected, self.counter._get_files(TEST_DIR))\n\n def test_java_get_files_in_python_dir_should_return_empty_list(self):\n self.assertEquals([], self.counter._get_files(PYTHON_DATA_DIR))\n\n def test_java_should_count_with_py_should_return_false(self):\n self.assertEquals(False, self.counter._should_count(\".py\"))\n\n def test_java_should_count_with_caps_py_should_return_false(self):\n self.assertEquals(False, self.counter._should_count(\".PY\"))\n\n def test_java_should_count_with_java_should_return_true(self):\n self.assertEquals(True, self.counter._should_count(\".java\"))\n\n def test_java_should_count_with_caps_java_should_return_true(self):\n self.assertEquals(True, self.counter._should_count(\".JAVA\"))\n\n def test_java_get_packages_size_in_python_dir_should_return_0_elements(self):\n self.assertEquals(0, len(self.counter.get_packages_size(PYTHON_DATA_DIR)))\n\n def test_java_get_packages_size_in_test_dir_should_return_1_element(self):\n ret = self.counter.get_packages_size(TEST_DIR)\n self.assertEquals(1, len(ret))\n self.assertEquals(1, ret['a.sample.testpackage'])\n\n \n", "id": "7858827", "language": "Python", "matching_score": 3.3723697662353516, "max_stars_count": 0, "path": "tests/test_languages.py" }, { "content": "\"\"\" This file defines the programming languages supported by package-items,\nand handles the module counting for each of them.\n\"\"\"\n\nimport os\nimport re\n\ndef get_counter_for_language(language):\n\t\"\"\"Returns the appropriated counter for a given language\"\"\"\n\n\tif(language != \"java\"): \n\t\traise ValueError(\"Unknown language name informed: {:s}\".format(language))\n\n\treturn Java()\n\n\nclass Java(object):\n\t\"\"\"Responsible for counting modules on Java projects\"\"\"\n\t\n\tFILE_EXTENSIONS = (\".java\",)\n\tDEFAULT_PACKAGE = \"DEFAULT\"\n\n\tdef __init__(self):\n\t\tself.package_stats = {}\n\t\tself.regexp = re.compile(r\"package[\\s](?P<package_name>[a-zA-Z0-9._]+)[;]\")\n\n\n\tdef get_packages_size(self, base_path):\n\t\t\"\"\"Returns a dictionary with each package and its items count,\n\t\tlooking at base_path and its subdirectories\"\"\"\n\n\t\tfilenames = self._get_files(base_path)\n\n\t\tfor filename in filenames:\n\t\t\tdata = self._read_file(filename)\n\t\t\tself._process_data(data)\n\n\t\treturn self.package_stats\n\n\n\tdef _read_file(self, path):\n\t\t\"\"\"Read a file, returning a list with its contents\"\"\"\n\n\t\tinput_file = open(path, 'r')\n\t\tdata = list(input_file)\n\t\tinput_file.close()\n\t\treturn data\n\n\n\tdef _process_data(self, data):\n\t\t\"\"\"Process a list of data, storing the package name for the current data\n\t\tin self.package_stats \"\"\"\n\t\tstr_data = ' '.join(data)\n\t\tpackage_name = self._get_package_name(str_data)\n\t\tcounter = self.package_stats.get(package_name, 0)\n\t\tcounter += 1\n\t\tself.package_stats[package_name] = counter\n\n\n\tdef 
_get_package_name(self, data):\n\t\t\"\"\"Returns a package name for a given file (data) using a regexp. \n\t\tIf none is found, returns DEFAULT_PACKAGE\"\"\"\n\n\t\tresult = self.regexp.search(data)\n\t\tif result == None:\n\t\t\treturn self.DEFAULT_PACKAGE\n\t\telse:\n\t\t\treturn result.group('package_name')\n\n\n\tdef _get_files(self, base_path):\n\t\t\"\"\"Returns a list of filenames (with path) that contains one of FILE_EXTENSIONS\"\"\"\n\n\t\treturn_files = []\n\t\tfor root, dirs, files in os.walk(base_path, followlinks=False):\n\t\t\tselected_files = \\\n\t\t\t\t[filename for filename in files if self._should_count(filename)]\n\n\t\t\tfiles_with_path = \\\n\t\t\t\t[os.path.join(root, filename) for filename in selected_files]\n\n\t\t\treturn_files += files_with_path\n\n\t\treturn return_files\n\n\n\tdef _should_count(self, filename):\n\t\t\"\"\"Tells if the current file should be counted based on FILE_EXTENSIONS\"\"\"\n\n\t\treturn filename.lower().endswith(self.FILE_EXTENSIONS)\n", "id": "3602576", "language": "Python", "matching_score": 3.3396687507629395, "max_stars_count": 0, "path": "languages.py" } ]
2.960769
harveyslash
[ { "content": "# from .PatchMatchOrig import PatchMatch as PatchMatchOrig\nfrom .PatchMatchCuda import PatchMatch as PatchMatchOrig\n\n", "id": "104784", "language": "Python", "matching_score": 0.8936952948570251, "max_stars_count": 4, "path": "src/PatchMatch/__init__.py" }, { "content": "\"\"\"\nThe Patchmatch Algorithm. The actual algorithm is a nearly\nline to line port of the original c++ version.\nThe distance calculation is different to leverage numpy's vectorized\noperations.\n\nThis version uses 4 images instead of 2.\nYou can supply the same image twice to use patchmatch between 2 images.\n\n\"\"\"\nimport os \npackage_directory = os.path.dirname(os.path.abspath(__file__))\n\nimport numpy as np\nimport cv2\nimport pycuda.autoinit\nimport pycuda.driver as drv\nimport numpy\nimport pycuda.autoinit\nimport pycuda.gpuarray as gpuarray\nimport numpy as np\nfrom pycuda.compiler import SourceModule\nimport cv2\n\nfrom PIL import Image\n\nclass PatchMatch(object):\n def __init__(self, a, aa, b, bb, patch_size):\n \"\"\"\n Initialize Patchmatch Object.\n This method also randomizes the nnf , which will eventually\n be optimized.\n \"\"\"\n assert a.shape == b.shape == aa.shape == bb.shape, \"Dimensions were unequal for patch-matching input\"\n print(\"called\")\n self.A = a.copy(order='C')\n self.B = b.copy(order='C')\n self.AA = aa.copy(order='C')\n self.BB = bb.copy(order='C')\n self.patch_size = patch_size\n self.nnf = np.zeros(shape=(self.A.shape[0], self.A.shape[1],2)).astype(np.int32) # the nearest neighbour field\n self.nnd = np.random.rand(self.A.shape[0], self.A.shape[1]).astype(np.float32) # the distance map for the nnf\n self.initialise_nnf()\n\n def initialise_nnf(self):\n \"\"\"\n Set up a random NNF\n Then calculate the distances to fill up the NND\n :return:\n \"\"\"\n self.nnf = self.nnf.transpose((2, 0, 1)) \n self.nnf[0] = np.random.randint(self.B.shape[1], size=(self.A.shape[0], self.A.shape[1]))\n self.nnf[1] = np.random.randint(self.B.shape[0], size=(self.A.shape[0], self.A.shape[1]))\n self.nnf = self.nnf.transpose((1, 2, 0)) \n self.nnf = self.nnf.copy(\"C\")\n \n def reconstruct_image(self, img_a):\n \"\"\"\n Reconstruct image using the NNF and img_a.\n :param img_a: the patches to reconstruct from\n :return: reconstructed image\n \"\"\"\n final_img = np.zeros_like(img_a)\n size = self.nnf.shape[0]\n scale = img_a.shape[0] // self.nnf.shape[0]\n for i in range(size):\n for j in range(size):\n x, y = self.nnf[i, j]\n if final_img[scale * i:scale * (i + 1), scale * j:scale * (j + 1)].shape == img_a[scale * y:scale * (y + 1), scale * x:scale * (x + 1)].shape:\n final_img[scale * i:scale * (i + 1), scale * j:scale * (j + 1)] = img_a[scale * y:scale * (y + 1), scale * x:scale * (x + 1)]\n return final_img\n \n def upsample_nnf(self, size):\n \"\"\"\n Upsample NNF based on size. 
It uses nearest neighbour interpolation\n :param size: INT size to upsample to.\n\n :return: upsampled NNF\n \"\"\"\n\n temp = np.zeros((self.nnf.shape[0], self.nnf.shape[1], 3))\n\n for y in range(self.nnf.shape[0]):\n for x in range(self.nnf.shape[1]):\n temp[y][x] = [self.nnf[y][x][0], self.nnf[y][x][1], 0]\n\n img = np.zeros(shape=(size, size, 2), dtype=np.int)\n small_size = self.nnf.shape[0]\n aw_ratio = ((size) // small_size)\n ah_ratio = ((size) // small_size)\n\n temp = cv2.resize(temp, None, fx=aw_ratio, fy=aw_ratio, interpolation=cv2.INTER_NEAREST)\n\n for i in range(temp.shape[0]):\n for j in range(temp.shape[1]):\n pos = temp[i, j]\n img[i, j] = pos[0] * aw_ratio, pos[1] * ah_ratio\n\n return img\n \n \n def reconstruct_avg(self, img, patch_size=5):\n \"\"\"\n Reconstruct image using average voting.\n :param img: the image to reconstruct from. Numpy array of dim H*W*3\n :param patch_size: the patch size to use\n\n :return: reconstructed image\n \"\"\"\n\n final = np.zeros_like(img)\n for i in range(img.shape[0]):\n for j in range(img.shape[1]):\n\n dx0 = dy0 = patch_size // 2\n dx1 = dy1 = patch_size // 2 + 1\n dx0 = min(j, dx0)\n dx1 = min(img.shape[0] - j, dx1)\n dy0 = min(i, dy0)\n dy1 = min(img.shape[1] - i, dy1)\n\n patch = self.nnf[i - dy0:i + dy1, j - dx0:j + dx1]\n\n lookups = np.zeros(shape=(patch.shape[0], patch.shape[1], img.shape[2]), dtype=np.float32)\n\n for ay in range(patch.shape[0]):\n for ax in range(patch.shape[1]):\n x, y = patch[ay, ax]\n lookups[ay, ax] = img[y, x]\n\n if lookups.size > 0:\n value = np.mean(lookups, axis=(0, 1))\n final[i, j] = value\n\n return final\n \n \n def visualize(self):\n \"\"\"\n Get the NNF visualisation\n :return: The RGB Matrix of the NNF\n \"\"\"\n nnf = self.nnf\n\n img = np.zeros((nnf.shape[0], nnf.shape[1], 3), dtype=np.uint8)\n\n for i in range(nnf.shape[0]):\n for j in range(nnf.shape[1]):\n pos = nnf[i, j]\n img[i, j, 0] = int(255 * (pos[0] / self.B.shape[1]))\n img[i, j, 2] = int(255 * (pos[1] / self.B.shape[0]))\n\n return img\n\n\n\n\n def propagate(self, iters=2, rand_search_radius=500):\n \"\"\"\n Optimize the NNF using PatchMatch Algorithm\n :param iters: number of iterations\n :param rand_search_radius: max radius to use in random search\n :return:\n \"\"\"\n mod = SourceModule(open(os.path.join(package_directory,\"patchmatch.cu\")).read(),no_extern_c=True)\n patchmatch = mod.get_function(\"patch_match\")\n \n rows = self.A.shape[0]\n cols = self.A.shape[1]\n channels = np.int32(self.A.shape[2])\n nnf_t = np.zeros(shape=(rows,cols),dtype=np.uint32)\n threads = 20\n \n def get_blocks_for_dim(dim,blocks):\n #if dim % blocks ==0:\n # return dim//blocks\n return dim// blocks +1 \n patchmatch(\n drv.In(self.A),\n drv.In(self.AA),\n drv.In(self.B),\n drv.In(self.BB),\n drv.InOut(self.nnf),\n drv.InOut(nnf_t),\n drv.InOut(self.nnd),\n np.int32(rows),\n np.int32(cols),\n channels,\n np.int32(self.patch_size),\n np.int32(iters),\n np.int32(8),\n np.int32(rand_search_radius),\n block=(threads,threads,1),\n grid=(get_blocks_for_dim(rows,threads),\n get_blocks_for_dim(cols,threads)))\n\n\n\n", "id": "11068587", "language": "Python", "matching_score": 2.281364679336548, "max_stars_count": 4, "path": "src/PatchMatch/PatchMatchCuda.py" }, { "content": "from torchvision import transforms\nfrom PIL import Image, ImageOps\nfrom torch.autograd import Variable\nfrom torchvision.utils import make_grid\nimport numpy as np\n\n\ndef load_image(img_path, to_array=False, to_variable=False):\n img = Image.open(img_path).convert(\"RGB\")\n 
s = 224*2\n img = ImageOps.fit(img, (s,s), Image.ANTIALIAS)\n\n scale = transforms.Scale((s,s))\n tensorize = transforms.ToTensor()\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n loader = transforms.Compose([\n scale, tensorize, normalize\n ])\n img_tensor = loader(img)\n\n if to_array:\n img_tensor = img_tensor.unsqueeze(0)\n if to_variable:\n img_tensor = Variable(img_tensor)\n\n return img_tensor\n\n\ndef deprocess_image(tensor, is_th_variable=False, is_th_tensor=False, un_normalize=True):\n img = tensor\n if is_th_variable:\n img = tensor.data.numpy()\n if is_th_tensor:\n img = tensor.numpy()\n if un_normalize:\n img[:, :, 0] = (img[:, :, 0] * .228 + .485)\n img[:, :, 1] = (img[:, :, 1] * .224 + .456)\n img[:, :, 2] = (img[:, :, 2] * .225 + .406)\n return img\n\n\ndef get_viz_tensor(activations_tensor):\n \"\"\"\n :param activations_tensor: pytorch variable of shape C * H * W\n :return: a numpy array of H * W * 3\n \"\"\"\n reshaped_tensor = activations_tensor.contiguous().view(-1, 1, activations_tensor.size()[1], activations_tensor.size()[2])\n grid = make_grid(reshaped_tensor).numpy()\n grid = np.transpose(grid, (1, 2, 0))\n return grid\n\n\n", "id": "8425674", "language": "Python", "matching_score": 1.2612700462341309, "max_stars_count": 4, "path": "src/Utils.py" } ]
1.26127
AnshGaikwad
[ { "content": "a = 2\nb=3\nprint(a+b)", "id": "11163685", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "server/compiler/codes/8b5c3001-3685-4287-a539-2310d3f5ee7f.py" } ]
0
kiwiidb
[ { "content": "#Folder where the images are saved as the bot receives them\nuploaded_image_folder = \"./uploaded_images\"\n#Folder where the images are saved as the bot receives from satoshis place\nsatoshis_image_folder = \"./satoshis_images\"\n#Folder where the images are saved as the bot resizes them \n# and converts them to the right colors\nconverted_image_folder = \"./converted_images\"\n#Image size as uploaded to satoshis.place\nimage_size = 100 # 100 x 100 pixels\n#Twitter mention (or hashtag or keyword ..) to respond to\ntwitter_handle = '@McmemeLightning'\n#Frequency of the updates of the canvas in seconds\nsleep_period = 60*60*2", "id": "11374554", "language": "Python", "matching_score": 1.1298246383666992, "max_stars_count": 4, "path": "config.py" }, { "content": "import base64\nimport tweepy\nfrom time import sleep\nfrom os import path\n\nfrom secrets import consumer_key, consumer_secret, access_token, access_token_secret\nfrom satoshis_place import SatPlaceSocket\nimport config\n\ndef download_latest_canvas(imagename = path.join(config.satoshis_image_folder, 'latest_canvas.png')):\n sps = SatPlaceSocket()\n sps.emitLatestPixels()\n sps.wait(seconds = 1)\n\n img = sps.latestImage\n imgdata = base64.b64decode(img)\n filename = imagename # I assume you have a way of picking unique filenames\n with open(filename, 'wb') as f:\n f.write(imgdata)\n\ndef initialize_api():\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth)\n return api\n\ndef update_twitter_status(api):\n print(\"Posting canvas to twitter\")\n download_latest_canvas()\n api.update_with_media(path.join(config.satoshis_image_folder, \"latest_canvas.png\"))\n\nif __name__ == \"__main__\":\n api = initialize_api()\n while True:\n print(\"Starting up twitter bot\")\n update_twitter_status(api)\n sleep(config.sleep_period)\n", "id": "10002800", "language": "Python", "matching_score": 4.240970611572266, "max_stars_count": 4, "path": "twitterbot.py" }, { "content": "import wget\nimport json\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom multiprocessing import Process\n\nfrom image import convert_image\nimport config\nfrom satoshis_place import SatPlaceSocket\nfrom twitterbot import initialize_api\n\n#download image, convert image, send to satoshis place, answer tweet id with invoice\ndef handleTweet(imgurl, name, tweetid, api, startX, startY, satPlaceSocket):\n print(\"startX: \", startX)\n print(\"startY: \", startY)\n filename = wget.download(imgurl, out=config.uploaded_image_folder)\n cj = convert_image(filename, (config.image_size, config.image_size), startX, startY)\n emitResult = satPlaceSocket.emitNewOrder(cj)\n if emitResult:\n satPlaceSocket.wait(seconds=5)\n try:\n invoice = satPlaceSocket.receivedInvoice\n print(invoice)\n api.update_status(\"@\" + name + \" \" + invoice['paymentRequest'], tweetid)\n except AttributeError:\n print(\"Failed to get invoice\")\n api.update_status(\"Sorry @\" + name + \", try again please!\", tweetid)\n else:\n api.update_status(\"Sorry @\" + name + \", try again please!\", tweetid)\n\n\n\n#Import credentials\nfrom secrets import consumer_key, consumer_secret, access_token, access_token_secret\n#This is a listener that calls a subprocess processing the tweet\nclass StdOutListener(StreamListener): \n \n def __init__(self):\n self.satPlaceSocket = SatPlaceSocket()\n self.api = initialize_api()\n self.coord = 1100\n def on_data(self, 
data):\n tweet = json.loads(data)\n try:\n imgurl = tweet['entities']['media'][0]['media_url']\n except KeyError:\n imgurl = False\n tweetid = tweet['id_str']\n name = tweet['user']['screen_name']\n if imgurl:\n p = Process(target=handleTweet, args=(imgurl,name, tweetid, self.api, int(self.coord%1000), config.image_size*int(self.coord/1000), self.satPlaceSocket))\n p.start()\n self.coord = (self.coord + config.image_size) % 1e6\n print(\"handled tweet with id\", tweetid)\n\n def on_error(self, status):\n print(status)\n \nif __name__ == '__main__':\n\n #This handles Twitter authetification and the connection to Twitter Streaming API\n l = StdOutListener()\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n print(\"Starting stream\")\n stream = Stream(auth, l)\n\n #This line filter Twitter Streams to capture data from mentions\n stream.filter(track=[config.twitter_handle])\n", "id": "11213860", "language": "Python", "matching_score": 3.0328235626220703, "max_stars_count": 4, "path": "twitter_replier.py" }, { "content": "from socketIO_client import SocketIO, BaseNamespace\nfrom socketIO_client.exceptions import ConnectionError\nimport numpy as np\nimport json\n\n\nallowed_colors = ['#ffffff', '#e4e4e4', '#888888', '#222222', '#e4b4ca', '#d4361e', '#db993e', '#8e705d',\n'#e6d84e', '#a3dc67', '#4aba38', '#7fcbd0', '#5880a8', '#3919d1', '#c27ad0', '#742671']\n\nclass SatPlaceSocket:\n\n def __init__(self, url='https://api.satoshis.place'):\n socketIO = SocketIO(url)\n socketIO.on('GET_SETTINGS_RESULT', lambda *args: self._on_get_settings_result(*args))\n socketIO.on('GET_LATEST_PIXELS_RESULT', lambda *args: self._on_get_latest_pixels_result(*args))\n socketIO.on('NEW_ORDER_RESULT', lambda *args: self._on_new_order_result(*args))\n socketIO.on('ORDER_SETTLED', lambda *args: self._on_order_settled(*args))\n self.socketIO = socketIO\n self.maxAttempts = 3\n\n def wait(self, seconds=0):\n if seconds > 0:\n self.socketIO.wait(seconds=seconds)\n else:\n self.socketIO.wait()\n\n def emitSettings(self, AttemptNr=0):\n if AttemptNr > self.maxAttempts:\n #failed\n return false\n try:\n self.socketIO.emit(\"GET_SETTINGS\")\n except ConnectionError:\n #try again\n self.emitSettings(AttemptNr=AttemptNr+1)\n \n #succes\n return True\n \n\n def emitLatestPixels(self, AttemptNr=0):\n if AttemptNr > self.maxAttempts:\n #failed\n return false\n try:\n self.socketIO.emit(\"GET_LATEST_PIXELS\")\n except ConnectionError:\n #try again\n self.emitLatestPixels(AttemptNr=AttemptNr+1)\n \n #succes\n return True\n \n\n\n\n def emitNewOrder(self, order, AttemptNr=0):\n if AttemptNr > self.maxAttempts:\n #failed\n return false\n try:\n self.socketIO.emit(\"NEW_ORDER\", order)\n except ConnectionError:\n #try again\n self.emitNewOrder(order, AttemptNr=AttemptNr+1)\n \n #succes\n return True\n \n def _on_get_settings_result(self, *args):\n self.settings = args[0]['data']\n\n #this returns base64 image\n def _on_get_latest_pixels_result(self, *args):\n self.latestImage = args[0]['data'][len('data:image/bmp;base64,'):]\n\n #this returns the invoice\n def _on_new_order_result(self, *args):\n self.receivedInvoice = args[0]['data']\n\n def _on_order_settled(self, *args):\n self.latestImage = args[0]['data']['image']\n self.latestPixelsPainted = args[0]['data']['pixelsPaintedCount']\n #self.latestSessionID = args[0]['data']['SessionID']\n\n\ndef test_satoshi(cj):\n sps = SatPlaceSocket()\n sps.emitNewOrder(cj)\n sps.wait()\n return sps", "id": "11829980", "language": 
"Python", "matching_score": 1.6573814153671265, "max_stars_count": 4, "path": "satoshis_place.py" }, { "content": "from PIL import Image\nfrom os import path\nfrom satoshis_place import allowed_colors\nimport numpy as np\nimport math\n\nimport config\n\ndef distance(c1, c2):\n (r1,g1,b1) = c1\n (r2,g2,b2) = c2\n return math.sqrt((r1 - r2)**2 + (g1 - g2) ** 2 + (b1 - b2) **2)\n\ndef hex_to_rgb(hex_pixel):\n hex_pixel = hex_pixel.lstrip('#')\n return tuple(int(hex_pixel[i:i+2], 16) for i in (0, 2 ,4))\n\ndef construct_satoshi_dict():\n sat_dict = dict()\n for hex_color in allowed_colors:\n sat_dict[hex_to_rgb(hex_color)] = hex_color\n return sat_dict\n\ndef closest_color(rgb_code_dictionary, point):\n colors = list(rgb_code_dictionary.keys())\n closest_colors = sorted(colors, key=lambda color: distance(color, point))\n closest_color = closest_colors[0]\n return closest_color\n\ndef image_to_allowed_color(filename, rgb_code_dictionary, size):\n img = Image.open(filename)\n img = img.resize(size)\n height, width = img.size\n pixels = img.load()\n for y in range(height):\n for x in range(width):\n rgbpixel = pixels[x,y][0:3]\n cc = closest_color(rgb_code_dictionary, rgbpixel)\n pixels[x, y] = (cc[0], cc[1], cc[2], 255)\n return img\n\ndef construct_color_json(converted_image_filename, sat_dict, startx, starty):\n img = Image.open(converted_image_filename)\n converted_image_array = np.array(img)\n result = []\n for y, row in enumerate(converted_image_array):\n for x, pixel in enumerate(row):\n color = sat_dict[tuple(pixel[0:3])]\n json_data = {}\n json_data['coordinates'] = [startx + x, starty + y]\n json_data['color'] = color\n result.append(json_data)\n return result\n\ndef convert_image(original_image_filepath, size, startx, starty):\n sd = construct_satoshi_dict()\n img = image_to_allowed_color(original_image_filepath, sd, size)\n convertedpath = path.join(config.converted_image_folder, path.basename(original_image_filepath).replace(\".jpg\", \".png\"))\n img.save(convertedpath)\n cj = construct_color_json(convertedpath, sd, startx, starty)\n return cj", "id": "5141462", "language": "Python", "matching_score": 0.32042935490608215, "max_stars_count": 4, "path": "image.py" } ]
1.657381
Blockchain-Solutions-BCS
[ { "content": "#!/usr/bin/env python3\n# Copyright (c) 2014 <NAME>\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n'''\nA script to check that the (Linux) executables produced by gitian only contain\nallowed gcc, glibc and libstdc++ version symbols. This makes sure they are\nstill compatible with the minimum supported Linux distribution versions.\n\nExample usage:\n\n find ../gitian-builder/build -type f -executable | xargs python3 contrib/devtools/symbol-check.py\n'''\nimport subprocess\nimport re\nimport sys\nimport os\n\n# Debian 6.0.9 (Squeeze) has:\n#\n# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B)\n# - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6)\n# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)\n#\n# Ubuntu 10.04.4 (Lucid Lynx) has:\n#\n# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all)\n# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all)\n# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names)\n#\n# Taking the minimum of these as our target.\n#\n# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:\n# GCC 4.4.0: GCC_4.4.0\n# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3\n# (glibc) GLIBC_2_11\n#\nMAX_VERSIONS = {\n'GCC': (4,4,0),\n'CXXABI': (1,3,3),\n'GLIBCXX': (3,4,13),\n'GLIBC': (2,11),\n}\n# See here for a description of _IO_stdin_used:\n# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109\n\n# Ignore symbols that are exported as part of every executable\nIGNORE_EXPORTS = {\n'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr', 'in6addr_any'\n}\nREADELF_CMD = os.getenv('READELF', '/usr/bin/readelf')\nCPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')\n# Allowed NEEDED libraries\nALLOWED_LIBRARIES = {\n# bsmcoind and bsmcoin-qt\n'libgcc_s.so.1', # GCC base support\n'libc.so.6', # C library\n'libpthread.so.0', # threading\n'libanl.so.1', # DNS resolve\n'libm.so.6', # math library\n'librt.so.1', # real-time (clock)\n'ld-linux-x86-64.so.2', # 64-bit dynamic linker\n'ld-linux.so.2', # 32-bit dynamic linker\n'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker\n'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker\n'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker\n# bsmcoin-qt only\n'libX11-xcb.so.1', # part of X11\n'libX11.so.6', # part of X11\n'libxcb.so.1', # part of X11\n'libfontconfig.so.1', # font support\n'libfreetype.so.6', # font parsing\n'libdl.so.2' # programming interface to dynamic linker\n}\nARCH_MIN_GLIBC_VER = {\n'80386': (2,1),\n'X86-64': (2,2,5),\n'ARM': (2,4),\n'AArch64':(2,17),\n'RISC-V': (2,27)\n}\nclass CPPFilt(object):\n '''\n Demangle C++ symbol names.\n\n Use a pipe to the 'c++filt' command.\n '''\n def __init__(self):\n self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)\n\n def __call__(self, mangled):\n self.proc.stdin.write(mangled + '\\n')\n self.proc.stdin.flush()\n return 
self.proc.stdout.readline().rstrip()\n\n def close(self):\n self.proc.stdin.close()\n self.proc.stdout.close()\n self.proc.wait()\n\ndef read_symbols(executable, imports=True):\n '''\n Parse an ELF executable and return a list of (symbol,version) tuples\n for dynamic, imported symbols.\n '''\n p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', '-h', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)\n (stdout, stderr) = p.communicate()\n if p.returncode:\n raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))\n syms = []\n for line in stdout.splitlines():\n line = line.split()\n if 'Machine:' in line:\n arch = line[-1]\n if len(line)>7 and re.match('[0-9]+:$', line[0]):\n (sym, _, version) = line[7].partition('@')\n is_import = line[6] == 'UND'\n if version.startswith('@'):\n version = version[1:]\n if is_import == imports:\n syms.append((sym, version, arch))\n return syms\n\ndef check_version(max_versions, version, arch):\n if '_' in version:\n (lib, _, ver) = version.rpartition('_')\n else:\n lib = version\n ver = '0'\n ver = tuple([int(x) for x in ver.split('.')])\n if not lib in max_versions:\n return False\n return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch]\n\ndef read_libraries(filename):\n p = subprocess.Popen([READELF_CMD, '-d', '-W', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)\n (stdout, stderr) = p.communicate()\n if p.returncode:\n raise IOError('Error opening file')\n libraries = []\n for line in stdout.splitlines():\n tokens = line.split()\n if len(tokens)>2 and tokens[1] == '(NEEDED)':\n match = re.match('^Shared library: \\[(.*)\\]$', ' '.join(tokens[2:]))\n if match:\n libraries.append(match.group(1))\n else:\n raise ValueError('Unparseable (NEEDED) specification')\n return libraries\n\nif __name__ == '__main__':\n cppfilt = CPPFilt()\n retval = 0\n for filename in sys.argv[1:]:\n # Check imported symbols\n for sym,version,arch in read_symbols(filename, True):\n if version and not check_version(MAX_VERSIONS, version, arch):\n print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))\n retval = 1\n # Check exported symbols\n if arch != 'RISC-V':\n for sym,version,arch in read_symbols(filename, False):\n if sym in IGNORE_EXPORTS:\n continue\n print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))\n retval = 1\n # Check dependency libraries\n for library_name in read_libraries(filename):\n if library_name not in ALLOWED_LIBRARIES:\n print('%s: NEEDED library %s is not allowed' % (filename, library_name))\n retval = 1\n\n sys.exit(retval)\n", "id": "5917068", "language": "Python", "matching_score": 1.1790801286697388, "max_stars_count": 0, "path": "contrib/devtools/symbol-check.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2014-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n\n#\n# Expanded helper routines for regression testing of the BSM Coin community fund\n#\n\nfrom test_framework.util import *\n\ndef activateHardFork(node, versionBit, activationHeight):\n # check the block reports the correct version bit after the hardfork height\n slow_gen(node, 1)\n\n blockHash1 = node.getbestblockhash()\n block1 = node.getblock(blockHash1)\n versionBinary1 = bin(int(block1[\"versionHex\"], 16))[2:]\n versionBit1 = 
versionBinary1[(versionBit+1)*-1]\n assert(int(versionBit1) == 0)\n\n # activate hard fork\n slow_gen(node, activationHeight - node.getblockchaininfo()[\"blocks\"] + 1)\n\n blockHash2 = node.getbestblockhash()\n block2 = node.getblock(blockHash2)\n versionBinary2 = bin(int(block2[\"versionHex\"], 16))[2:]\n versionBit2 = versionBinary2[(versionBit+1)*-1]\n assert(int(versionBit2) == 1)\n", "id": "12050903", "language": "Python", "matching_score": 0.22473707795143127, "max_stars_count": 0, "path": "qa/rpc-tests/test_framework/hardfork_util.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2018 The Bsmcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\nfrom test_framework.test_framework import BsmCoinTestFramework\nfrom test_framework.staticr_util import *\n\n#import time\n\nclass GetStakingInfo(BsmCoinTestFramework):\n \"\"\"Tests getstakereport accounting.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.setup_clean_chain = True\n self.num_nodes = 1\n\n def setup_network(self, split=False):\n self.nodes = self.setup_nodes()\n self.is_network_split = False\n\n def run_test(self):\n # Turn off staking\n self.nodes[0].staking(False)\n\n # Check if we get the error for nWeight\n assert(not self.nodes[0].getstakinginfo()['enabled'])\n assert(not self.nodes[0].getstakinginfo()['staking'])\n assert_equal(\"Warning: We don't appear to have mature coins.\", self.nodes[0].getstakinginfo()['errors'])\n\n # Make it to the static rewards fork!\n activate_staticr(self.nodes[0])\n\n # Check balance\n assert_equal(59814950, self.nodes[0].getwalletinfo()['balance'] + self.nodes[0].getwalletinfo()['immature_balance'])\n\n # Turn on staking\n self.nodes[0].generate(2000)\n self.nodes[0].staking(True)\n\n # Check for staking after we have matured coins\n assert(self.nodes[0].getstakinginfo()['enabled'])\n # Wait for the node to start staking\n while not self.nodes[0].getstakinginfo()['staking']:\n time.sleep(0.5)\n assert(self.nodes[0].getstakinginfo()['staking'])\n assert_equal(\"\", self.nodes[0].getstakinginfo()['errors'])\n\n # Get the current block count to check against while we wait for a stake\n blockcount = self.nodes[0].getblockcount()\n\n # wait for a new block to be mined\n while self.nodes[0].getblockcount() == blockcount:\n print(\"waiting for a new block...\")\n time.sleep(1)\n\n # We got one\n print(\"found a new block...\")\n\n # Check balance\n assert_equal(59914952, self.nodes[0].getwalletinfo()['balance'] + self.nodes[0].getwalletinfo()['immature_balance'])\n\n # Check if we get the error for nWeight again after a stake\n assert(self.nodes[0].getstakinginfo()['enabled'])\n assert(self.nodes[0].getstakinginfo()['staking'])\n assert_equal(\"\", self.nodes[0].getstakinginfo()['errors'])\n\n # Check expecteddailyreward\n assert_equal(86400 / (self.nodes[0].getstakinginfo()['expectedtime'] + 1) * 2, self.nodes[0].getstakinginfo()['expecteddailyreward'])\n\n # LOCK the wallet\n self.nodes[0].encryptwallet(\"password\")\n stop_nodes(self.nodes)\n wait_bsmcoinds()\n self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)\n\n # Check if we get the error for nWeight again after a stake\n assert(self.nodes[0].getstakinginfo()['enabled'])\n assert(not self.nodes[0].getstakinginfo()['staking'])\n assert_equal(\"Warning: Wallet is locked. 
Please enter the wallet passphrase with walletpassphrase first.\", self.nodes[0].getstakinginfo()['errors'])\n\n\nif __name__ == '__main__':\n GetStakingInfo().main()\n", "id": "386073", "language": "Python", "matching_score": 2.506869077682495, "max_stars_count": 0, "path": "qa/rpc-tests/getstakinginfo.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2018 The Bsmcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\nfrom test_framework.test_framework import BsmCoinTestFramework\nfrom test_framework.staticr_util import *\n\n#import time\n\nclass GetStakeReport(BsmCoinTestFramework):\n \"\"\"Tests getstakereport accounting.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.setup_clean_chain = True\n self.num_nodes = 3\n\n def setup_network(self, split=False):\n self.nodes = self.setup_nodes()\n connect_nodes(self.nodes[0], 1)\n connect_nodes(self.nodes[1], 2)\n connect_nodes(self.nodes[2], 0)\n self.is_network_split = False\n\n def run_test(self):\n # Turn off staking until we need it\n self.nodes[0].staking(False)\n self.nodes[1].staking(False)\n self.nodes[2].staking(False)\n\n # Make it to the static rewards fork!\n activate_staticr(self.nodes[0])\n self.sync_all()\n\n # Use THE spending address\n spending_address_public_key = self.nodes[1].getnewaddress()\n spending_address_private_key = self.nodes[1].dumpprivkey(spending_address_public_key)\n\n # Create a staking address\n staking_address_public_key = self.nodes[2].getnewaddress()\n staking_address_private_key = self.nodes[2].dumpprivkey(staking_address_public_key)\n\n # Import the 2 keys into a third wallet\n self.nodes[0].importprivkey(spending_address_private_key)\n self.nodes[0].importprivkey(staking_address_private_key)\n\n # Create the cold address\n coldstaking_address_staking = self.nodes[1].getcoldstakingaddress(staking_address_public_key, spending_address_public_key)\n\n # Send funds to the spending address (leave me BSM for fees)\n self.nodes[0].sendtoaddress(spending_address_public_key, self.nodes[0].getbalance() - 1)\n self.nodes[0].generate(1)\n self.sync_all()\n\n # Stake a block\n self.stake_block(self.nodes[1])\n\n # Load the last 24h stake amount for the wallets/nodes\n merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']\n spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']\n staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']\n # print('spending', spending_address_last_24h)\n # print('staking', staking_address_last_24h)\n # print('merged', merged_address_last_24h)\n\n # Make sure we have staked 2 BSM to the spending address\n # So that means spending last 24h == 2\n # And staking last 24h == 0 We have not sent any coins yet\n # And merged will have the total of the spending + staking\n assert_equal('2.00', merged_address_last_24h)\n assert_equal('2.00', spending_address_last_24h)\n assert_equal('0.00', staking_address_last_24h)\n\n # Send funds to the cold staking address (leave some BSM for fees)\n self.nodes[1].sendtoaddress(coldstaking_address_staking, self.nodes[1].getbalance() - 1)\n self.nodes[1].generate(1)\n self.sync_all()\n\n # Stake a block\n self.stake_block(self.nodes[2])\n\n # Load the last 24h stake amount for the wallets/nodes\n merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']\n spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']\n staking_address_last_24h = 
self.nodes[2].getstakereport()['Last 24H']\n # print('spending', spending_address_last_24h)\n # print('staking', staking_address_last_24h)\n # print('merged', merged_address_last_24h)\n\n # Make sure we staked 4 BSM in spending address (2 BSM via COLD Stake)\n # So that means spending last 24h == 4\n # And staking last 24h == 2 We stake 2 BSM via COLD already\n # And merged will have the total of the spending + staking\n assert_equal('4.00', merged_address_last_24h)\n assert_equal('4.00', spending_address_last_24h)\n assert_equal('2.00', staking_address_last_24h)\n\n # Time travel 2 days in the future\n cur_time = int(time.time())\n self.nodes[0].setmocktime(cur_time + 172800)\n self.nodes[1].setmocktime(cur_time + 172800)\n self.nodes[2].setmocktime(cur_time + 172800)\n\n # Stake a block\n self.stake_block(self.nodes[2])\n\n # Load the last 24h stake amount for the wallets/nodes\n merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']\n spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']\n staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']\n\n # Check the amounts\n assert_equal('2.00', merged_address_last_24h)\n assert_equal('2.00', spending_address_last_24h)\n assert_equal('2.00', staking_address_last_24h)\n\n # Load the last 7 days stake amount for the wallets/nodes\n merged_address_last_7d = self.nodes[0].getstakereport()['Last 7 Days']\n spending_address_last_7d = self.nodes[1].getstakereport()['Last 7 Days']\n staking_address_last_7d = self.nodes[2].getstakereport()['Last 7 Days']\n\n # Check the amounts\n assert_equal('6.00', merged_address_last_7d)\n assert_equal('6.00', spending_address_last_7d)\n assert_equal('4.00', staking_address_last_7d)\n\n # Load the averages for stake amounts\n avg_last7d = self.nodes[0].getstakereport()['Last 7 Days Avg']\n avg_last30d = self.nodes[0].getstakereport()['Last 30 Days Avg']\n avg_last365d = self.nodes[0].getstakereport()['Last 365 Days Avg']\n\n # Check the amounts\n assert_equal('3.00', avg_last7d)\n assert_equal('3.00', avg_last30d)\n assert_equal('3.00', avg_last365d)\n\n # Time travel 8 days in the future\n cur_time = int(time.time())\n self.nodes[0].setmocktime(cur_time + 691200)\n self.nodes[1].setmocktime(cur_time + 691200)\n self.nodes[2].setmocktime(cur_time + 691200)\n\n # Load the last 24h stake amount for the wallets/nodes\n merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']\n spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']\n staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']\n\n # Check the amounts\n assert_equal('0.00', merged_address_last_24h)\n assert_equal('0.00', spending_address_last_24h)\n assert_equal('0.00', staking_address_last_24h)\n\n # Load the last 7 days stake amount for the wallets/nodes\n merged_address_last_7d = self.nodes[0].getstakereport()['Last 7 Days']\n spending_address_last_7d = self.nodes[1].getstakereport()['Last 7 Days']\n staking_address_last_7d = self.nodes[2].getstakereport()['Last 7 Days']\n\n # Check the amounts\n assert_equal('2.00', merged_address_last_7d)\n assert_equal('2.00', spending_address_last_7d)\n assert_equal('2.00', staking_address_last_7d)\n\n # Load the averages for stake amounts\n avg_last7d = self.nodes[0].getstakereport()['Last 7 Days Avg']\n avg_last30d = self.nodes[0].getstakereport()['Last 30 Days Avg']\n avg_last365d = self.nodes[0].getstakereport()['Last 365 Days Avg']\n\n # Check the amounts\n assert_equal('0.28571428', avg_last7d)\n assert_equal('0.75', 
avg_last30d)\n assert_equal('0.75', avg_last365d)\n\n # Time travel 31 days in the future\n cur_time = int(time.time())\n self.nodes[0].setmocktime(cur_time + 2678400)\n self.nodes[1].setmocktime(cur_time + 2678400)\n self.nodes[2].setmocktime(cur_time + 2678400)\n\n # Load the last 24h stake amount for the wallets/nodes\n merged_address_last_24h = self.nodes[0].getstakereport()['Last 24H']\n spending_address_last_24h = self.nodes[1].getstakereport()['Last 24H']\n staking_address_last_24h = self.nodes[2].getstakereport()['Last 24H']\n\n # Check the amounts\n assert_equal('0.00', merged_address_last_24h)\n assert_equal('0.00', spending_address_last_24h)\n assert_equal('0.00', staking_address_last_24h)\n\n # Load the last 7 days stake amount for the wallets/nodes\n merged_address_last_7d = self.nodes[0].getstakereport()['Last 7 Days']\n spending_address_last_7d = self.nodes[1].getstakereport()['Last 7 Days']\n staking_address_last_7d = self.nodes[2].getstakereport()['Last 7 Days']\n\n # Check the amounts\n assert_equal('0.00', merged_address_last_7d)\n assert_equal('0.00', spending_address_last_7d)\n assert_equal('0.00', staking_address_last_7d)\n\n # Load the averages for stake amounts\n avg_last7d = self.nodes[0].getstakereport()['Last 7 Days Avg']\n avg_last30d = self.nodes[0].getstakereport()['Last 30 Days Avg']\n avg_last365d = self.nodes[0].getstakereport()['Last 365 Days Avg']\n\n # Check the amounts\n assert_equal('0.00', avg_last7d)\n assert_equal('0.06666666', avg_last30d)\n assert_equal('0.19354838', avg_last365d)\n\n # Disconnect the nodes\n for node in self.nodes[0].getpeerinfo():\n self.nodes[0].disconnectnode(node['addr'])\n time.sleep(2) #disconnecting a node needs a little bit of time\n assert(self.nodes[0].getpeerinfo() == [])\n\n # Stake a block on node 0\n orphaned_block_hash = self.stake_block(self.nodes[0], False)\n\n # Generate some blocks on node 1\n self.nodes[1].generate(100)\n\n # Reconnect the nodes\n connect_nodes(self.nodes[0], 1)\n connect_nodes(self.nodes[1], 2)\n connect_nodes(self.nodes[2], 0)\n\n # Wait for blocks to sync\n self.sync_all()\n\n # Make sure the block was orphaned\n assert(self.nodes[0].getblock(orphaned_block_hash)['confirmations'] == -1)\n\n # Check the staked amount\n # Should be 0 (Zero) as the last staked block is orphaned\n assert_equal('0.00', self.nodes[0].getstakereport()['Last 7 Days'])\n\n def stake_block(self, node, mature = True):\n # Get the current block count to check against while we wait for a stake\n blockcount = node.getblockcount()\n\n # Turn staking on\n node.staking(True)\n\n # wait for a new block to be mined\n while node.getblockcount() == blockcount:\n # print(\"waiting for a new block...\")\n time.sleep(1)\n\n # We got one\n # print(\"found a new block...\")\n\n # Turn staking off\n node.staking(False)\n\n # Get the staked block\n block_hash = node.getbestblockhash()\n\n # Only mature the blocks if we asked for it\n if (mature):\n # Make sure the blocks are mature before we check the report\n slow_gen(node, 5, 0.5)\n self.sync_all()\n\n # return the block hash to the function caller\n return block_hash\n\n\nif __name__ == '__main__':\n GetStakeReport().main()\n", "id": "10299662", "language": "Python", "matching_score": 3.5424368381500244, "max_stars_count": 0, "path": "qa/rpc-tests/getstakereport.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2019 The Bsmcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or 
http://www.opensource.org/licenses/mit-license.php.\n\nfrom test_framework.test_framework import BsmCoinTestFramework\nfrom test_framework.cfund_util import *\n\nimport time\nimport urllib.parse\n\n\nclass CFundPaymentRequestStateReorg(BsmCoinTestFramework):\n \"\"\"Tests consistency of Community Fund Payment Requests state through reorgs.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.setup_clean_chain = True\n self.num_nodes = 2\n\n def setup_network(self, split=False):\n self.nodes = []\n self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-headerspamfiltermaxsize=1000\"]))\n self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-headerspamfiltermaxsize=1000\"]))\n connect_nodes(self.nodes[0], 1)\n self.is_network_split = False\n\n def run_test(self):\n self.nodes[0].staking(False)\n self.nodes[1].staking(False)\n activate_cfund(self.nodes[0])\n self.sync_all()\n\n self.nodes[0].donatefund(100)\n\n # Generate our addresses\n node_0_address = self.nodes[0].getnewaddress()\n node_1_address = self.nodes[1].getnewaddress()\n\n # Split funds\n self.nodes[0].sendtoaddress(node_1_address, 5000000)\n proposal=self.nodes[0].createproposal(node_0_address, 100, 36000, \"test\")\n proposal_id=proposal[\"hash\"]\n\n slow_gen(self.nodes[0], 1)\n end_cycle(self.nodes[0])\n\n self.sync_all()\n\n self.nodes[0].proposalvote(proposal_id, \"yes\")\n\n slow_gen(self.nodes[0], 1)\n end_cycle(self.nodes[0])\n\n self.sync_all()\n\n assert(self.nodes[0].getproposal(proposal_id)[\"state\"] == 1)\n assert(self.nodes[0].getproposal(proposal_id)[\"status\"] == \"accepted\")\n\n assert(self.nodes[1].getproposal(proposal_id)[\"state\"] == 1)\n assert(self.nodes[1].getproposal(proposal_id)[\"status\"] == \"accepted\")\n\n raw_preq = self.nodes[0].createpaymentrequest(proposal_id, 100, \"preq\", 1, True)[\"raw\"]\n self.sync_all()\n\n # Disconnect Nodes 0 and 1\n url = urllib.parse.urlparse(self.nodes[1].url)\n self.nodes[0].disconnectnode(url.hostname+\":\"+str(p2p_port(1)))\n\n self.nodes[0].forcetransactions([raw_preq])\n self.nodes[1].forcetransactions([raw_preq])\n\n blockcount_0 = self.nodes[0].getblockcount()\n blockcount_1 = self.nodes[1].getblockcount()\n\n self.nodes[0].staking(True)\n self.nodes[1].staking(True)\n\n # Let's wait for at least 20 blocks from Node 0\n while self.nodes[0].getblockcount() - blockcount_0 < 3:\n time.sleep(1)\n\n # Node 1 only has 1 output so it will only stake 1 block\n while self.nodes[1].getblockcount() == blockcount_1:\n time.sleep(1)\n\n print(\"nodes staked!\")\n\n self.nodes[0].staking(False)\n self.nodes[1].staking(False)\n\n node_0_best_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount())\n node_1_best_hash = self.nodes[1].getblockhash(self.nodes[1].getblockcount())\n\n # Node 0 and Node 1 have forked.\n assert(node_0_best_hash != node_1_best_hash)\n\n connect_nodes(self.nodes[0], 1)\n\n self.sync_all()\n\n node_1_best_hash_ = self.nodes[1].getblockhash(self.nodes[1].getblockcount())\n\n # Node 1 must have reorg'd to Node 0 chain\n assert(node_0_best_hash == node_1_best_hash_)\n\nif __name__ == '__main__':\n CFundPaymentRequestStateReorg().main()\n", "id": "12808451", "language": "Python", "matching_score": 2.2007415294647217, "max_stars_count": 0, "path": "qa/rpc-tests/cfund-paymentrequest-state-reorg.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2019 The BsmCoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or 
http://www.opensource.org/licenses/mit-license.php.\n\n#\n# Expanded helper routines for regression testing of the BSM Coin community fund\n#\n\nfrom test_framework.util import *\n\ndef givenIHaveActivatedTheCFund(node=None, \ntext=None, \nquestions=None, \nwithAnswers=False):\n\n if (node is None):\n print('givenIHaveActivatedTheCFund: invalid parameters')\n assert(False)\n\n if (get_bip9_status(node, \"communityfund\")[\"status\"] == \"defined\"):\n slow_gen(node, 100)\n # Verify the Community Fund is started\n assert (get_bip9_status(node, \"communityfund\")[\"status\"] == \"started\")\n\n if (get_bip9_status(node, \"communityfund\")[\"status\"] == \"started\"):\n slow_gen(node, 100)\n # Verify the Community Fund is locked_in\n assert (get_bip9_status(node, \"communityfund\")[\"status\"] == \"locked_in\")\n\n if (get_bip9_status(node, \"communityfund\")[\"status\"] == \"locked_in\"):\n slow_gen(node, 100)\n # Verify the Community Fund is active\n assert (get_bip9_status(node, \"communityfund\")[\"status\"] == \"active\")", "id": "12116326", "language": "Python", "matching_score": 2.02447509765625, "max_stars_count": 0, "path": "qa/rpc-tests/dao/given/iHaveActivatedTheCFund.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2019 The BsmCoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#\n# Expanded helper routines for regression testing of the BSM Coin community fund\n#\n\nimport sys, os #include the parent folder so the test_framework is available\nsys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))\n\nfrom test_framework.util import *\nfrom dao.given import (givenIHaveActivatedTheCFund, \n givenIHaveDonatedToTheCFund, \n givenIHaveCreatedANewAddress, \n givenIHaveCreatedAProposal, \n givenIHaveVotedOnTheProposal)\n\nfrom dao.when import *\nfrom dao.then import *\n\ndef givenIHaveAnExpiredProposal(node=None, \naddress=None,\namount=None,\nduration=None,\ndescription=None,\ndump=False):\n\n if (node is None\n or address is None\n or amount is None\n or duration is None\n or description is None):\n print('givenIHaveAnAcceptedProposal: invalid parameters')\n assert(False)\n\n givenIHaveActivatedTheCFund(node)\n givenIHaveDonatedToTheCFund(node, amount)\n\n if (address == False):\n address = givenIHaveCreatedANewAddress(node)[\"pubkey\"]\n\n hash = givenIHaveCreatedAProposal(node, address, amount, duration, description)\n\n whenTheVotingCycleEnds(node, -1)\n thenTheProposalShouldBeExpired(node, hash)\n\n return {\n \"hash\": hash,\n \"address\": address\n } ", "id": "12617387", "language": "Python", "matching_score": 2.748265027999878, "max_stars_count": 0, "path": "qa/rpc-tests/dao/given/iHaveAnExpiredProposal.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2019 The BsmCoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#\n# Expanded helper routines for regression testing of the BSM Coin community fund\n#\n\nfrom test_framework.util import *\n\ndef whenTheVotingCycleEnds(node=None, \ncycles=1):\n\n if (node is None\n or cycles < -1):\n print('whenTheVotingCycleEnds: invalid parameters')\n assert(False)\n \n try:\n blocksRemaining = node.cfundstats()[\"votingPeriod\"][\"ending\"] - node.cfundstats()[\"votingPeriod\"][\"current\"]\n periodStart = node.cfundstats()[\"votingPeriod\"][\"starting\"]\n periodEnd = 
node.cfundstats()[\"votingPeriod\"][\"ending\"]\n except JSONRPCException as e:\n print(e.error)\n assert(False)\n\n assert(node.getblockcount() >= periodStart)\n assert(node.getblockcount() <= periodEnd)\n\n slow_gen(node, blocksRemaining)\n\n assert(node.getblockcount() == periodEnd)\n\n # parsing -1 will end a full round of proposal voting cycles\n if (cycles == -1): \n cycles = node.cfundstats()[\"consensus\"][\"maxCountVotingCycleProposals\"] + 1\n\n if (cycles > 1):\n blocksPerCycle = node.cfundstats()[\"consensus\"][\"blocksPerVotingCycle\"]\n for i in range(1, cycles):\n slow_gen(node, blocksPerCycle)\n assert(node.getblockcount() == periodEnd + (i * blocksPerCycle))\n\n slow_gen(node, 1) # proceed to the first block of the next cycle", "id": "10111013", "language": "Python", "matching_score": 2.7312002182006836, "max_stars_count": 0, "path": "qa/rpc-tests/dao/when/theVotingCycleEnds.py" }, { "content": "#!/usr/bin/env python3\n# Copyright (c) 2019 The BsmCoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n#\n# Expanded helper routines for regression testing of the BSM Coin community fund\n#\n\nfrom test_framework.util import *\n\ndef thenTheProposalShouldBeRejected(node=None, \nproposalHash=None):\n\n if (node is None\n or proposalHash is None):\n print('thenTheProposalShouldBeRejected: invalid parameters')\n assert(False)\n\n try:\n proposal = node.getproposal(proposalHash)\n except JSONRPCException as e:\n print(e.error)\n assert(False)\n\n assert(proposal[\"status\"] == \"rejected\")\n assert(proposal[\"state\"] == 2)", "id": "5924170", "language": "Python", "matching_score": 1.0038412809371948, "max_stars_count": 0, "path": "qa/rpc-tests/dao/then/theProposalShouldBeRejected.py" } ]
2.200742
freakboy3742
[ { "content": "# Generated by h2py from /Applications/Xcode.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS13.2.sdk/usr/include/netinet/in.h\n\n# Included from sys/appleapiopts.h\n\n# Included from sys/_types.h\n\n# Included from sys/cdefs.h\ndef __has_builtin(x): return 0\n\ndef __has_include(x): return 0\n\ndef __has_feature(x): return 0\n\ndef __has_attribute(x): return 0\n\ndef __has_extension(x): return 0\n\ndef __P(protos): return protos \n\ndef __STRING(x): return #x\n\ndef __P(protos): return () \n\ndef __STRING(x): return \"x\"\n\ndef __deprecated_msg(_msg): return __attribute__((__deprecated__(_msg)))\n\ndef __deprecated_msg(_msg): return __attribute__((__deprecated__))\n\ndef __deprecated_enum_msg(_msg): return __deprecated_msg(_msg)\n\ndef __swift_unavailable(_msg): return __attribute__((__availability__(swift, unavailable, message=_msg)))\n\ndef __COPYRIGHT(s): return __IDSTRING(copyright,s)\n\ndef __RCSID(s): return __IDSTRING(rcsid,s)\n\ndef __SCCSID(s): return __IDSTRING(sccsid,s)\n\ndef __PROJECT_VERSION(s): return __IDSTRING(project_version,s)\n\n__DARWIN_ONLY_64_BIT_INO_T = 1\n__DARWIN_ONLY_UNIX_CONFORMANCE = 1\n__DARWIN_ONLY_VERS_1050 = 1\n__DARWIN_ONLY_UNIX_CONFORMANCE = 1\n__DARWIN_ONLY_UNIX_CONFORMANCE = 0\n__DARWIN_UNIX03 = 1\n__DARWIN_UNIX03 = 0\n__DARWIN_UNIX03 = 1\n__DARWIN_UNIX03 = 0\n__DARWIN_UNIX03 = 0\n__DARWIN_UNIX03 = 1\n__DARWIN_64_BIT_INO_T = 1\n__DARWIN_64_BIT_INO_T = 0\n__DARWIN_64_BIT_INO_T = 1\n__DARWIN_64_BIT_INO_T = 0\n__DARWIN_64_BIT_INO_T = 1\n__DARWIN_VERS_1050 = 1\n__DARWIN_VERS_1050 = 0\n__DARWIN_VERS_1050 = 1\n__DARWIN_NON_CANCELABLE = 0\n__DARWIN_SUF_UNIX03 = \"$UNIX2003\"\n__DARWIN_SUF_64_BIT_INO_T = \"$INODE64\"\n__DARWIN_SUF_1050 = \"$1050\"\n__DARWIN_SUF_NON_CANCELABLE = \"$NOCANCEL\"\n__DARWIN_SUF_EXTSN = \"$DARWIN_EXTSN\"\n\n# Included from sys/_symbol_aliasing.h\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_2_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_3_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_4_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_5_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_5_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_6_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_6_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_7_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_7_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_8_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_9_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_0(x): return x\n\ndef 
__DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_10_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_11_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_12_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_IPHONE___IPHONE_13_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_0(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_5(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_6(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_7(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_8(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_9(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_10(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_10_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_10_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_11(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_11_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_11_3(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_11_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_12(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_12_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_12_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_12_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_13(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_13_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_13_2(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_13_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_14(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_1(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_4(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_5(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_14_6(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_15(x): return x\n\ndef __DARWIN_ALIAS_STARTING_MAC___MAC_10_15_1(x): return x\n\n\n# Included from sys/_posix_availability.h\ndef __POSIX_C_DEPRECATED(ver): return ___POSIX_C_DEPRECATED_STARTING_##ver\n\n__STDC_WANT_LIB_EXT1__ = 1\n__DARWIN_NO_LONG_LONG = 1\n__DARWIN_NO_LONG_LONG = 0\n_DARWIN_FEATURE_64_BIT_INODE = 1\n_DARWIN_FEATURE_ONLY_64_BIT_INODE = 1\n_DARWIN_FEATURE_ONLY_VERS_1050 = 1\n_DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE = 1\n_DARWIN_FEATURE_UNIX_CONFORMANCE = 3\n\n# Included from machine/_types.h\n__DARWIN_NULL = 0\n\n# Included from sys/_pthread/_pthread_types.h\n__PTHREAD_SIZE__ = 8176\n__PTHREAD_ATTR_SIZE__ = 
56\n__PTHREAD_MUTEXATTR_SIZE__ = 8\n__PTHREAD_MUTEX_SIZE__ = 56\n__PTHREAD_CONDATTR_SIZE__ = 8\n__PTHREAD_COND_SIZE__ = 40\n__PTHREAD_ONCE_SIZE__ = 8\n__PTHREAD_RWLOCK_SIZE__ = 192\n__PTHREAD_RWLOCKATTR_SIZE__ = 16\n__PTHREAD_SIZE__ = 4088\n__PTHREAD_ATTR_SIZE__ = 36\n__PTHREAD_MUTEXATTR_SIZE__ = 8\n__PTHREAD_MUTEX_SIZE__ = 40\n__PTHREAD_CONDATTR_SIZE__ = 4\n__PTHREAD_COND_SIZE__ = 24\n__PTHREAD_ONCE_SIZE__ = 4\n__PTHREAD_RWLOCK_SIZE__ = 124\n__PTHREAD_RWLOCKATTR_SIZE__ = 12\n\n# Included from stdint.h\n__WORDSIZE = 64\n__WORDSIZE = 32\n\n# Included from sys/_types/_int8_t.h\n\n# Included from sys/_types/_int16_t.h\n\n# Included from sys/_types/_int32_t.h\n\n# Included from sys/_types/_int64_t.h\n\n# Included from _types/_uint8_t.h\n\n# Included from _types/_uint16_t.h\n\n# Included from _types/_uint32_t.h\n\n# Included from _types/_uint64_t.h\n\n# Included from sys/_types/_intptr_t.h\n\n# Included from machine/types.h\n\n# Included from sys/_types/_uintptr_t.h\n\n# Included from _types/_intmax_t.h\n\n# Included from _types/_uintmax_t.h\ndef INT8_C(v): return (v)\n\ndef INT16_C(v): return (v)\n\ndef INT32_C(v): return (v)\n\ndef UINT8_C(v): return (v)\n\ndef UINT16_C(v): return (v)\n\nINT8_MAX = 127\nINT16_MAX = 32767\nINT32_MAX = 2147483647\nINT8_MIN = -128\nINT16_MIN = -32768\nINT32_MIN = (-INT32_MAX-1)\nUINT8_MAX = 255\nUINT16_MAX = 65535\nINT_LEAST8_MIN = INT8_MIN\nINT_LEAST16_MIN = INT16_MIN\nINT_LEAST32_MIN = INT32_MIN\nINT_LEAST8_MAX = INT8_MAX\nINT_LEAST16_MAX = INT16_MAX\nINT_LEAST32_MAX = INT32_MAX\nUINT_LEAST8_MAX = UINT8_MAX\nUINT_LEAST16_MAX = UINT16_MAX\nINT_FAST8_MIN = INT8_MIN\nINT_FAST16_MIN = INT16_MIN\nINT_FAST32_MIN = INT32_MIN\nINT_FAST8_MAX = INT8_MAX\nINT_FAST16_MAX = INT16_MAX\nINT_FAST32_MAX = INT32_MAX\nUINT_FAST8_MAX = UINT8_MAX\nUINT_FAST16_MAX = UINT16_MAX\nPTRDIFF_MIN = INT32_MIN\nPTRDIFF_MAX = INT32_MAX\nWCHAR_MAX = 0x7fffffff\nWCHAR_MIN = 0\nWCHAR_MIN = (-WCHAR_MAX-1)\nWINT_MIN = INT32_MIN\nWINT_MAX = INT32_MAX\nSIG_ATOMIC_MIN = INT32_MIN\nSIG_ATOMIC_MAX = INT32_MAX\n\n# Included from Availability.h\n__API_TO_BE_DEPRECATED = 100000\n__MAC_10_0 = 1000\n__MAC_10_1 = 1010\n__MAC_10_2 = 1020\n__MAC_10_3 = 1030\n__MAC_10_4 = 1040\n__MAC_10_5 = 1050\n__MAC_10_6 = 1060\n__MAC_10_7 = 1070\n__MAC_10_8 = 1080\n__MAC_10_9 = 1090\n__MAC_10_10 = 101000\n__MAC_10_10_2 = 101002\n__MAC_10_10_3 = 101003\n__MAC_10_11 = 101100\n__MAC_10_11_2 = 101102\n__MAC_10_11_3 = 101103\n__MAC_10_11_4 = 101104\n__MAC_10_12 = 101200\n__MAC_10_12_1 = 101201\n__MAC_10_12_2 = 101202\n__MAC_10_12_4 = 101204\n__MAC_10_13 = 101300\n__MAC_10_13_1 = 101301\n__MAC_10_13_2 = 101302\n__MAC_10_13_4 = 101304\n__MAC_10_14 = 101400\n__MAC_10_14_1 = 101401\n__MAC_10_14_4 = 101404\n__MAC_10_15 = 101500\n__MAC_10_15_1 = 101501\n__IPHONE_2_0 = 20000\n__IPHONE_2_1 = 20100\n__IPHONE_2_2 = 20200\n__IPHONE_3_0 = 30000\n__IPHONE_3_1 = 30100\n__IPHONE_3_2 = 30200\n__IPHONE_4_0 = 40000\n__IPHONE_4_1 = 40100\n__IPHONE_4_2 = 40200\n__IPHONE_4_3 = 40300\n__IPHONE_5_0 = 50000\n__IPHONE_5_1 = 50100\n__IPHONE_6_0 = 60000\n__IPHONE_6_1 = 60100\n__IPHONE_7_0 = 70000\n__IPHONE_7_1 = 70100\n__IPHONE_8_0 = 80000\n__IPHONE_8_1 = 80100\n__IPHONE_8_2 = 80200\n__IPHONE_8_3 = 80300\n__IPHONE_8_4 = 80400\n__IPHONE_9_0 = 90000\n__IPHONE_9_1 = 90100\n__IPHONE_9_2 = 90200\n__IPHONE_9_3 = 90300\n__IPHONE_10_0 = 100000\n__IPHONE_10_1 = 100100\n__IPHONE_10_2 = 100200\n__IPHONE_10_3 = 100300\n__IPHONE_11_0 = 110000\n__IPHONE_11_1 = 110100\n__IPHONE_11_2 = 110200\n__IPHONE_11_3 = 110300\n__IPHONE_11_4 = 110400\n__IPHONE_12_0 = 
120000\n__IPHONE_12_1 = 120100\n__IPHONE_12_2 = 120200\n__IPHONE_12_3 = 120300\n__IPHONE_13_0 = 130000\n__IPHONE_13_1 = 130100\n__IPHONE_13_2 = 130200\n__TVOS_9_0 = 90000\n__TVOS_9_1 = 90100\n__TVOS_9_2 = 90200\n__TVOS_10_0 = 100000\n__TVOS_10_0_1 = 100001\n__TVOS_10_1 = 100100\n__TVOS_10_2 = 100200\n__TVOS_11_0 = 110000\n__TVOS_11_1 = 110100\n__TVOS_11_2 = 110200\n__TVOS_11_3 = 110300\n__TVOS_11_4 = 110400\n__TVOS_12_0 = 120000\n__TVOS_12_1 = 120100\n__TVOS_12_2 = 120200\n__TVOS_12_3 = 120300\n__TVOS_13_0 = 130000\n__TVOS_13_2 = 130200\n__WATCHOS_1_0 = 10000\n__WATCHOS_2_0 = 20000\n__WATCHOS_2_1 = 20100\n__WATCHOS_2_2 = 20200\n__WATCHOS_3_0 = 30000\n__WATCHOS_3_1 = 30100\n__WATCHOS_3_1_1 = 30101\n__WATCHOS_3_2 = 30200\n__WATCHOS_4_0 = 40000\n__WATCHOS_4_1 = 40100\n__WATCHOS_4_2 = 40200\n__WATCHOS_4_3 = 40300\n__WATCHOS_5_0 = 50000\n__WATCHOS_5_1 = 50100\n__WATCHOS_5_2 = 50200\n__WATCHOS_6_0 = 60000\n__WATCHOS_6_1 = 60100\n__DRIVERKIT_19_0 = 190000\n\n# Included from AvailabilityInternal.h\n__TV_OS_VERSION_MAX_ALLOWED = __TVOS_13_0\n__IPHONE_OS_VERSION_MIN_REQUIRED = 90000\n__WATCH_OS_VERSION_MAX_ALLOWED = 60000\n__IPHONE_OS_VERSION_MIN_REQUIRED = 90000\n__BRIDGE_OS_VERSION_MAX_ALLOWED = 20000\n__IPHONE_OS_VERSION_MIN_REQUIRED = 110000\n__MAC_OS_X_VERSION_MAX_ALLOWED = __MAC_10_15\n__IPHONE_OS_VERSION_MAX_ALLOWED = __IPHONE_13_0\n__IPHONE_OS_VERSION_MIN_REQUIRED = __IPHONE_2_0\ndef __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated(_msg)))\n\ndef __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated))\n\ndef __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated(_msg)))\n\ndef __AVAILABILITY_INTERNAL_DEPRECATED_MSG(_msg): return __attribute__((deprecated))\n\n__ENABLE_LEGACY_IPHONE_AVAILABILITY = 1\n__ENABLE_LEGACY_MAC_AVAILABILITY = 1\n__ENABLE_LEGACY_IPHONE_AVAILABILITY = 1\n__ENABLE_LEGACY_MAC_AVAILABILITY = 1\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_0_MSG(_msg): return __attribute__((availability(ios,introduced=2.0,deprecated=2.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_2_0_DEP__IPHONE_2_1_MSG(_msg): return 
# __AVAILABILITY_INTERNAL__IPHONE_<introduced>_DEP__IPHONE_<deprecated>_MSG(_msg)
# stubs translated from AvailabilityInternal.h follow: one definition per
# supported introduced/deprecated iOS version combination, each emitted twice
# (with and without the message argument) and returning the corresponding
# __attribute__((availability(ios, introduced=..., deprecated=..., message=...)))
# expression.
def 
__AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.2,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_2_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=4.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=4.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_4_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=4.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=5.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=6.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=7.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=7.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return 
__attribute__((availability(ios,introduced=4.3,deprecated=7.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=4.3,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_4_3_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=4.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_1_MSG(_msg): return 
__attribute__((availability(ios,introduced=5.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=5.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=6.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.3,message=_msg)))\n\ndef 
__AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.0,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=5.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=5.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_5_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=5.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.0)))\n\ndef 
__AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=6.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=5.1,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_9_3_MSG(_msg): return 
__attribute__((availability(ios,introduced=5.1,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_5_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=5.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=6.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_3_MSG(_msg): return 
__attribute__((availability(ios,introduced=6.0,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.0,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=6.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=6.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_6_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=6.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.0,message=_msg)))\n\ndef 
__AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=6.1,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_6_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=6.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.0)))\n\ndef 
__AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_12_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=12.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_12_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=12.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_0_MSG(_msg): return 
__attribute__((availability(ios,introduced=7.0,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.0,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=7.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=7.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_7_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_3_MSG(_msg): return 
__attribute__((availability(ios,introduced=7.1,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=7.1,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_7_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=7.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_11_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_11_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=11)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.0,message=_msg)))\n\ndef 
__AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.0,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=8.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.2)))\n\ndef 
__AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.1,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=8.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_2_MSG(_msg): return 
__attribute__((availability(ios,introduced=8.2,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=8.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_8_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.2,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_2_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=8.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.2,message=_msg)))\n\ndef 
__AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=8.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_8_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.3,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_3_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=8.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_10_3_MSG(_msg): return 
__attribute__((availability(ios,introduced=8.4,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=8.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_8_4_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=8.4,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_8_4_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=8.4)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=9.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_2_MSG(_msg): return 
__attribute__((availability(ios,introduced=9.0,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.0,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=9.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=9.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_9_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.1,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=9.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_2_MSG(_msg): return 
__attribute__((availability(ios,introduced=9.2,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=9.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_9_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.2,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_2_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=9.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=9.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_9_3_MSG(_msg): return __attribute__((availability(ios,introduced=9.3,deprecated=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_9_3_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=9.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=10.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_0_MSG(_msg): return __attribute__((availability(ios,introduced=10.0,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=10.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=10.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_2_MSG(_msg): 
return __attribute__((availability(ios,introduced=10.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=10.0,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_0_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=10.0)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=10.1,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_1_DEP__IPHONE_10_1_MSG(_msg): return __attribute__((availability(ios,introduced=10.1,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=10.1,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_1_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=10.1,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.1,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_1_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.1,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_1_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=10.1)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_2_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=10.2,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_2_DEP__IPHONE_10_2_MSG(_msg): return __attribute__((availability(ios,introduced=10.2,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_2_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.2,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_2_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.2,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_2_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=10.2)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.3,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_3_DEP__IPHONE_10_3_MSG(_msg): return __attribute__((availability(ios,introduced=10.3,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_10_3_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,introduced=10.3)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_NA_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,unavailable)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_COMPAT_VERSION_DEP__IPHONE_COMPAT_VERSION_MSG(_msg): return __attribute__((availability(ios,unavailable)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_COMPAT_VERSION_DEP__IPHONE_COMPAT_VERSION_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_COMPAT_VERSION_DEP__IPHONE_COMPAT_VERSION_MSG(_msg): return __attribute__((availability(ios,introduced=4.0,deprecated=4.0)))\n\ndef 
__AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.4)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.5,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.5)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.6,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_10_9_MSG(_msg): return 
__attribute__((availability(macosx,introduced=10.1,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_1_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.1)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.2)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.4)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.5,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.5)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.6,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2,deprecated=10.9)))\n\ndef 
__AVAILABILITY_INTERNAL__MAC_10_2_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.2)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.4)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.5,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.5)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.6,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_3_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.3)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.10)))\n\ndef 
__AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.4)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.5,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.5)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.6,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_4_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.4)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_5_MSG(_msg): return 
__attribute__((availability(macosx,introduced=10.5,deprecated=10.5,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.5)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.6,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_5_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.5)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.6,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.9,message=_msg)))\n\ndef 
__AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_6_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_7_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_9_MSG(_msg): return 
__attribute__((availability(macosx,introduced=10.8,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_8_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_9_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_0_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.0,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_0_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.0)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.1,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_1_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.1)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.2,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_2_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.2)))\n\ndef 
__AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.3,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_3_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.3)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.4,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_4_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.4)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.5,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_5_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.5)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.6,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_6_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.6)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.7,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_7_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.7)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.8,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_8_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.8)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.9,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_9_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.9)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_13_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.13,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_10_13_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0,deprecated=10.13)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_0_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.0)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.10,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_10_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_13_MSG(_msg): return 
__attribute__((availability(macosx,introduced=10.10,deprecated=10.13,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_10_13_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10,deprecated=10.13)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_10_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.10)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_11_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.11,deprecated=10.11,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_11_DEP__MAC_10_11_MSG(_msg): return __attribute__((availability(macosx,introduced=10.11,deprecated=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_11_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.11,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_11_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.11,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_11_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.11)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_12_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.12,deprecated=10.12,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_12_DEP__MAC_10_12_MSG(_msg): return __attribute__((availability(macosx,introduced=10.12,deprecated=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_12_DEP__MAC_10_13_MSG(_msg): return __attribute__((availability(macosx,introduced=10.12,deprecated=10.13,message=_msg)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_12_DEP__MAC_10_13_MSG(_msg): return __attribute__((availability(macosx,introduced=10.12,deprecated=10.13)))\n\ndef __AVAILABILITY_INTERNAL__MAC_10_12_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,introduced=10.12)))\n\ndef __AVAILABILITY_INTERNAL__MAC_NA_DEP__MAC_NA_MSG(_msg): return __attribute__((availability(macosx,unavailable)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_NA_DEP__IPHONE_NA_MSG(_msg): return __attribute__((availability(ios,unavailable)))\n\ndef __AVAILABILITY_INTERNAL__IPHONE_COMPAT_VERSION_DEP__IPHONE_COMPAT_VERSION_MSG(_msg): return __attribute__((availability(ios,unavailable)))\n\ndef __API_AVAILABLE1(x): return __API_A(x)\n\ndef __API_RANGE_STRINGIFY(x): return __API_RANGE_STRINGIFY2(x)\n\ndef __API_RANGE_STRINGIFY2(x): return #x \n\ndef __API_AVAILABLE_BEGIN1(a): return __API_A_BEGIN(a)\n\ndef __API_UNAVAILABLE_PLATFORM_uikitformac(x): return uikitformac,unavailable\n\ndef __API_UNAVAILABLE1(x): return __API_U(x)\n\ndef __API_UNAVAILABLE_BEGIN1(a): return __API_U_BEGIN(a)\n\ndef __OSX_EXTENSION_UNAVAILABLE(_msg): return __OS_AVAILABILITY_MSG(macosx_app_extension,unavailable,_msg)\n\ndef __IOS_EXTENSION_UNAVAILABLE(_msg): return __OS_AVAILABILITY_MSG(ios_app_extension,unavailable,_msg)\n\ndef __OSX_AVAILABLE(_vers): return __OS_AVAILABILITY(macosx,introduced=_vers)\n\ndef __IOS_AVAILABLE(_vers): return __OS_AVAILABILITY(ios,introduced=_vers)\n\ndef __TVOS_AVAILABLE(_vers): return __OS_AVAILABILITY(tvos,introduced=_vers)\n\ndef __WATCHOS_AVAILABLE(_vers): return __OS_AVAILABILITY(watchos,introduced=_vers)\n\ndef __SWIFT_UNAVAILABLE_MSG(_msg): return __OS_AVAILABILITY_MSG(swift,unavailable,_msg)\n\n\n# Included from sys/_types/_in_addr_t.h\n\n# Included from sys/_types/_in_port_t.h\n\n# Included from sys/socket.h\n\n# Included from sys/types.h\n\n# Included from machine/endian.h\n\n# Included from sys/_types/_u_char.h\n\n# Included from sys/_types/_u_short.h\n\n# Included from 
sys/_types/_u_int.h\n\n# Included from sys/_types/_caddr_t.h\n\n# Included from sys/_types/_dev_t.h\n\n# Included from sys/_types/_blkcnt_t.h\n\n# Included from sys/_types/_blksize_t.h\n\n# Included from sys/_types/_gid_t.h\n\n# Included from sys/_types/_ino_t.h\n\n# Included from sys/_types/_ino64_t.h\n\n# Included from sys/_types/_key_t.h\n\n# Included from sys/_types/_mode_t.h\n\n# Included from sys/_types/_nlink_t.h\n\n# Included from sys/_types/_id_t.h\n\n# Included from sys/_types/_pid_t.h\n\n# Included from sys/_types/_off_t.h\n\n# Included from sys/_types/_uid_t.h\ndef major(x): return ((int32_t)(((u_int32_t)(x) >> 24) & 0xff))\n\ndef minor(x): return ((int32_t)((x) & 0xffffff))\n\n\n# Included from sys/_types/_clock_t.h\n\n# Included from sys/_types/_size_t.h\n\n# Included from sys/_types/_ssize_t.h\n\n# Included from sys/_types/_time_t.h\n\n# Included from sys/_types/_useconds_t.h\n\n# Included from sys/_types/_suseconds_t.h\n\n# Included from sys/_types/_rsize_t.h\n\n# Included from sys/_types/_errno_t.h\n\n# Included from sys/_types/_fd_def.h\n__DARWIN_FD_SETSIZE = 1024\n__DARWIN_NBBY = 8\ndef __DARWIN_FD_ZERO(p): return __builtin_bzero(p, sizeof(*(p)))\n\ndef __DARWIN_FD_ZERO(p): return bzero(p, sizeof(*(p)))\n\nNBBY = __DARWIN_NBBY\n\n# Included from sys/_types/_fd_setsize.h\nFD_SETSIZE = __DARWIN_FD_SETSIZE\n\n# Included from sys/_types/_fd_set.h\n\n# Included from sys/_types/_fd_clr.h\n\n# Included from sys/_types/_fd_zero.h\ndef FD_ZERO(p): return __DARWIN_FD_ZERO(p)\n\n\n# Included from sys/_types/_fd_isset.h\n\n# Included from sys/_types/_fd_copy.h\n\n# Included from sys/_pthread/_pthread_attr_t.h\n\n# Included from sys/_pthread/_pthread_cond_t.h\n\n# Included from sys/_pthread/_pthread_condattr_t.h\n\n# Included from sys/_pthread/_pthread_mutex_t.h\n\n# Included from sys/_pthread/_pthread_mutexattr_t.h\n\n# Included from sys/_pthread/_pthread_once_t.h\n\n# Included from sys/_pthread/_pthread_rwlock_t.h\n\n# Included from sys/_pthread/_pthread_rwlockattr_t.h\n\n# Included from sys/_pthread/_pthread_t.h\n\n# Included from sys/_pthread/_pthread_key_t.h\n\n# Included from sys/_types/_fsblkcnt_t.h\n\n# Included from sys/_types/_fsfilcnt_t.h\n\n# Included from machine/_param.h\n\n# Included from net/net_kev.h\nKEV_INET_SUBCLASS = 1\nKEV_INET_NEW_ADDR = 1\nKEV_INET_CHANGED_ADDR = 2\nKEV_INET_ADDR_DELETED = 3\nKEV_INET_SIFDSTADDR = 4\nKEV_INET_SIFBRDADDR = 5\nKEV_INET_SIFNETMASK = 6\nKEV_INET_ARPCOLLISION = 7\nKEV_INET_PORTINUSE = 8\nKEV_INET_ARPRTRFAILURE = 9\nKEV_INET_ARPRTRALIVE = 10\nKEV_DL_SUBCLASS = 2\nKEV_DL_SIFFLAGS = 1\nKEV_DL_SIFMETRICS = 2\nKEV_DL_SIFMTU = 3\nKEV_DL_SIFPHYS = 4\nKEV_DL_SIFMEDIA = 5\nKEV_DL_SIFGENERIC = 6\nKEV_DL_ADDMULTI = 7\nKEV_DL_DELMULTI = 8\nKEV_DL_IF_ATTACHED = 9\nKEV_DL_IF_DETACHING = 10\nKEV_DL_IF_DETACHED = 11\nKEV_DL_LINK_OFF = 12\nKEV_DL_LINK_ON = 13\nKEV_DL_PROTO_ATTACHED = 14\nKEV_DL_PROTO_DETACHED = 15\nKEV_DL_LINK_ADDRESS_CHANGED = 16\nKEV_DL_WAKEFLAGS_CHANGED = 17\nKEV_DL_IF_IDLE_ROUTE_REFCNT = 18\nKEV_DL_IFCAP_CHANGED = 19\nKEV_DL_LINK_QUALITY_METRIC_CHANGED = 20\nKEV_DL_NODE_PRESENCE = 21\nKEV_DL_NODE_ABSENCE = 22\nKEV_DL_MASTER_ELECTED = 23\nKEV_DL_ISSUES = 24\nKEV_DL_IFDELEGATE_CHANGED = 25\nKEV_DL_AWDL_RESTRICTED = 26\nKEV_DL_AWDL_UNRESTRICTED = 27\nKEV_DL_RRC_STATE_CHANGED = 28\nKEV_DL_QOS_MODE_CHANGED = 29\nKEV_DL_LOW_POWER_MODE_CHANGED = 30\nKEV_INET6_SUBCLASS = 6\nKEV_INET6_NEW_USER_ADDR = 1\nKEV_INET6_CHANGED_ADDR = 2\nKEV_INET6_ADDR_DELETED = 3\nKEV_INET6_NEW_LL_ADDR = 4\nKEV_INET6_NEW_RTADV_ADDR = 5\nKEV_INET6_DEFROUTER = 
6\nKEV_INET6_REQUEST_NAT64_PREFIX = 7\n\n# Included from sys/_types/_sa_family_t.h\n\n# Included from sys/_types/_socklen_t.h\n\n# Included from sys/_types/_iovec_t.h\nSOCK_STREAM = 1\nSOCK_DGRAM = 2\nSOCK_RAW = 3\nSOCK_RDM = 4\nSOCK_SEQPACKET = 5\nSO_DEBUG = 0x0001\nSO_ACCEPTCONN = 0x0002\nSO_REUSEADDR = 0x0004\nSO_KEEPALIVE = 0x0008\nSO_DONTROUTE = 0x0010\nSO_BROADCAST = 0x0020\nSO_USELOOPBACK = 0x0040\nSO_LINGER = 0x0080\nSO_LINGER = 0x1080\nSO_OOBINLINE = 0x0100\nSO_REUSEPORT = 0x0200\nSO_TIMESTAMP = 0x0400\nSO_TIMESTAMP_MONOTONIC = 0x0800\nSO_ACCEPTFILTER = 0x1000\nSO_DONTTRUNC = 0x2000\nSO_WANTMORE = 0x4000\nSO_WANTOOBFLAG = 0x8000\nSO_SNDBUF = 0x1001\nSO_RCVBUF = 0x1002\nSO_SNDLOWAT = 0x1003\nSO_RCVLOWAT = 0x1004\nSO_SNDTIMEO = 0x1005\nSO_RCVTIMEO = 0x1006\nSO_ERROR = 0x1007\nSO_TYPE = 0x1008\nSO_LABEL = 0x1010\nSO_PEERLABEL = 0x1011\nSO_NREAD = 0x1020\nSO_NKE = 0x1021\nSO_NOSIGPIPE = 0x1022\nSO_NOADDRERR = 0x1023\nSO_NWRITE = 0x1024\nSO_REUSESHAREUID = 0x1025\nSO_NOTIFYCONFLICT = 0x1026\nSO_UPCALLCLOSEWAIT = 0x1027\nSO_LINGER_SEC = 0x1080\nSO_RANDOMPORT = 0x1082\nSO_NP_EXTENSIONS = 0x1083\nSO_NUMRCVPKT = 0x1112\nSO_NET_SERVICE_TYPE = 0x1116\nSO_NETSVC_MARKING_LEVEL = 0x1119\nNET_SERVICE_TYPE_BE = 0\nNET_SERVICE_TYPE_BK = 1\nNET_SERVICE_TYPE_SIG = 2\nNET_SERVICE_TYPE_VI = 3\nNET_SERVICE_TYPE_VO = 4\nNET_SERVICE_TYPE_RV = 5\nNET_SERVICE_TYPE_AV = 6\nNET_SERVICE_TYPE_OAM = 7\nNET_SERVICE_TYPE_RD = 8\nNETSVC_MRKNG_UNKNOWN = 0\nNETSVC_MRKNG_LVL_L2 = 1\nNETSVC_MRKNG_LVL_L3L2_ALL = 2\nNETSVC_MRKNG_LVL_L3L2_BK = 3\nSAE_ASSOCID_ANY = 0\nSAE_CONNID_ANY = 0\nCONNECT_RESUME_ON_READ_WRITE = 0x1\nCONNECT_DATA_IDEMPOTENT = 0x2\nCONNECT_DATA_AUTHENTICATED = 0x4\nSONPX_SETOPTSHUT = 0x000000001\nSOL_SOCKET = 0xffff\nAF_UNSPEC = 0\nAF_UNIX = 1\nAF_LOCAL = AF_UNIX\nAF_INET = 2\nAF_IMPLINK = 3\nAF_PUP = 4\nAF_CHAOS = 5\nAF_NS = 6\nAF_ISO = 7\nAF_OSI = AF_ISO\nAF_ECMA = 8\nAF_DATAKIT = 9\nAF_CCITT = 10\nAF_SNA = 11\nAF_DECnet = 12\nAF_DLI = 13\nAF_LAT = 14\nAF_HYLINK = 15\nAF_APPLETALK = 16\nAF_ROUTE = 17\nAF_LINK = 18\npseudo_AF_XTP = 19\nAF_COIP = 20\nAF_CNT = 21\npseudo_AF_RTIP = 22\nAF_IPX = 23\nAF_SIP = 24\npseudo_AF_PIP = 25\nAF_NDRV = 27\nAF_ISDN = 28\nAF_E164 = AF_ISDN\npseudo_AF_KEY = 29\nAF_INET6 = 30\nAF_NATM = 31\nAF_SYSTEM = 32\nAF_NETBIOS = 33\nAF_PPP = 34\npseudo_AF_HDRCMPLT = 35\nAF_RESERVED_36 = 36\nAF_IEEE80211 = 37\nAF_UTUN = 38\nAF_MAX = 40\nSOCK_MAXADDRLEN = 255\n_SS_MAXSIZE = 128\nPF_UNSPEC = AF_UNSPEC\nPF_LOCAL = AF_LOCAL\nPF_UNIX = PF_LOCAL\nPF_INET = AF_INET\nPF_IMPLINK = AF_IMPLINK\nPF_PUP = AF_PUP\nPF_CHAOS = AF_CHAOS\nPF_NS = AF_NS\nPF_ISO = AF_ISO\nPF_OSI = AF_ISO\nPF_ECMA = AF_ECMA\nPF_DATAKIT = AF_DATAKIT\nPF_CCITT = AF_CCITT\nPF_SNA = AF_SNA\nPF_DECnet = AF_DECnet\nPF_DLI = AF_DLI\nPF_LAT = AF_LAT\nPF_HYLINK = AF_HYLINK\nPF_APPLETALK = AF_APPLETALK\nPF_ROUTE = AF_ROUTE\nPF_LINK = AF_LINK\nPF_XTP = pseudo_AF_XTP\nPF_COIP = AF_COIP\nPF_CNT = AF_CNT\nPF_SIP = AF_SIP\nPF_IPX = AF_IPX\nPF_RTIP = pseudo_AF_RTIP\nPF_PIP = pseudo_AF_PIP\nPF_NDRV = AF_NDRV\nPF_ISDN = AF_ISDN\nPF_KEY = pseudo_AF_KEY\nPF_INET6 = AF_INET6\nPF_NATM = AF_NATM\nPF_SYSTEM = AF_SYSTEM\nPF_NETBIOS = AF_NETBIOS\nPF_PPP = AF_PPP\nPF_RESERVED_36 = AF_RESERVED_36\nPF_UTUN = AF_UTUN\nPF_MAX = AF_MAX\nNET_MAXID = AF_MAX\nNET_RT_DUMP = 1\nNET_RT_FLAGS = 2\nNET_RT_IFLIST = 3\nNET_RT_STAT = 4\nNET_RT_TRASH = 5\nNET_RT_IFLIST2 = 6\nNET_RT_DUMP2 = 7\nNET_RT_FLAGS_PRIV = 10\nNET_RT_MAXID = 11\nSOMAXCONN = 128\nMSG_OOB = 0x1\nMSG_PEEK = 0x2\nMSG_DONTROUTE = 0x4\nMSG_EOR = 0x8\nMSG_TRUNC = 0x10\nMSG_CTRUNC = 
0x20\nMSG_WAITALL = 0x40\nMSG_DONTWAIT = 0x80\nMSG_EOF = 0x100\nMSG_WAITSTREAM = 0x200\nMSG_FLUSH = 0x400\nMSG_HOLD = 0x800\nMSG_SEND = 0x1000\nMSG_HAVEMORE = 0x2000\nMSG_RCVMORE = 0x4000\nMSG_NEEDSA = 0x10000\nCMGROUP_MAX = 16\ndef CMSG_FIRSTHDR(mhdr): return \\\n\nSCM_RIGHTS = 0x01\nSCM_TIMESTAMP = 0x02\nSCM_CREDS = 0x03\nSCM_TIMESTAMP_MONOTONIC = 0x04\nSHUT_RD = 0\nSHUT_WR = 1\nSHUT_RDWR = 2\n\n# Included from sys/_endian.h\ndef ntohl(x): return ((__uint32_t)(x))\n\ndef ntohs(x): return ((__uint16_t)(x))\n\ndef htonl(x): return ((__uint32_t)(x))\n\ndef htons(x): return ((__uint16_t)(x))\n\ndef ntohll(x): return ((__uint64_t)(x))\n\ndef htonll(x): return ((__uint64_t)(x))\n\ndef NTOHL(x): return (x)\n\ndef NTOHS(x): return (x)\n\ndef NTOHLL(x): return (x)\n\ndef HTONL(x): return (x)\n\ndef HTONS(x): return (x)\n\ndef HTONLL(x): return (x)\n\n\n# Included from libkern/_OSByteOrder.h\ndef __DARWIN_OSSwapConstInt16(x): return \\\n\ndef __DARWIN_OSSwapConstInt32(x): return \\\n\ndef __DARWIN_OSSwapConstInt64(x): return \\\n\n\n# Included from libkern/arm/OSByteOrder.h\n\n# Included from arm/arch.h\n\n# Included from sys/_types/_os_inline.h\ndef __DARWIN_OSSwapInt16(x): return \\\n\ndef __DARWIN_OSSwapInt32(x): return \\\n\ndef __DARWIN_OSSwapInt64(x): return \\\n\ndef __DARWIN_OSSwapInt16(x): return _OSSwapInt16(x)\n\ndef __DARWIN_OSSwapInt32(x): return _OSSwapInt32(x)\n\ndef __DARWIN_OSSwapInt64(x): return _OSSwapInt64(x)\n\ndef ntohs(x): return __DARWIN_OSSwapInt16(x)\n\ndef htons(x): return __DARWIN_OSSwapInt16(x)\n\ndef ntohl(x): return __DARWIN_OSSwapInt32(x)\n\ndef htonl(x): return __DARWIN_OSSwapInt32(x)\n\ndef ntohll(x): return __DARWIN_OSSwapInt64(x)\n\ndef htonll(x): return __DARWIN_OSSwapInt64(x)\n\nIPPROTO_IP = 0\nIPPROTO_HOPOPTS = 0\nIPPROTO_ICMP = 1\nIPPROTO_IGMP = 2\nIPPROTO_GGP = 3\nIPPROTO_IPV4 = 4\nIPPROTO_IPIP = IPPROTO_IPV4\nIPPROTO_TCP = 6\nIPPROTO_ST = 7\nIPPROTO_EGP = 8\nIPPROTO_PIGP = 9\nIPPROTO_RCCMON = 10\nIPPROTO_NVPII = 11\nIPPROTO_PUP = 12\nIPPROTO_ARGUS = 13\nIPPROTO_EMCON = 14\nIPPROTO_XNET = 15\nIPPROTO_CHAOS = 16\nIPPROTO_UDP = 17\nIPPROTO_MUX = 18\nIPPROTO_MEAS = 19\nIPPROTO_HMP = 20\nIPPROTO_PRM = 21\nIPPROTO_IDP = 22\nIPPROTO_TRUNK1 = 23\nIPPROTO_TRUNK2 = 24\nIPPROTO_LEAF1 = 25\nIPPROTO_LEAF2 = 26\nIPPROTO_RDP = 27\nIPPROTO_IRTP = 28\nIPPROTO_TP = 29\nIPPROTO_BLT = 30\nIPPROTO_NSP = 31\nIPPROTO_INP = 32\nIPPROTO_SEP = 33\nIPPROTO_3PC = 34\nIPPROTO_IDPR = 35\nIPPROTO_XTP = 36\nIPPROTO_DDP = 37\nIPPROTO_CMTP = 38\nIPPROTO_TPXX = 39\nIPPROTO_IL = 40\nIPPROTO_IPV6 = 41\nIPPROTO_SDRP = 42\nIPPROTO_ROUTING = 43\nIPPROTO_FRAGMENT = 44\nIPPROTO_IDRP = 45\nIPPROTO_RSVP = 46\nIPPROTO_GRE = 47\nIPPROTO_MHRP = 48\nIPPROTO_BHA = 49\nIPPROTO_ESP = 50\nIPPROTO_AH = 51\nIPPROTO_INLSP = 52\nIPPROTO_SWIPE = 53\nIPPROTO_NHRP = 54\nIPPROTO_ICMPV6 = 58\nIPPROTO_NONE = 59\nIPPROTO_DSTOPTS = 60\nIPPROTO_AHIP = 61\nIPPROTO_CFTP = 62\nIPPROTO_HELLO = 63\nIPPROTO_SATEXPAK = 64\nIPPROTO_KRYPTOLAN = 65\nIPPROTO_RVD = 66\nIPPROTO_IPPC = 67\nIPPROTO_ADFS = 68\nIPPROTO_SATMON = 69\nIPPROTO_VISA = 70\nIPPROTO_IPCV = 71\nIPPROTO_CPNX = 72\nIPPROTO_CPHB = 73\nIPPROTO_WSN = 74\nIPPROTO_PVP = 75\nIPPROTO_BRSATMON = 76\nIPPROTO_ND = 77\nIPPROTO_WBMON = 78\nIPPROTO_WBEXPAK = 79\nIPPROTO_EON = 80\nIPPROTO_VMTP = 81\nIPPROTO_SVMTP = 82\nIPPROTO_VINES = 83\nIPPROTO_TTP = 84\nIPPROTO_IGP = 85\nIPPROTO_DGP = 86\nIPPROTO_TCF = 87\nIPPROTO_IGRP = 88\nIPPROTO_OSPFIGP = 89\nIPPROTO_SRPC = 90\nIPPROTO_LARP = 91\nIPPROTO_MTP = 92\nIPPROTO_AX25 = 93\nIPPROTO_IPEIP = 94\nIPPROTO_MICP = 95\nIPPROTO_SCCSP = 
96\nIPPROTO_ETHERIP = 97\nIPPROTO_ENCAP = 98\nIPPROTO_APES = 99\nIPPROTO_GMTP = 100\nIPPROTO_PIM = 103\nIPPROTO_IPCOMP = 108\nIPPROTO_PGM = 113\nIPPROTO_SCTP = 132\nIPPROTO_DIVERT = 254\nIPPROTO_RAW = 255\nIPPROTO_MAX = 256\nIPPROTO_DONE = 257\n__DARWIN_IPPORT_RESERVED = 1024\nIPPORT_RESERVED = __DARWIN_IPPORT_RESERVED\nIPPORT_USERRESERVED = 5000\nIPPORT_HIFIRSTAUTO = 49152\nIPPORT_HILASTAUTO = 65535\nIPPORT_RESERVEDSTART = 600\ndef IN_CLASSA(i): return (((u_int32_t)(i) & 0x80000000) == 0)\n\nIN_CLASSA_NET = 0xff000000\nIN_CLASSA_NSHIFT = 24\nIN_CLASSA_HOST = 0x00ffffff\nIN_CLASSA_MAX = 128\ndef IN_CLASSB(i): return (((u_int32_t)(i) & 0xc0000000) == 0x80000000)\n\nIN_CLASSB_NET = 0xffff0000\nIN_CLASSB_NSHIFT = 16\nIN_CLASSB_HOST = 0x0000ffff\nIN_CLASSB_MAX = 65536\ndef IN_CLASSC(i): return (((u_int32_t)(i) & 0xe0000000) == 0xc0000000)\n\nIN_CLASSC_NET = 0xffffff00\nIN_CLASSC_NSHIFT = 8\nIN_CLASSC_HOST = 0x000000ff\ndef IN_CLASSD(i): return (((u_int32_t)(i) & 0xf0000000) == 0xe0000000)\n\nIN_CLASSD_NET = 0xf0000000\nIN_CLASSD_NSHIFT = 28\nIN_CLASSD_HOST = 0x0fffffff\ndef IN_MULTICAST(i): return IN_CLASSD(i)\n\ndef IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)\n\ndef IN_BADCLASS(i): return (((u_int32_t)(i) & 0xf0000000) == 0xf0000000)\n\nINADDR_NONE = 0xffffffff\ndef IN_LINKLOCAL(i): return (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM)\n\ndef IN_LOOPBACK(i): return (((u_int32_t)(i) & 0xff000000) == 0x7f000000)\n\ndef IN_ZERONET(i): return (((u_int32_t)(i) & 0xff000000) == 0)\n\ndef IN_LOCAL_GROUP(i): return (((u_int32_t)(i) & 0xffffff00) == 0xe0000000)\n\nIN_LOOPBACKNET = 127\nINET_ADDRSTRLEN = 16\nIP_OPTIONS = 1\nIP_HDRINCL = 2\nIP_TOS = 3\nIP_TTL = 4\nIP_RECVOPTS = 5\nIP_RECVRETOPTS = 6\nIP_RECVDSTADDR = 7\nIP_RETOPTS = 8\nIP_MULTICAST_IF = 9\nIP_MULTICAST_TTL = 10\nIP_MULTICAST_LOOP = 11\nIP_ADD_MEMBERSHIP = 12\nIP_DROP_MEMBERSHIP = 13\nIP_MULTICAST_VIF = 14\nIP_RSVP_ON = 15\nIP_RSVP_OFF = 16\nIP_RSVP_VIF_ON = 17\nIP_RSVP_VIF_OFF = 18\nIP_PORTRANGE = 19\nIP_RECVIF = 20\nIP_IPSEC_POLICY = 21\nIP_FAITH = 22\nIP_STRIPHDR = 23\nIP_RECVTTL = 24\nIP_BOUND_IF = 25\nIP_PKTINFO = 26\nIP_RECVPKTINFO = IP_PKTINFO\nIP_RECVTOS = 27\nIP_FW_ADD = 40\nIP_FW_DEL = 41\nIP_FW_FLUSH = 42\nIP_FW_ZERO = 43\nIP_FW_GET = 44\nIP_FW_RESETLOG = 45\nIP_OLD_FW_ADD = 50\nIP_OLD_FW_DEL = 51\nIP_OLD_FW_FLUSH = 52\nIP_OLD_FW_ZERO = 53\nIP_OLD_FW_GET = 54\nIP_NAT__XXX = 55\nIP_OLD_FW_RESETLOG = 56\nIP_DUMMYNET_CONFIGURE = 60\nIP_DUMMYNET_DEL = 61\nIP_DUMMYNET_FLUSH = 62\nIP_DUMMYNET_GET = 64\nIP_TRAFFIC_MGT_BACKGROUND = 65\nIP_MULTICAST_IFINDEX = 66\nIP_ADD_SOURCE_MEMBERSHIP = 70\nIP_DROP_SOURCE_MEMBERSHIP = 71\nIP_BLOCK_SOURCE = 72\nIP_UNBLOCK_SOURCE = 73\nIP_MSFILTER = 74\nMCAST_JOIN_GROUP = 80\nMCAST_LEAVE_GROUP = 81\nMCAST_JOIN_SOURCE_GROUP = 82\nMCAST_LEAVE_SOURCE_GROUP = 83\nMCAST_BLOCK_SOURCE = 84\nMCAST_UNBLOCK_SOURCE = 85\nIP_DEFAULT_MULTICAST_TTL = 1\nIP_DEFAULT_MULTICAST_LOOP = 1\nIP_MIN_MEMBERSHIPS = 31\nIP_MAX_MEMBERSHIPS = 4095\nIP_MAX_GROUP_SRC_FILTER = 512\nIP_MAX_SOCK_SRC_FILTER = 128\nIP_MAX_SOCK_MUTE_FILTER = 128\nMCAST_UNDEFINED = 0\nMCAST_INCLUDE = 1\nMCAST_EXCLUDE = 2\nIP_PORTRANGE_DEFAULT = 0\nIP_PORTRANGE_HIGH = 1\nIP_PORTRANGE_LOW = 2\nIPPROTO_MAXID = (IPPROTO_AH + 1)\nIPCTL_FORWARDING = 1\nIPCTL_SENDREDIRECTS = 2\nIPCTL_DEFTTL = 3\nIPCTL_DEFMTU = 4\nIPCTL_RTEXPIRE = 5\nIPCTL_RTMINEXPIRE = 6\nIPCTL_RTMAXCACHE = 7\nIPCTL_SOURCEROUTE = 8\nIPCTL_DIRECTEDBROADCAST = 9\nIPCTL_INTRQMAXLEN = 10\nIPCTL_INTRQDROPS = 11\nIPCTL_STATS = 12\nIPCTL_ACCEPTSOURCEROUTE = 
13\nIPCTL_FASTFORWARDING = 14\nIPCTL_KEEPFAITH = 15\nIPCTL_GIF_TTL = 16\nIPCTL_MAXID = 17\n\n# Included from netinet6/in6.h\n__KAME_VERSION = \"2009/apple-darwin\"\nIPV6PORT_RESERVED = 1024\nIPV6PORT_ANONMIN = 49152\nIPV6PORT_ANONMAX = 65535\nIPV6PORT_RESERVEDMIN = 600\nIPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)\nINET6_ADDRSTRLEN = 46\ndef IN6_IS_ADDR_UNSPECIFIED(a): return \\\n\ndef IN6_IS_ADDR_LOOPBACK(a): return \\\n\ndef IN6_IS_ADDR_V4COMPAT(a): return \\\n\ndef IN6_IS_ADDR_V4MAPPED(a): return \\\n\n__IPV6_ADDR_SCOPE_NODELOCAL = 0x01\n__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01\n__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02\n__IPV6_ADDR_SCOPE_SITELOCAL = 0x05\n__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08\n__IPV6_ADDR_SCOPE_GLOBAL = 0x0e\ndef IN6_IS_ADDR_LINKLOCAL(a): return \\\n\ndef IN6_IS_ADDR_SITELOCAL(a): return \\\n\ndef IN6_IS_ADDR_UNIQUE_LOCAL(a): return \\\n\ndef IN6_IS_ADDR_MC_NODELOCAL(a): return \\\n\ndef IN6_IS_ADDR_MC_LINKLOCAL(a): return \\\n\ndef IN6_IS_ADDR_MC_SITELOCAL(a): return \\\n\ndef IN6_IS_ADDR_MC_ORGLOCAL(a): return \\\n\ndef IN6_IS_ADDR_MC_GLOBAL(a): return \\\n\nIPV6_OPTIONS = 1\nIPV6_RECVOPTS = 5\nIPV6_RECVRETOPTS = 6\nIPV6_RECVDSTADDR = 7\nIPV6_RETOPTS = 8\nIPV6_SOCKOPT_RESERVED1 = 3\nIPV6_UNICAST_HOPS = 4\nIPV6_MULTICAST_IF = 9\nIPV6_MULTICAST_HOPS = 10\nIPV6_MULTICAST_LOOP = 11\nIPV6_JOIN_GROUP = 12\nIPV6_LEAVE_GROUP = 13\nIPV6_PORTRANGE = 14\nICMP6_FILTER = 18\nIPV6_2292PKTINFO = 19\nIPV6_2292HOPLIMIT = 20\nIPV6_2292NEXTHOP = 21\nIPV6_2292HOPOPTS = 22\nIPV6_2292DSTOPTS = 23\nIPV6_2292RTHDR = 24\nIPV6_2292PKTOPTIONS = 25\nIPV6_PKTINFO = IPV6_2292PKTINFO\nIPV6_HOPLIMIT = IPV6_2292HOPLIMIT\nIPV6_NEXTHOP = IPV6_2292NEXTHOP\nIPV6_HOPOPTS = IPV6_2292HOPOPTS\nIPV6_DSTOPTS = IPV6_2292DSTOPTS\nIPV6_RTHDR = IPV6_2292RTHDR\nIPV6_PKTOPTIONS = IPV6_2292PKTOPTIONS\nIPV6_CHECKSUM = 26\nIPV6_V6ONLY = 27\nIPV6_BINDV6ONLY = IPV6_V6ONLY\nIPV6_IPSEC_POLICY = 28\nIPV6_FAITH = 29\nIPV6_FW_ADD = 30\nIPV6_FW_DEL = 31\nIPV6_FW_FLUSH = 32\nIPV6_FW_ZERO = 33\nIPV6_FW_GET = 34\nIPV6_RECVTCLASS = 35\nIPV6_TCLASS = 36\nIPV6_RTHDRDSTOPTS = 57\nIPV6_RECVPKTINFO = 61\nIPV6_RECVHOPLIMIT = 37\nIPV6_RECVRTHDR = 38\nIPV6_RECVHOPOPTS = 39\nIPV6_RECVDSTOPTS = 40\nIPV6_USE_MIN_MTU = 42\nIPV6_RECVPATHMTU = 43\nIPV6_PATHMTU = 44\nIPV6_REACHCONF = 45\nIPV6_3542PKTINFO = 46\nIPV6_3542HOPLIMIT = 47\nIPV6_3542NEXTHOP = 48\nIPV6_3542HOPOPTS = 49\nIPV6_3542DSTOPTS = 50\nIPV6_3542RTHDR = 51\nIPV6_PKTINFO = IPV6_3542PKTINFO\nIPV6_HOPLIMIT = IPV6_3542HOPLIMIT\nIPV6_NEXTHOP = IPV6_3542NEXTHOP\nIPV6_HOPOPTS = IPV6_3542HOPOPTS\nIPV6_DSTOPTS = IPV6_3542DSTOPTS\nIPV6_RTHDR = IPV6_3542RTHDR\nIPV6_AUTOFLOWLABEL = 59\nIPV6_DONTFRAG = 62\nIPV6_PREFER_TEMPADDR = 63\nIPV6_MSFILTER = 74\nIPV6_BOUND_IF = 125\nIPV6_RTHDR_LOOSE = 0\nIPV6_RTHDR_STRICT = 1\nIPV6_RTHDR_TYPE_0 = 0\nIPV6_DEFAULT_MULTICAST_HOPS = 1\nIPV6_DEFAULT_MULTICAST_LOOP = 1\nIPV6_MIN_MEMBERSHIPS = 31\nIPV6_MAX_MEMBERSHIPS = 4095\nIPV6_MAX_GROUP_SRC_FILTER = 512\nIPV6_MAX_SOCK_SRC_FILTER = 128\nIPV6_PORTRANGE_DEFAULT = 0\nIPV6_PORTRANGE_HIGH = 1\nIPV6_PORTRANGE_LOW = 2\nIPV6PROTO_MAXID = (IPPROTO_PIM + 1)\nIPV6CTL_FORWARDING = 1\nIPV6CTL_SENDREDIRECTS = 2\nIPV6CTL_DEFHLIM = 3\nIPV6CTL_DEFMTU = 4\nIPV6CTL_FORWSRCRT = 5\nIPV6CTL_STATS = 6\nIPV6CTL_MRTSTATS = 7\nIPV6CTL_MRTPROTO = 8\nIPV6CTL_MAXFRAGPACKETS = 9\nIPV6CTL_SOURCECHECK = 10\nIPV6CTL_SOURCECHECK_LOGINT = 11\nIPV6CTL_ACCEPT_RTADV = 12\nIPV6CTL_KEEPFAITH = 13\nIPV6CTL_LOG_INTERVAL = 14\nIPV6CTL_HDRNESTLIMIT = 15\nIPV6CTL_DAD_COUNT = 16\nIPV6CTL_AUTO_FLOWLABEL = 17\nIPV6CTL_DEFMCASTHLIM = 18\nIPV6CTL_GIF_HLIM = 
19\nIPV6CTL_KAME_VERSION = 20\nIPV6CTL_USE_DEPRECATED = 21\nIPV6CTL_RR_PRUNE = 22\nIPV6CTL_MAPPED_ADDR = 23\nIPV6CTL_V6ONLY = 24\nIPV6CTL_RTEXPIRE = 25\nIPV6CTL_RTMINEXPIRE = 26\nIPV6CTL_RTMAXCACHE = 27\nIPV6CTL_USETEMPADDR = 32\nIPV6CTL_TEMPPLTIME = 33\nIPV6CTL_TEMPVLTIME = 34\nIPV6CTL_AUTO_LINKLOCAL = 35\nIPV6CTL_RIP6STATS = 36\nIPV6CTL_PREFER_TEMPADDR = 37\nIPV6CTL_ADDRCTLPOLICY = 38\nIPV6CTL_USE_DEFAULTZONE = 39\nIPV6CTL_MAXFRAGS = 41\nIPV6CTL_MCAST_PMTU = 44\nIPV6CTL_NEIGHBORGCTHRESH = 46\nIPV6CTL_MAXIFPREFIXES = 47\nIPV6CTL_MAXIFDEFROUTERS = 48\nIPV6CTL_MAXDYNROUTES = 49\nICMPV6CTL_ND6_ONLINKNSRFC4861 = 50\nIPV6CTL_MAXID = 51\n", "id": "4249398", "language": "Python", "matching_score": 2.4270453453063965, "max_stars_count": 4, "path": "Lib/plat-tvos/IN.py" }, { "content": "\"\"\"Tests for distutils.util.\"\"\"\nimport os\nimport sys\nimport unittest\nfrom copy import copy\nfrom test.support import run_unittest\nfrom unittest import mock\n\nfrom distutils.errors import DistutilsPlatformError, DistutilsByteCompileError\nfrom distutils.util import (get_platform, convert_path, change_root,\n check_environ, split_quoted, strtobool,\n rfc822_escape, byte_compile,\n grok_environment_error)\nfrom distutils import util # used to patch _environ_checked\nfrom distutils.sysconfig import get_config_vars\nfrom distutils import sysconfig\nfrom distutils.tests import support\nimport _osx_support\n\nclass UtilTestCase(support.EnvironGuard, unittest.TestCase):\n\n def setUp(self):\n super(UtilTestCase, self).setUp()\n # saving the environment\n self.name = os.name\n self.platform = sys.platform\n self.version = sys.version\n self.sep = os.sep\n self.join = os.path.join\n self.isabs = os.path.isabs\n self.splitdrive = os.path.splitdrive\n self._config_vars = copy(sysconfig._config_vars)\n\n # patching os.uname\n if hasattr(os, 'uname'):\n self.uname = os.uname\n self._uname = os.uname()\n else:\n self.uname = None\n self._uname = None\n\n os.uname = self._get_uname\n\n def tearDown(self):\n # getting back the environment\n os.name = self.name\n sys.platform = self.platform\n sys.version = self.version\n os.sep = self.sep\n os.path.join = self.join\n os.path.isabs = self.isabs\n os.path.splitdrive = self.splitdrive\n if self.uname is not None:\n os.uname = self.uname\n else:\n del os.uname\n sysconfig._config_vars = copy(self._config_vars)\n super(UtilTestCase, self).tearDown()\n\n def _set_uname(self, uname):\n self._uname = uname\n\n def _get_uname(self):\n return self._uname\n\n def test_get_platform(self):\n\n # windows XP, 32bits\n os.name = 'nt'\n sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '\n '[MSC v.1310 32 bit (Intel)]')\n sys.platform = 'win32'\n self.assertEqual(get_platform(), 'win32')\n\n # windows XP, amd64\n os.name = 'nt'\n sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '\n '[MSC v.1310 32 bit (Amd64)]')\n sys.platform = 'win32'\n self.assertEqual(get_platform(), 'win-amd64')\n\n # macbook\n os.name = 'posix'\n sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '\n '\\n[GCC 4.0.1 (Apple Computer, Inc. 
build 5341)]')\n sys.platform = 'darwin'\n self._set_uname(('Darwin', 'macziade', '8.11.1',\n ('Darwin Kernel Version 8.11.1: '\n 'Wed Oct 10 18:23:28 PDT 2007; '\n 'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))\n _osx_support._remove_original_values(get_config_vars())\n get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'\n\n get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '\n '-fwrapv -O3 -Wall -Wstrict-prototypes')\n\n cursize = sys.maxsize\n sys.maxsize = (2 ** 31)-1\n try:\n self.assertEqual(get_platform(), 'macosx-10.3-i386')\n finally:\n sys.maxsize = cursize\n\n # macbook with fat binaries (fat, universal or fat64)\n _osx_support._remove_original_values(get_config_vars())\n get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'\n get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '\n '/Developer/SDKs/MacOSX10.4u.sdk '\n '-fno-strict-aliasing -fno-common '\n '-dynamic -DNDEBUG -g -O3')\n\n self.assertEqual(get_platform(), 'macosx-10.4-fat')\n\n _osx_support._remove_original_values(get_config_vars())\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.1'\n self.assertEqual(get_platform(), 'macosx-10.4-fat')\n\n\n _osx_support._remove_original_values(get_config_vars())\n get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '\n '/Developer/SDKs/MacOSX10.4u.sdk '\n '-fno-strict-aliasing -fno-common '\n '-dynamic -DNDEBUG -g -O3')\n\n self.assertEqual(get_platform(), 'macosx-10.4-intel')\n\n _osx_support._remove_original_values(get_config_vars())\n get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '\n '/Developer/SDKs/MacOSX10.4u.sdk '\n '-fno-strict-aliasing -fno-common '\n '-dynamic -DNDEBUG -g -O3')\n self.assertEqual(get_platform(), 'macosx-10.4-fat3')\n\n _osx_support._remove_original_values(get_config_vars())\n get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '\n '/Developer/SDKs/MacOSX10.4u.sdk '\n '-fno-strict-aliasing -fno-common '\n '-dynamic -DNDEBUG -g -O3')\n self.assertEqual(get_platform(), 'macosx-10.4-universal')\n\n _osx_support._remove_original_values(get_config_vars())\n get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '\n '/Developer/SDKs/MacOSX10.4u.sdk '\n '-fno-strict-aliasing -fno-common '\n '-dynamic -DNDEBUG -g -O3')\n\n self.assertEqual(get_platform(), 'macosx-10.4-fat64')\n\n for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):\n _osx_support._remove_original_values(get_config_vars())\n get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '\n '/Developer/SDKs/MacOSX10.4u.sdk '\n '-fno-strict-aliasing -fno-common '\n '-dynamic -DNDEBUG -g -O3'%(arch,))\n\n self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,))\n\n\n # linux debian sarge\n os.name = 'posix'\n sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '\n '\\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')\n sys.platform = 'linux2'\n self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',\n '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))\n\n self.assertEqual(get_platform(), 'linux-i686')\n\n # XXX more platforms to tests here\n\n def test_convert_path(self):\n # linux/mac\n os.sep = '/'\n def _join(path):\n return '/'.join(path)\n os.path.join = _join\n\n self.assertEqual(convert_path('/home/to/my/stuff'),\n '/home/to/my/stuff')\n\n # win\n os.sep = '\\\\'\n def _join(*path):\n return '\\\\'.join(path)\n os.path.join = _join\n\n self.assertRaises(ValueError, convert_path, '/home/to/my/stuff')\n self.assertRaises(ValueError, convert_path, 'home/to/my/stuff/')\n\n 
self.assertEqual(convert_path('home/to/my/stuff'),\n 'home\\\\to\\\\my\\\\stuff')\n self.assertEqual(convert_path('.'),\n os.curdir)\n\n def test_change_root(self):\n # linux/mac\n os.name = 'posix'\n def _isabs(path):\n return path[0] == '/'\n os.path.isabs = _isabs\n def _join(*path):\n return '/'.join(path)\n os.path.join = _join\n\n self.assertEqual(change_root('/root', '/old/its/here'),\n '/root/old/its/here')\n self.assertEqual(change_root('/root', 'its/here'),\n '/root/its/here')\n\n # windows\n os.name = 'nt'\n def _isabs(path):\n return path.startswith('c:\\\\')\n os.path.isabs = _isabs\n def _splitdrive(path):\n if path.startswith('c:'):\n return ('', path.replace('c:', ''))\n return ('', path)\n os.path.splitdrive = _splitdrive\n def _join(*path):\n return '\\\\'.join(path)\n os.path.join = _join\n\n self.assertEqual(change_root('c:\\\\root', 'c:\\\\old\\\\its\\\\here'),\n 'c:\\\\root\\\\old\\\\its\\\\here')\n self.assertEqual(change_root('c:\\\\root', 'its\\\\here'),\n 'c:\\\\root\\\\its\\\\here')\n\n # BugsBunny os (it's a great os)\n os.name = 'BugsBunny'\n self.assertRaises(DistutilsPlatformError,\n change_root, 'c:\\\\root', 'its\\\\here')\n\n # XXX platforms to be covered: mac\n\n @unittest.skipUnless((os.name == 'posix' and hasattr(os, 'fork') and os.allows_subprocesses),\n \"distutils cannot spawn child processes\")\n def test_check_environ(self):\n util._environ_checked = 0\n os.environ.pop('HOME', None)\n\n check_environ()\n\n self.assertEqual(os.environ['PLAT'], get_platform())\n self.assertEqual(util._environ_checked, 1)\n\n @unittest.skipUnless(os.name == 'posix', 'specific to posix')\n def test_check_environ_getpwuid(self):\n util._environ_checked = 0\n os.environ.pop('HOME', None)\n\n import pwd\n\n # only set pw_dir field, other fields are not used\n result = pwd.struct_passwd((None, None, None, None, None,\n '/home/distutils', None))\n with mock.patch.object(pwd, 'getpwuid', return_value=result):\n check_environ()\n self.assertEqual(os.environ['HOME'], '/home/distutils')\n\n util._environ_checked = 0\n os.environ.pop('HOME', None)\n\n # bpo-10496: Catch pwd.getpwuid() error\n with mock.patch.object(pwd, 'getpwuid', side_effect=KeyError):\n check_environ()\n self.assertNotIn('HOME', os.environ)\n\n def test_split_quoted(self):\n self.assertEqual(split_quoted('\"\"one\"\" \"two\" \\'three\\' \\\\four'),\n ['one', 'two', 'three', 'four'])\n\n def test_strtobool(self):\n yes = ('y', 'Y', 'yes', 'True', 't', 'true', 'True', 'On', 'on', '1')\n no = ('n', 'no', 'f', 'false', 'off', '0', 'Off', 'No', 'N')\n\n for y in yes:\n self.assertTrue(strtobool(y))\n\n for n in no:\n self.assertFalse(strtobool(n))\n\n def test_rfc822_escape(self):\n header = 'I am a\\npoor\\nlonesome\\nheader\\n'\n res = rfc822_escape(header)\n wanted = ('I am a%(8s)spoor%(8s)slonesome%(8s)s'\n 'header%(8s)s') % {'8s': '\\n'+8*' '}\n self.assertEqual(res, wanted)\n\n def test_dont_write_bytecode(self):\n # makes sure byte_compile raise a DistutilsError\n # if sys.dont_write_bytecode is True\n old_dont_write_bytecode = sys.dont_write_bytecode\n sys.dont_write_bytecode = True\n try:\n self.assertRaises(DistutilsByteCompileError, byte_compile, [])\n finally:\n sys.dont_write_bytecode = old_dont_write_bytecode\n\n def test_grok_environment_error(self):\n # test obsolete function to ensure backward compat (#4931)\n exc = IOError(\"Unable to find batch file\")\n msg = grok_environment_error(exc)\n self.assertEqual(msg, \"error: Unable to find batch file\")\n\n\ndef test_suite():\n return 
unittest.makeSuite(UtilTestCase)\n\nif __name__ == \"__main__\":\n run_unittest(test_suite())\n", "id": "12739245", "language": "Python", "matching_score": 1.3299345970153809, "max_stars_count": 4, "path": "Lib/distutils/tests/test_util.py" }, { "content": "# from argparse import ArgumentParser\nimport os\nimport sys\n\nimport django\nfrom django.conf import settings\nfrom django.core.management import execute_from_command_line\n\nTESTS_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n\ndef run_test_suite():\n settings.configure(\n DATABASES={\n \"default\": {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(TESTS_ROOT, 'db.sqlite3'),\n \"USER\": \"\",\n \"PASSWORD\": \"\",\n \"HOST\": \"\",\n \"PORT\": \"\",\n },\n },\n INSTALLED_APPS=[\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.sites\",\n \"rhouser\",\n ],\n )\n\n django.setup()\n\n execute_from_command_line(['manage.py', 'test'])\n\nif __name__ == \"__main__\":\n run_test_suite()\n", "id": "6855597", "language": "Python", "matching_score": 0.2043939232826233, "max_stars_count": 5, "path": "runtests.py" }, { "content": "\"\"\"\nForward compatible Cross Site Request Forgery Middleware.\n\nThis module provides a middleware that implements protection\nagainst request forgeries from other sites.\n\nIt is required to allow Django deployments < 1.10 to accept\ntokens issued on Django 1.10+. This may occur if you have\npartial or staged rollouts of a new codebase that includes\nan upgrade of Django.\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport re\nimport string\n\nfrom django.conf import settings\nfrom django.utils.crypto import constant_time_compare\nfrom django.utils.encoding import force_text\nfrom django.middleware.csrf import (\n _get_new_csrf_key, same_origin, constant_time_compare,\n CsrfViewMiddleware as BaseCsrfViewMiddleware\n)\nfrom django.utils.six.moves import zip\n\nREASON_NO_REFERER = \"Referer checking failed - no Referer.\"\nREASON_BAD_REFERER = \"Referer checking failed - %s does not match %s.\"\nREASON_NO_CSRF_COOKIE = \"CSRF cookie not set.\"\nREASON_BAD_TOKEN = \"CSRF token missing or incorrect.\"\n\nCSRF_SECRET_LENGTH = 32\nCSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH\nCSRF_ALLOWED_CHARS = string.ascii_letters + string.digits\n\n\ndef _unsalt_cipher_token(token):\n \"\"\"\n Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length\n CSRF_TOKEN_LENGTH, and that its first half is a salt), use it to decrypt\n the second half to produce the original secret.\n \"\"\"\n salt = token[:CSRF_SECRET_LENGTH]\n token = token[CSRF_SECRET_LENGTH:]\n chars = CSRF_ALLOWED_CHARS\n pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in salt))\n secret = ''.join(chars[x - y] for x, y in pairs) # Note negative values are ok\n return secret\n\n\ndef _sanitize_token(token):\n # Allow only ASCII alphanumerics\n if re.search('[^a-zA-Z0-9]', force_text(token)):\n return _get_new_csrf_key()\n elif len(token) == CSRF_TOKEN_LENGTH:\n # Django versions >= 1.10 set cookies to values of CSRF_TOKEN_LENGTH\n # alphanumeric characters. 
For forwards compatibility, accept\n # such values as salted secrets.\n return _unsalt_cipher_token(token)\n elif len(token) == CSRF_SECRET_LENGTH:\n return token\n return _get_new_csrf_key()\n\n\nclass CsrfViewMiddleware(BaseCsrfViewMiddleware):\n def process_view(self, request, callback, callback_args, callback_kwargs):\n\n if getattr(request, 'csrf_processing_done', False):\n return None\n\n try:\n csrf_token = _sanitize_token(\n request.COOKIES[settings.CSRF_COOKIE_NAME])\n # Use same token next time\n request.META['CSRF_COOKIE'] = csrf_token\n except KeyError:\n csrf_token = None\n # Generate token and store it in the request, so it's\n # available to the view.\n request.META[\"CSRF_COOKIE\"] = _get_new_csrf_key()\n\n # Wait until request.META[\"CSRF_COOKIE\"] has been manipulated before\n # bailing out, so that get_token still works\n if getattr(callback, 'csrf_exempt', False):\n return None\n\n # Assume that anything not defined as 'safe' by RFC2616 needs protection\n if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):\n if getattr(request, '_dont_enforce_csrf_checks', False):\n # Mechanism to turn off CSRF checks for test suite.\n # It comes after the creation of CSRF cookies, so that\n # everything else continues to work exactly the same\n # (e.g. cookies are sent, etc.), but before any\n # branches that call reject().\n return self._accept(request)\n\n if request.is_secure():\n # Suppose user visits http://example.com/\n # An active network attacker (man-in-the-middle, MITM) sends a\n # POST form that targets https://example.com/detonate-bomb/ and\n # submits it via JavaScript.\n #\n # The attacker will need to provide a CSRF cookie and token, but\n # that's no problem for a MITM and the session-independent\n # nonce we're using. So the MITM can circumvent the CSRF\n # protection. This is true for any HTTP connection, but anyone\n # using HTTPS expects better! For this reason, for\n # https://example.com/ we need additional protection that treats\n # http://example.com/ as completely untrusted. Under HTTPS,\n # Barth et al. found that the Referer header is missing for\n # same-domain requests in only about 0.2% of cases or less, so\n # we can use strict Referer checking.\n referer = force_text(\n request.META.get('HTTP_REFERER'),\n strings_only=True,\n errors='replace',\n )\n if referer is None:\n return self._reject(request, REASON_NO_REFERER)\n\n # Note that request.get_host() includes the port.\n good_referer = 'https://%s/' % request.get_host()\n if not same_origin(referer, good_referer):\n reason = REASON_BAD_REFERER % (referer, good_referer)\n return self._reject(request, reason)\n\n if csrf_token is None:\n # No CSRF cookie. For POST requests, we insist on a CSRF cookie,\n # and in this way we can avoid all CSRF attacks, including login\n # CSRF.\n return self._reject(request, REASON_NO_CSRF_COOKIE)\n\n # Check non-cookie token for match.\n request_csrf_token = \"\"\n if request.method == \"POST\":\n try:\n request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')\n except IOError:\n # Handle a broken connection before we've completed reading\n # the POST data. 
process_view shouldn't raise any\n # exceptions, so we'll ignore and serve the user a 403\n # (assuming they're still listening, which they probably\n # aren't because of the error).\n pass\n\n if request_csrf_token == \"\":\n # Fall back to X-CSRFToken, to make things easier for AJAX,\n # and possible for PUT/DELETE.\n request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')\n\n request_csrf_token = _sanitize_token(request_csrf_token)\n if not constant_time_compare(request_csrf_token, csrf_token):\n return self._reject(request, REASON_BAD_TOKEN)\n\n return self._accept(request)\n", "id": "5893182", "language": "Python", "matching_score": 1.0100061893463135, "max_stars_count": 8, "path": "birdcage/v1_11/csrf.py" }, { "content": "import dom\nimport sys\n\ndef is_valid_cc(number):\n digits = [int(x) for x in number]\n total = 0\n parity = len(digits) % 2\n for i, digit in enumerate(digits):\n if i % 2 == parity:\n digit *= 2\n if (digit > 9):\n digit -= 9\n total += digit\n return total % 10 == 0\n\ncard_number = dom.document.getElementById(sys.argv[1]).value\nif card_number:\n if is_valid_cc(card_number):\n dom.window.alert(\"Card is valid!!\")\n else:\n dom.window.alert(\"Card is NOT valid!!\")\n", "id": "6310924", "language": "Python", "matching_score": 0.635421633720398, "max_stars_count": 13, "path": "batavia/luhn.py" }, { "content": "import os\nfrom urllib.parse import quote\n\nimport toga\nfrom toga.style import Pack\n\nimport podium\n\n\nclass PrimarySlideWindow(toga.MainWindow):\n def __init__(self, deck, secondary):\n self.deck = deck\n self.secondary = secondary\n super().__init__(\n title=self.deck.title,\n position=(200, 200),\n size=(984 if self.deck.aspect == '16:9' else 738, 576)\n )\n self.create()\n\n def create(self):\n self.html_view = toga.WebView(\n style=Pack(\n flex=1,\n width=984 if self.deck.aspect == '16:9' else 738,\n height=576\n ),\n on_key_down=self.deck.on_key_press\n )\n self.content = self.html_view\n\n @property\n def template_name(self):\n return \"slide-template.html\"\n\n def redraw(self, slide='1'):\n with open(os.path.join(self.deck.resource_path, self.template_name), 'r') as data:\n template = data.read()\n\n content = template.format(\n resource_path=os.path.join(self.deck.resource_path),\n theme=self.deck.theme,\n style_overrides=self.deck.style_overrides,\n aspect_ratio_tag=self.deck.aspect.replace(':', '-'),\n aspect_ratio=self.deck.aspect,\n slide_content=self.deck.content,\n slide_number=slide,\n )\n\n self.html_view.set_content(self.deck.fileURL, content)\n\n def on_close(self):\n self.secondary.close()\n\n\nclass SecondarySlideWindow(toga.Window):\n def __init__(self, deck):\n self.deck = deck\n super().__init__(\n title=self.deck.title + \": Speaker notes\",\n position=(100, 100),\n size=(984 if self.deck.aspect == '16:9' else 738, 576),\n closeable=False\n )\n self.create()\n\n def create(self):\n self.html_view = toga.WebView(\n style=Pack(\n flex=1,\n width=984 if self.deck.aspect == '16:9' else 738,\n height=576\n ),\n on_key_down=self.deck.on_key_press\n )\n self.content = self.html_view\n\n @property\n def template_name(self):\n return \"notes-template.html\"\n\n def redraw(self, slide='1'):\n with open(os.path.join(self.deck.resource_path, self.template_name), 'r') as data:\n template = data.read()\n\n content = template.format(\n resource_path=os.path.join(self.deck.resource_path),\n theme=self.deck.theme,\n style_overrides=self.deck.style_overrides,\n aspect_ratio_tag=self.deck.aspect.replace(':', '-'),\n 
aspect_ratio=self.deck.aspect,\n slide_content=self.deck.content,\n slide_number=slide,\n )\n\n self.html_view.set_content(self.deck.fileURL, content)\n\n\nclass SlideDeck(toga.Document):\n def __init__(self, filename, app):\n super().__init__(\n filename=filename,\n document_type='Podium Slide Deck',\n app=app,\n )\n\n self.aspect = '16:9'\n self.window_2 = SecondarySlideWindow(self)\n self.window_2.app = self.app\n\n self.window_1 = PrimarySlideWindow(self, self.window_2)\n self.window_1.app = self.app\n\n self.reversed_displays = False\n self.paused = False\n\n @property\n def title(self):\n return os.path.splitext(os.path.basename(self.filename))[0]\n\n @property\n def resource_path(self):\n return os.path.join(\n os.path.dirname(os.path.abspath(podium.__file__)),\n 'resources',\n )\n\n def read(self):\n # TODO: There's only 1 theme.\n self.theme = 'default'\n if os.path.isdir(self.filename):\n # Multi-file .podium files must contain slides.md;\n # may contain style.css\n styleFile = os.path.join(self.filename, \"style.css\")\n contentFile = os.path.join(self.filename, \"slides.md\")\n\n with open(contentFile, 'r', encoding='utf-8') as f:\n self.content = f.read()\n\n if os.path.exists(styleFile):\n with open(styleFile, 'r', encoding='utf-8') as f:\n self.style_overrides = f.read()\n else:\n self.style_overrides = ''\n else:\n # Single file can just be a standalone markdown file\n with open(self.filename, 'r', encoding='utf-8') as f:\n self.content = f.read()\n self.style_overrides = ''\n\n def show(self):\n self.window_1.redraw()\n self.window_1.show()\n\n self.window_2.redraw()\n self.window_2.show()\n\n @property\n def fileURL(self):\n return 'file://{}/'.format(quote(self.filename))\n\n def switch_screens(self):\n print(\"Switch screens\")\n if self.app.is_full_screen:\n self.reversed_displays = not self.reversed_displays\n if self.reversed_displays:\n self.app.set_full_screen(self.window_2, self.window_1)\n else:\n self.app.set_full_screen(self.window_1, self.window_2)\n else:\n print('Not in full screen mode')\n\n def change_aspect_ratio(self):\n print(\"Switch aspect ratio\")\n if self.aspect == '16:9':\n self.aspect = '4:3'\n else:\n self.aspect = '16:9'\n\n if self.app.is_full_screen:\n # If we're fullscreen, just reload to apply different\n # aspect-related styles.\n self.reload()\n else:\n # If we're not fullscreen, we need to re-create the\n # display windows with the correct aspect ratio.\n self.window_1.close()\n\n self.window_2 = SecondarySlideWindow(self)\n self.window_1 = PrimarySlideWindow(self, self.window_2)\n\n self.window_1.app = self.app\n self.window_2.app = self.app\n\n self.show()\n\n def toggle_full_screen(self):\n print(\"Toggle full screen\")\n if self.app.is_full_screen:\n self.app.exit_full_screen()\n self.app.show_cursor()\n else:\n if self.reversed_displays:\n self.app.set_full_screen(self.window_2, self.window_1)\n else:\n self.app.set_full_screen(self.window_1, self.window_2)\n\n self.app.hide_cursor()\n\n async def reload(self):\n self.read()\n\n slide = await self.window_1.html_view.evaluate_javascript(\"slideshow.getCurrentSlideNo()\")\n\n print(\"Current slide:\", slide)\n self.redraw(slide)\n\n def redraw(self, slide=None):\n self.window_1.redraw(slide)\n self.window_2.redraw(slide)\n\n async def on_key_press(self, widget, key, modifiers):\n print(\"KEY =\", key, \"modifiers=\", modifiers)\n if key == toga.Key.ESCAPE:\n if self.app.is_full_screen:\n self.toggle_full_screen()\n else:\n print('Not in full screen mode')\n\n elif key == 
toga.Key.F11:\n self.toggle_full_screen()\n\n elif key == toga.Key.P and (toga.Key.MOD_1 in modifiers):\n if self.app.is_full_screen:\n self.toggle_pause()\n else:\n self.toggle_full_screen()\n\n elif key == toga.Key.TAB and (toga.Key.MOD_1 in modifiers):\n if self.app.is_full_screen:\n self.switch_screens()\n else:\n print('Not in full screen mode')\n\n elif key == toga.Key.A and (toga.Key.MOD_1 in modifiers):\n self.change_aspect_ratio()\n\n elif key in (\n toga.Key.RIGHT,\n toga.Key.DOWN,\n toga.Key.SPACE,\n toga.Key.ENTER,\n toga.Key.PAGE_DOWN\n ):\n self.goto_next_slide()\n\n elif key in (toga.Key.LEFT, toga.Key.UP, toga.Key.PAGE_UP):\n self.goto_previous_slide()\n\n elif key == toga.Key.HOME:\n self.goto_first_slide()\n\n elif key == toga.Key.END:\n self.goto_last_slide()\n\n elif key == toga.Key.R and (toga.Key.MOD_1 in modifiers):\n await self.reload()\n\n elif key == toga.Key.T and (toga.Key.MOD_1 in modifiers):\n self.reset_timer()\n\n def reset_timer(self):\n print(\"Reset Timer\")\n\n self.window_1.html_view.invoke_javascript(\"slideshow.resetTimer()\")\n self.window_2.html_view.invoke_javascript(\"slideshow.resetTimer()\")\n\n def toggle_pause(self):\n if self.app.is_full_screen:\n if self.paused:\n print(\"Resume presentation\")\n self.window_1.html_view.invoke_javascript(\"slideshow.resume()\")\n self.window_2.html_view.invoke_javascript(\"slideshow.resume()\")\n self.paused = False\n else:\n print(\"Pause presentation\")\n self.window_1.html_view.invoke_javascript(\"slideshow.pause()\")\n self.window_2.html_view.invoke_javascript(\"slideshow.pause()\")\n self.paused = True\n else:\n print(\"Presentation not in fullscreen mode; pause/play disabled\")\n\n def goto_first_slide(self):\n print(\"Goto first slide\")\n\n self.window_1.html_view.invoke_javascript(\"slideshow.gotoFirstSlide()\")\n self.window_2.html_view.invoke_javascript(\"slideshow.gotoFirstSlide()\")\n\n def goto_last_slide(self):\n print(\"Goto previous slide\")\n\n self.window_1.html_view.invoke_javascript(\"slideshow.gotoLastSlide()\")\n self.window_2.html_view.invoke_javascript(\"slideshow.gotoLastSlide()\")\n\n def goto_next_slide(self):\n print(\"Goto next slide\")\n\n self.window_1.html_view.invoke_javascript(\"slideshow.gotoNextSlide()\")\n self.window_2.html_view.invoke_javascript(\"slideshow.gotoNextSlide()\")\n\n def goto_previous_slide(self):\n print(\"Goto previous slide\")\n\n self.window_1.html_view.invoke_javascript(\"slideshow.gotoPreviousSlide()\")\n self.window_2.html_view.invoke_javascript(\"slideshow.gotoPreviousSlide()\")\n", "id": "7188794", "language": "Python", "matching_score": 1.6976732015609741, "max_stars_count": 101, "path": "src/podium/deck.py" }, { "content": "\"\"\"\nAn app that does lots of stuff\n\"\"\"\nimport toga\nfrom toga.style import Pack\nfrom toga.style.pack import COLUMN, ROW\n\n\nclass HelloWorld(toga.App):\n\n def startup(self):\n \"\"\"\n Construct and show the Toga application.\n\n Usually, you would add your application to a main content box.\n We then create a main window (with a name matching the app), and\n show the main window.\n \"\"\"\n main_box = toga.Box()\n\n self.main_window = toga.MainWindow(title=self.name)\n self.main_window.content = main_box\n self.main_window.show()\n\n\ndef main():\n return HelloWorld('Hello World', 'com.example.helloworld')\n", "id": "12530720", "language": "Python", "matching_score": 0.8935773968696594, "max_stars_count": 0, "path": "src/helloworld/app.py" }, { "content": "import toga\n\nfrom toga.style import Pack\nfrom 
toga.style.pack import COLUMN, BOLD, RIGHT, CENTER\n\n\nclass Currency:\n def __init__(self, name, symbol, forex, format='%.2f'):\n self.name = name\n self.symbol = symbol\n self.forex = forex\n self.format = format\n\n def __str__(self):\n if self.symbol:\n return '{} ({})'.format(self.name, self.symbol)\n else:\n return self.name\n\n\nCURRENCIES = [\n # forex is the value of USD1 in the currency\n # Last updated Jan 8 2020\n Currency('AU Dollars', symbol='$', forex=1.45),\n Currency('AE Dirham', symbol=None, forex=3.67),\n Currency('AR Peso', symbol=None, forex=59.75, format='%.0f'),\n Currency('BR Real', symbol='R$', forex=4.07, format='%.0f'),\n Currency('CA Dollars', symbol='$', forex=1.3),\n Currency('CH Franc', symbol='Fr.', forex=0.97),\n Currency('CN Yuan', symbol='¥', forex=6.95, format='%.0f'),\n Currency('CO Peso', symbol='$', forex=3276.0, format='%.0f'),\n Currency('DA Krone', symbol='Kr', forex=6.70, format='%.0f'),\n Currency('ET Birr', symbol='Br', forex=32.12, format='%.0f'),\n Currency('Euro', symbol='€', forex=0.9),\n Currency('GB Pounds', symbol='£', forex=0.76),\n Currency('HK Dollars', symbol='$', forex=7.78, format='%.0f'),\n Currency('IN Rupee', symbol='₹', forex=72.02, format='%.0f'),\n Currency('ID Rupiah', symbol='Rp', forex=13925.8, format='%.0f'),\n Currency('IS Króna', symbol='kr', forex=123.07, format='%.0f'),\n Currency('IL New Shekel', symbol='₪', forex=3.47, format='%.0f'),\n Currency('JP Yen', symbol='¥', forex=108.37, format='%.0f'),\n Currency('KR Won', symbol='₩', forex=1171.31, format='%.0f'),\n Currency('MX Peso', symbol='$', forex=18.9, format='%.0f'),\n Currency('MY Ringgit', symbol='RM', forex=4.11, format='%.0f'),\n Currency('NZ Dollars', symbol='$', forex=1.5),\n Currency('PH Peso', symbol='₱', forex=50.9, format='%.0f'),\n Currency('PL Złoty', symbol='zł', forex=3.81, format='%.0f'),\n Currency('RS Dinar', symbol='дин', forex=105.38, format='%.0f'),\n Currency('RU Ruble', symbol='₽', forex=3.81, format='%.0f'),\n Currency('SE Krona', symbol='kr', forex=9.45, format='%.0f'),\n Currency('SG Dollars', symbol='$', forex=1.35),\n Currency('TH Baht', symbol='฿', forex=30.27, format='%.0f'),\n Currency('TR Lira', symbol='₺', forex=5.96, format='%.0f'),\n Currency('TW Dollars', symbol='NT$', forex=30.06, format='%.0f'),\n Currency('US Dollars', symbol='$', forex=1.0),\n Currency('ZA Rand', symbol='R', forex=14.32),\n]\n\nFOREX = {\n str(currency): currency\n for currency in CURRENCIES\n}\n\n\nclass TravelTips(toga.App):\n def calculate(self):\n try:\n self.my_tip_label.text = self.tip_rate.value\n\n value = float(self.amount.value)\n rate = int(self.tip_rate.value[:-1]) / 100.0\n local = FOREX[self.local_currency.value]\n my = FOREX[self.my_currency.value]\n self.tip.value = local.format % (value * rate)\n self.tip_total.value = local.format % (value * (rate + 1.0))\n\n my_amount = value / local.forex * my.forex\n self.my_amount.value = my.format % my_amount\n\n self.my_tip.value = my.format % (my_amount * rate)\n self.my_tip_total.value = my.format % (my_amount * (rate + 1.0))\n\n except (ValueError, TypeError) as e:\n if self.amount.value:\n value = '?'\n else:\n value = ''\n\n self.tip.value = value\n self.tip_total.value = value\n\n self.my_amount.value = value\n\n self.my_tip.value = value\n self.my_tip_total.value = value\n\n def on_select(self, widget):\n self.calculate()\n\n def on_change(self, widget):\n self.calculate()\n\n def startup(self):\n self.main_window = toga.MainWindow(\n title=self.formal_name,\n size=(320, 568)\n )\n\n box = 
toga.Box(style=Pack(direction=COLUMN, padding=5))\n\n local_box = toga.Box(\n style=Pack(\n padding=(20, 0, 5, 0),\n alignment=CENTER\n )\n )\n local_box.add(toga.Label(\n 'Local Currency:',\n style=Pack(\n width=120,\n padding_right=5,\n font_family='Helvetica',\n font_size=16,\n font_weight=BOLD,\n text_align=RIGHT,\n )\n ))\n\n self.local_currency = toga.Selection(\n items=[str(c) for c in CURRENCIES],\n on_select=self.on_select,\n style=Pack(flex=1)\n )\n local_box.add(self.local_currency)\n\n box.add(local_box)\n\n self.amount = toga.NumberInput(\n on_change=self.on_change,\n min_value=0,\n step='0.01',\n style=Pack(\n font_family='Helvetica',\n font_size=48,\n text_align=RIGHT\n )\n )\n box.add(self.amount)\n\n tip_box = toga.Box(style=Pack(padding_top=10))\n\n self.tip_rate = toga.Selection(\n items=[\"20%\", \"15%\", \"10%\"],\n on_select=self.on_select,\n style=Pack(flex=1)\n )\n tip_box.add(self.tip_rate)\n\n self.tip = toga.TextInput(\n readonly=True,\n style=Pack(flex=1, padding_left=5, text_align=RIGHT)\n )\n tip_box.add(self.tip)\n\n self.tip_total = toga.TextInput(\n readonly=True,\n style=Pack(flex=1, text_align=RIGHT)\n )\n tip_box.add(self.tip_total)\n\n box.add(tip_box)\n\n my_box = toga.Box(\n style=Pack(\n padding=(10, 0, 5, 0),\n alignment=CENTER\n )\n )\n my_box.add(toga.Label(\n 'My Currency:',\n style=Pack(\n width=120,\n padding_right=5,\n font_family='Helvetica',\n font_size=16,\n font_weight=BOLD,\n text_align=RIGHT\n )\n ))\n\n self.my_currency = toga.Selection(\n items=[str(c) for c in CURRENCIES],\n on_select=self.on_select,\n style=Pack(flex=1)\n )\n my_box.add(self.my_currency)\n\n box.add(my_box)\n\n self.my_amount = toga.TextInput(\n readonly=True,\n style=Pack(\n font_family='Helvetica',\n font_size=48,\n text_align=RIGHT\n )\n )\n box.add(self.my_amount)\n\n my_tip_box = toga.Box(style=Pack(padding_top=10))\n\n self.my_tip_label = toga.Label(\n '20%',\n style=Pack(flex=1, padding_left=5, text_align=RIGHT)\n )\n my_tip_box.add(self.my_tip_label)\n\n self.my_tip = toga.TextInput(\n readonly=True,\n style=Pack(flex=1, padding_left=5, text_align=RIGHT)\n )\n my_tip_box.add(self.my_tip)\n\n self.my_tip_total = toga.TextInput(\n readonly=True,\n style=Pack(flex=1, text_align=RIGHT)\n )\n my_tip_box.add(self.my_tip_total)\n\n box.add(my_tip_box)\n\n self.main_window.content = box\n self.main_window.show()\n\n\ndef main():\n return TravelTips()\n\n\nif __name__ == '__main__':\n main().main_loop()\n", "id": "1508778", "language": "Python", "matching_score": 1.5036321878433228, "max_stars_count": 21, "path": "src/traveltips/app.py" }, { "content": "from podium.app import main\n\ndef start():\n main().main_loop()\n\nif __name__ == '__main__':\n start()\n", "id": "5502265", "language": "Python", "matching_score": 0.20221634209156036, "max_stars_count": 101, "path": "src/podium/__main__.py" }, { "content": "#!/usr/bin/env python\nimport io\nimport re\nfrom setuptools import setup, find_packages\n\n\nwith io.open('./src/podium/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M)\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\n\nwith io.open('README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\nsetup(\n name='podium',\n version=version,\n description='A presentation tool for developers.',\n long_description=long_description,\n author='<NAME>',\n author_email='<EMAIL>',\n 
url='https://beeware.org/project/projects/applications/podium',\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\n 'podium': [\n 'resources/*.html',\n 'resources/*.js',\n 'resources/*.css',\n 'resources/*.png',\n 'resources/*.icns',\n 'resources/themes/default/*.css',\n 'resources/themes/default/*.woff',\n ]\n },\n include_package_data=True,\n install_requires=[\n ],\n license='New BSD',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n test_suite='tests',\n zip_safe=False,\n options={\n 'app': {\n 'formal_name': 'Podium',\n 'bundle': 'org.beeware',\n 'document_types': {\n 'deck': {\n 'description': 'Podium Slide Deck',\n 'extension': 'podium',\n 'icon': 'src/podium/resources/podium-deck',\n 'url': 'https://beeware.org/project/projects/applications/podium/',\n }\n }\n },\n 'macos': {\n 'app_requires': [\n 'toga-cocoa>=0.3.0.dev14'\n ],\n 'icon': 'src/podium/resources/podium',\n },\n 'linux': {\n 'app_requires': [\n 'toga-gtk>=0.3.0.dev14'\n ],\n }\n }\n)\n", "id": "6115796", "language": "Python", "matching_score": 5.367831230163574, "max_stars_count": 0, "path": "setup.py" }, { "content": "#/usr/bin/env python\nimport io\n# import re\nfrom setuptools import setup, find_packages\n\n\nwith io.open('README.rst', encoding='utf8') as readme:\n long_description = readme.read()\n\n\nsetup(\n name='rho-django-user',\n version='1.0.0',\n description=\"RKM's Humble Opinion: A custom Django user model for best practices email-based login.\",\n long_description=long_description,\n author='<NAME>',\n author_email='<EMAIL>',\n url='http://github.com/freakboy3742/rho-django-user/',\n packages=find_packages(exclude=['docs', 'tests']),\n install_requires=[\n 'django>=1.8'\n ],\n license='New BSD',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development',\n 'Topic :: Utilities',\n ],\n test_suite='tests'\n)\n", "id": "5316310", "language": "Python", "matching_score": 0.3999705910682678, "max_stars_count": 5, "path": "setup.py" }, { "content": "from __future__ import unicode_literals\n\nimport django\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.hashers import MD5PasswordHasher\nfrom django.contrib.auth.models import AnonymousUser, Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.test import TestCase, override_settings, modify_settings\n\nfrom rhouser.models import User\n\n\nclass CountingMD5PasswordHasher(MD5PasswordHasher):\n \"\"\"Hasher that counts how many times it computes a hash.\"\"\"\n\n calls = 0\n\n def encode(self, *args, **kwargs):\n type(self).calls += 1\n return super(CountingMD5PasswordHasher, self).encode(*args, **kwargs)\n\n\nclass BaseModelBackendTest(object):\n \"\"\"\n A base class for tests that need to validate the ModelBackend\n with different User models. 
Subclasses should define a class\n level UserModel attribute, and a create_users() method to\n construct two users for test purposes.\n \"\"\"\n backend = 'django.contrib.auth.backends.ModelBackend'\n\n def setUp(self):\n self.patched_settings = modify_settings(\n AUTHENTICATION_BACKENDS={'append': self.backend},\n )\n self.patched_settings.enable()\n self.create_users()\n\n def tearDown(self):\n self.patched_settings.disable()\n # The custom_perms test messes with ContentTypes, which will\n # be cached; flush the cache to ensure there are no side effects\n # Refs #14975, #14925\n ContentType.objects.clear_cache()\n def test_has_perm(self):\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n self.assertEqual(user.has_perm('auth.test'), False)\n\n user.is_staff = True\n user.save()\n self.assertEqual(user.has_perm('auth.test'), False)\n\n user.is_superuser = True\n user.save()\n self.assertEqual(user.has_perm('auth.test'), True)\n\n user.is_staff = True\n user.is_superuser = True\n user.is_active = False\n user.save()\n self.assertEqual(user.has_perm('auth.test'), False)\n\n def test_custom_perms(self):\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n content_type = ContentType.objects.get_for_model(Group)\n perm = Permission.objects.create(name='test', content_type=content_type, codename='test')\n user.user_permissions.add(perm)\n\n # reloading user to purge the _perm_cache\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n self.assertEqual(user.get_all_permissions() == {'auth.test'}, True)\n self.assertEqual(user.get_group_permissions(), set())\n self.assertEqual(user.has_module_perms('Group'), False)\n self.assertEqual(user.has_module_perms('auth'), True)\n\n perm = Permission.objects.create(name='test2', content_type=content_type, codename='test2')\n user.user_permissions.add(perm)\n perm = Permission.objects.create(name='test3', content_type=content_type, codename='test3')\n user.user_permissions.add(perm)\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n self.assertEqual(user.get_all_permissions(), {'auth.test2', 'auth.test', 'auth.test3'})\n self.assertEqual(user.has_perm('test'), False)\n self.assertEqual(user.has_perm('auth.test'), True)\n self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), True)\n\n perm = Permission.objects.create(name='test_group', content_type=content_type, codename='test_group')\n group = Group.objects.create(name='test_group')\n group.permissions.add(perm)\n user.groups.add(group)\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n exp = {'auth.test2', 'auth.test', 'auth.test3', 'auth.test_group'}\n self.assertEqual(user.get_all_permissions(), exp)\n self.assertEqual(user.get_group_permissions(), {'auth.test_group'})\n self.assertEqual(user.has_perms(['auth.test3', 'auth.test_group']), True)\n\n user = AnonymousUser()\n self.assertEqual(user.has_perm('test'), False)\n self.assertEqual(user.has_perms(['auth.test2', 'auth.test3']), False)\n\n def test_has_no_object_perm(self):\n \"\"\"Regressiontest for #12462\"\"\"\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n content_type = ContentType.objects.get_for_model(Group)\n perm = Permission.objects.create(name='test', content_type=content_type, codename='test')\n user.user_permissions.add(perm)\n\n self.assertEqual(user.has_perm('auth.test', 'object'), False)\n self.assertEqual(user.get_all_permissions('object'), set())\n self.assertEqual(user.has_perm('auth.test'), True)\n self.assertEqual(user.get_all_permissions(), 
{'auth.test'})\n\n def test_anonymous_has_no_permissions(self):\n \"\"\"\n #17903 -- Anonymous users shouldn't have permissions in\n ModelBackend.get_(all|user|group)_permissions().\n \"\"\"\n backend = ModelBackend()\n\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n content_type = ContentType.objects.get_for_model(Group)\n user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')\n group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')\n user.user_permissions.add(user_perm)\n\n group = Group.objects.create(name='test_group')\n user.groups.add(group)\n group.permissions.add(group_perm)\n\n self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})\n self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})\n self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})\n\n # In Django 1.10, is_anonymous became a property.\n is_anon = self.UserModel.is_anonymous\n if django.VERSION >= (1, 10):\n self.UserModel.is_anonymous = True\n else:\n user.is_anonymous = lambda: True\n\n self.assertEqual(backend.get_all_permissions(user), set())\n self.assertEqual(backend.get_user_permissions(user), set())\n self.assertEqual(backend.get_group_permissions(user), set())\n\n self.UserModel.is_anonymous = is_anon\n\n def test_inactive_has_no_permissions(self):\n \"\"\"\n #17903 -- Inactive users shouldn't have permissions in\n ModelBackend.get_(all|user|group)_permissions().\n \"\"\"\n backend = ModelBackend()\n\n user = self.UserModel._default_manager.get(pk=self.user.pk)\n content_type = ContentType.objects.get_for_model(Group)\n user_perm = Permission.objects.create(name='test', content_type=content_type, codename='test_user')\n group_perm = Permission.objects.create(name='test2', content_type=content_type, codename='test_group')\n user.user_permissions.add(user_perm)\n\n group = Group.objects.create(name='test_group')\n user.groups.add(group)\n group.permissions.add(group_perm)\n\n self.assertEqual(backend.get_all_permissions(user), {'auth.test_user', 'auth.test_group'})\n self.assertEqual(backend.get_user_permissions(user), {'auth.test_user', 'auth.test_group'})\n self.assertEqual(backend.get_group_permissions(user), {'auth.test_group'})\n\n user.is_active = False\n user.save()\n\n self.assertEqual(backend.get_all_permissions(user), set())\n self.assertEqual(backend.get_user_permissions(user), set())\n self.assertEqual(backend.get_group_permissions(user), set())\n\n def test_get_all_superuser_permissions(self):\n \"\"\"A superuser has all permissions. Refs #14795.\"\"\"\n user = self.UserModel._default_manager.get(pk=self.superuser.pk)\n self.assertEqual(len(user.get_all_permissions()), len(Permission.objects.all()))\n\n @override_settings(PASSWORD_HASHERS=['rhouser.tests.test_auth_backends.CountingMD5PasswordHasher'])\n def test_authentication_timing(self):\n \"\"\"Hasher is run once regardless of whether the user exists. 
Refs #20760.\"\"\"\n # Re-set the password, because this tests overrides PASSWORD_HASHERS\n self.user.set_password('<PASSWORD>')\n self.user.save()\n\n CountingMD5PasswordHasher.calls = 0\n username = getattr(self.user, self.UserModel.USERNAME_FIELD)\n authenticate(username=username, password='<PASSWORD>')\n self.assertEqual(CountingMD5PasswordHasher.calls, 1)\n\n CountingMD5PasswordHasher.calls = 0\n authenticate(username='no_such_user', password='<PASSWORD>')\n self.assertEqual(CountingMD5PasswordHasher.calls, 1)\n\n\n@modify_settings(INSTALLED_APPS={'append': 'rhouser'})\n@override_settings(AUTH_USER_MODEL='rhouser.User')\nclass ModelBackendTest(BaseModelBackendTest, TestCase):\n \"\"\"\n Tests for the ModelBackend using the rhouser User model.\n\n This isn't a perfect test, because both auth.User and rhouser.User are\n synchronized to the database, which wouldn't ordinary happen in\n production. As a result, it doesn't catch errors caused by the non-\n existence of the User table.\n\n The specific problem is queries on .filter(groups__user) et al, which\n makes an implicit assumption that the user model is called 'User'. In\n production, the auth.User table won't exist, so the requested join\n won't exist either; in testing, the auth.User *does* exist, and\n so does the join. However, the join table won't contain any useful\n data; for testing, we check that the data we expect actually does exist.\n \"\"\"\n\n UserModel = User\n\n def create_users(self):\n self.user = User._default_manager.create_user(\n email='<EMAIL>',\n password='<PASSWORD>',\n )\n self.superuser = User._default_manager.create_superuser(\n email='<EMAIL>',\n password='<PASSWORD>',\n )\n", "id": "4701091", "language": "Python", "matching_score": 3.0919301509857178, "max_stars_count": 5, "path": "rhouser/tests/test_auth_backends.py" }, { "content": "from django.core.mail import send_mail\nfrom django.contrib.auth.models import BaseUserManager, AbstractBaseUser, PermissionsMixin\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass UserManager(BaseUserManager):\n\n def _create_user(self, email, password,\n is_staff, is_superuser, **extra_fields):\n \"\"\"\n Creates and saves an User with the given email and password.\n \"\"\"\n now = timezone.now()\n if not email:\n raise ValueError('An email address must be provided.')\n email = self.normalize_email(email)\n user = self.model(email=email,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser, last_login=now,\n date_joined=now, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_user(self, email=None, password=None, **extra_fields):\n return self._create_user(email, password, False, False,\n **extra_fields)\n\n def create_superuser(self, email, password, **extra_fields):\n return self._create_user(email, password, True, True,\n **extra_fields)\n\n\nclass SimpleIdentityMixin(models.Model):\n \"\"\"\n A mixin class that provides a simple first name/last name representation\n of user identity.\n \"\"\"\n full_name = models.CharField(_('full name'), max_length=200, blank=True)\n short_name = models.CharField(_('short name'), max_length=50)\n is_staff = models.BooleanField(_('staff status'), default=False,\n help_text=_('Designates whether the user can log into the admin site.'))\n is_active = models.BooleanField(_('active'), default=True,\n help_text=_('Designates whether this user should be treated as '\n 'active. 
Unselect this instead of deleting accounts.'))\n date_joined = models.DateTimeField(_('date joined'), default=timezone.now)\n\n class Meta:\n abstract = True\n\n def get_full_name(self):\n \"\"\"\n Returns the full name of the user.\n \"\"\"\n return self.full_name\n\n def get_short_name(self):\n \"Returns the short name for the user.\"\n return self.short_name\n\n\nclass AbstractUser(SimpleIdentityMixin, PermissionsMixin, AbstractBaseUser):\n \"\"\"\n An abstract base class implementing a fully featured User model with\n admin-compliant permissions, using email as a username.\n\n All fields other than email and password are optional.\n \"\"\"\n email = models.EmailField(_('email address'), max_length=254, unique=True)\n\n objects = UserManager()\n\n USERNAME_FIELD = 'email'\n REQUIRED_FIELDS = ['full_name', 'short_name']\n\n class Meta:\n verbose_name = _('user')\n verbose_name_plural = _('users')\n abstract = True\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n \"\"\"\n Sends an email to this User.\n \"\"\"\n send_mail(subject, message, from_email, [self.email], **kwargs)\n\n\nclass User(AbstractUser):\n \"\"\"\n The abstract base class exists so that it can be easily extended, but\n this class is the one that should be instantiated.\n \"\"\"\n", "id": "8323447", "language": "Python", "matching_score": 2.9427173137664795, "max_stars_count": 5, "path": "rhouser/models.py" }, { "content": "from django.core import mail\nfrom django.test import TestCase, override_settings, modify_settings\n\nfrom rhouser.models import AbstractUser, User, UserManager\n\n\n@modify_settings(INSTALLED_APPS={'append': 'rhouser'})\n@override_settings(AUTH_USER_MODEL='rhouser.User')\nclass UserManagerTestCase(TestCase):\n\n def test_create_user(self):\n email_lowercase = '<EMAIL>'\n user = User.objects.create_user(email_lowercase)\n self.assertEqual(user.email, email_lowercase)\n self.assertFalse(user.has_usable_password())\n\n def test_create_user_email_domain_normalize_rfc3696(self):\n # According to http://tools.ietf.org/html/rfc3696#section-3\n # the \"@\" symbol can be part of the local part of an email address\n returned = UserManager.normalize_email(r'<EMAIL>')\n self.assertEqual(returned, r'<EMAIL>')\n\n def test_create_user_email_domain_normalize(self):\n returned = UserManager.normalize_email('<EMAIL>')\n self.assertEqual(returned, '<EMAIL>')\n\n def test_create_user_email_domain_normalize_with_whitespace(self):\n returned = UserManager.normalize_email('email\\ <EMAIL>')\n self.assertEqual(returned, 'email\\ <EMAIL>')\n\n def test_empty_username(self):\n self.assertRaisesMessage(\n ValueError,\n 'An email address must be provided.',\n User.objects.create_user, email=''\n )\n\n\n@override_settings(AUTH_USER_MODEL='rhouser.User')\nclass AbstractUserTestCase(TestCase):\n def test_email_user(self):\n # valid send_mail parameters\n kwargs = {\n \"fail_silently\": False,\n \"auth_user\": None,\n \"auth_password\": <PASSWORD>,\n \"connection\": None,\n \"html_message\": None,\n }\n abstract_user = AbstractUser(email='<EMAIL>')\n abstract_user.email_user(subject=\"Subject here\",\n message=\"This is a message\", from_email=\"<EMAIL>\", **kwargs)\n # Test that one message has been sent.\n self.assertEqual(len(mail.outbox), 1)\n # Verify that test email contains the correct attributes:\n message = mail.outbox[0]\n self.assertEqual(message.subject, \"Subject here\")\n self.assertEqual(message.body, \"This is a message\")\n self.assertEqual(message.from_email, \"<EMAIL>\")\n 
self.assertEqual(message.to, [abstract_user.email])\n", "id": "10845184", "language": "Python", "matching_score": 2.0189404487609863, "max_stars_count": 5, "path": "rhouser/tests/test_models.py" }, { "content": "from __future__ import unicode_literals\n\nfrom unittest import skipUnless\n\nimport django\nfrom django.forms import Field\nfrom django.test import TestCase, override_settings\nfrom django.utils.encoding import force_text\nfrom django.utils.translation import ugettext as _\n\nfrom rhouser.models import User\nfrom rhouser.forms import UserCreationForm, UserChangeForm\n\n\n@override_settings(\n AUTH_USER_MODEL='rhouser.User',\n USE_TZ=False,\n PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),\n)\nclass UserCreationFormTest(TestCase):\n\n fixtures = ['authtestdata.json']\n\n def test_user_already_exists(self):\n data = {\n 'email': '<EMAIL>',\n 'password1': '<PASSWORD>',\n 'password2': '<PASSWORD>',\n }\n form = UserCreationForm(data)\n self.assertFalse(form.is_valid())\n self.assertEqual(form[\"email\"].errors,\n [force_text(form.error_messages['duplicate_email'])])\n\n def test_invalid_data(self):\n data = {\n 'email': 'jsmith!',\n 'password1': '<PASSWORD>',\n 'password2': '<PASSWORD>',\n }\n form = UserCreationForm(data)\n self.assertFalse(form.is_valid())\n self.assertEqual(form[\"email\"].errors, [_('Enter a valid email address.')])\n\n def test_password_verification(self):\n # The verification password is incorrect.\n data = {\n 'email': '<EMAIL>',\n 'password1': '<PASSWORD>',\n 'password2': '<PASSWORD>',\n }\n form = UserCreationForm(data)\n self.assertFalse(form.is_valid())\n self.assertEqual(form[\"password2\"].errors,\n [force_text(form.error_messages['password_mismatch'])])\n\n def test_both_passwords(self):\n # One (or both) passwords weren't given\n data = {'email': '<EMAIL>'}\n form = UserCreationForm(data)\n required_error = [force_text(Field.default_error_messages['required'])]\n self.assertFalse(form.is_valid())\n self.assertEqual(form['password1'].errors, required_error)\n self.assertEqual(form['password2'].errors, required_error)\n\n data['password2'] = '<PASSWORD>'\n form = UserCreationForm(data)\n self.assertFalse(form.is_valid())\n self.assertEqual(form['password1'].errors, required_error)\n self.assertEqual(form['password2'].errors, [])\n\n def test_success(self):\n # The success case.\n data = {\n 'email': '<EMAIL>',\n 'full_name': '<NAME>',\n 'short_name': 'John',\n 'password1': '<PASSWORD>',\n 'password2': '<PASSWORD>',\n }\n form = UserCreationForm(data)\n self.assertTrue(form.is_valid())\n u = form.save()\n self.assertEqual(repr(u), '<User: <EMAIL>>')\n\n @skipUnless(django.VERSION >= (1, 9), \"Password strength checks not available on Django 1.8\")\n @override_settings(\n AUTH_PASSWORD_VALIDATORS = [\n {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},\n ]\n )\n def test_simple_password(self):\n data = {\n 'email': '<EMAIL>',\n 'password1': 'password',\n 'password2': 'password',\n }\n form = UserCreationForm(data)\n self.assertFalse(form.is_valid())\n self.assertEqual(form[\"password2\"].errors, ['This password is too common.'])\n\n\n@override_settings(\n AUTH_USER_MODEL='rhouser.User',\n USE_TZ=False,\n PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',)\n)\nclass UserChangeFormTest(TestCase):\n\n fixtures = ['authtestdata.json']\n\n def test_email_validity(self):\n user = User.objects.get(email='<EMAIL>')\n data = {'email': 'not valid'}\n form = UserChangeForm(data, instance=user)\n 
self.assertFalse(form.is_valid())\n self.assertEqual(form[\"email\"].errors, [_('Enter a valid email address.')])\n\n def test_bug_14242(self):\n # A regression test, introduce by adding an optimization for the\n # UserChangeForm.\n\n class MyUserForm(UserChangeForm):\n def __init__(self, *args, **kwargs):\n super(MyUserForm, self).__init__(*args, **kwargs)\n self.fields['groups'].help_text = 'These groups give users different permissions'\n\n class Meta(UserChangeForm.Meta):\n fields = ('groups',)\n\n # Just check we can create it\n MyUserForm({})\n\n def test_unusable_password(self):\n user = User.objects.get(email='<EMAIL>')\n user.set_unusable_password()\n user.save()\n form = UserChangeForm(instance=user)\n self.assertIn(_(\"No password set.\"), form.as_table())\n\n def test_bug_17944_empty_password(self):\n user = User.objects.get(email='<EMAIL>')\n form = UserChangeForm(instance=user)\n self.assertIn(_(\"No password set.\"), form.as_table())\n\n def test_bug_17944_unmanageable_password(self):\n user = User.objects.get(email='<EMAIL>')\n form = UserChangeForm(instance=user)\n self.assertIn(_(\"Invalid password format or unknown hashing algorithm.\"),\n form.as_table())\n\n def test_bug_17944_unknown_password_algorithm(self):\n user = User.objects.get(email='<EMAIL>')\n form = UserChangeForm(instance=user)\n self.assertIn(_(\"Invalid password format or unknown hashing algorithm.\"),\n form.as_table())\n\n def test_bug_19133(self):\n \"The change form does not return the password value\"\n # Use the form to construct the POST data\n user = User.objects.get(email='<EMAIL>')\n form_for_data = UserChangeForm(instance=user)\n post_data = form_for_data.initial\n\n # The password field should be readonly, so anything\n # posted here should be ignored; the form will be\n # valid, and give back the 'initial' value for the\n # password field.\n post_data['password'] = '<PASSWORD>'\n form = UserChangeForm(instance=user, data=post_data)\n\n self.assertTrue(form.is_valid())\n self.assertEqual(form.cleaned_data['password'], '<PASSWORD>')\n\n def test_bug_19349_bound_password_field(self):\n user = User.objects.get(email='<EMAIL>')\n form = UserChangeForm(data={}, instance=user)\n # When rendering the bound password field,\n # ReadOnlyPasswordHashWidget needs the initial\n # value to render correctly\n self.assertEqual(form.initial['password'], form['password'].value())\n", "id": "1839333", "language": "Python", "matching_score": 5.023647308349609, "max_stars_count": 5, "path": "rhouser/tests/test_forms.py" }, { "content": "from django import forms\ntry:\n from django.contrib.auth import password_validation\nexcept ImportError:\n # Django 1.8 doesn't have password strength validation\n # Insert a dummy object into the namespace.\n class EmptyValidator:\n def validate_password(self, password, instance):\n pass\n\n def password_validators_help_text_html(self):\n return None\n\n password_validation = EmptyValidator()\n\nfrom django.contrib.auth.forms import ReadOnlyPasswordHashField\nfrom django.utils.translation import ugettext as _\n\nfrom rhouser.models import User\n\n\nclass AbstractUserCreationForm(forms.ModelForm):\n \"\"\"\n A form that creates a user, with no privileges, from the given username and\n password.\n \"\"\"\n error_messages = {\n 'duplicate_username': _(\"A user with that username already exists.\"),\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n\n password1 = forms.CharField(label=_(\"Password\"),\n widget=forms.PasswordInput,\n 
help_text=password_validation.password_validators_help_text_html())\n password2 = forms.CharField(label=_(\"Password confirmation\"),\n widget=forms.PasswordInput,\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',\n )\n self.instance.username = self.cleaned_data.get('username')\n password_validation.validate_password(self.cleaned_data.get('password2'), self.instance)\n return password2\n\n def save(self, commit=True):\n user = super(AbstractUserCreationForm, self).save(commit=False)\n user.set_password(self.cleaned_data[\"password1\"])\n if commit:\n user.save()\n return user\n\n\nclass UserCreationForm(AbstractUserCreationForm):\n \"\"\"\n A concrete implementation of AbstractUserCreationForm that uses an\n e-mail address as a user's identifier.\n \"\"\"\n error_messages = {\n 'duplicate_email': _(\"A user with that email already exists.\"),\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n\n class Meta:\n model = User\n fields = ('email', 'full_name', 'short_name')\n\n def clean_email(self):\n # Since User.email is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. See #13147.\n email = self.cleaned_data['email']\n try:\n User._default_manager.get(email=email)\n except User.DoesNotExist:\n return email\n raise forms.ValidationError(\n self.error_messages['duplicate_email'],\n code='duplicate_email',\n )\n\n\nclass AbstractUserChangeForm(forms.ModelForm):\n password = ReadOnlyPasswordHashField(label=_(\"Password\"),\n help_text=_(\"Raw passwords are not stored, so there is no way to see \"\n \"this user's password, but you can change the password \"\n \"using <a href=\\\"password/\\\">this form</a>.\"))\n\n def __init__(self, *args, **kwargs):\n super(AbstractUserChangeForm, self).__init__(*args, **kwargs)\n f = self.fields.get('user_permissions', None)\n if f is not None:\n f.queryset = f.queryset.select_related('content_type')\n\n def clean_password(self):\n # Regardless of what the user provides, return the initial value.\n # This is done here, rather than on the field, because the\n # field does not have access to the initial value\n return self.initial[\"password\"]\n\n\nclass UserChangeForm(AbstractUserChangeForm):\n class Meta:\n model = User\n fields = '__all__'\n", "id": "9310194", "language": "Python", "matching_score": 0.7565587162971497, "max_stars_count": 5, "path": "rhouser/forms.py" }, { "content": "from django.apps import AppConfig\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass RHOUserConfig(AppConfig):\n name = 'rhouser'\n verbose_name = _(\"Authentication and authorization\")\n", "id": "11402771", "language": "Python", "matching_score": 2.9165048599243164, "max_stars_count": 5, "path": "rhouser/apps.py" }, { "content": "default_app_config = 'rhouser.apps.RHOUserConfig'\n", "id": "8239064", "language": "Python", "matching_score": 1.975074052810669, "max_stars_count": 5, "path": "rhouser/__init__.py" } ]
1.600653
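The rhouser entry above implements the common Django pattern of using an email address instead of a username: the abstract user sets USERNAME_FIELD = 'email', a custom UserManager normalizes the address, and the creation form re-checks uniqueness only to give a friendlier error than the ORM would. The sketch below shows the same pattern in minimal form; the EmailUser and EmailUserManager names and the 'myapp' label are placeholders, not part of the repository above.

# Minimal sketch of an email-as-username user model (assumed names: EmailUser, EmailUserManager).
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models


class EmailUserManager(BaseUserManager):
    def create_user(self, email, password=None, **extra):
        if not email:
            raise ValueError('An email address must be provided.')
        user = self.model(email=self.normalize_email(email), **extra)
        user.set_password(password)  # hashes the password, or marks it unusable when None
        user.save(using=self._db)
        return user


class EmailUser(AbstractBaseUser, PermissionsMixin):
    email = models.EmailField(unique=True)
    full_name = models.CharField(max_length=200, blank=True)

    objects = EmailUserManager()

    USERNAME_FIELD = 'email'          # authenticate with the email instead of a username
    REQUIRED_FIELDS = ['full_name']   # extra prompts for createsuperuser

# settings.py would then point at it, e.g. AUTH_USER_MODEL = 'myapp.EmailUser'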
iainjames88
[ { "content": "import argparse\nimport collections\nimport os\nimport time\n\nfrom colorama import init, Fore\n\nCELL_KEYS = {\n -3: Fore.BLUE, # visited\n -2: Fore.CYAN, # queued\n -1: Fore.BLACK, # wall\n 0: Fore.WHITE, # empty\n 1: Fore.LIGHTGREEN_EX, # current node\n 2: Fore.LIGHTRED_EX, # target node\n 3: Fore.YELLOW, # path node\n}\n\nDIRECTIONS = {(-1, 0), (0, 1), (1, 0), (0, -1)}\n\n\nclass Node:\n def __init__(self, row, cell, parent):\n self.row = row\n self.cell = cell\n self.parent = parent\n\n\ndef read_maze(file):\n start_node = None\n maze = []\n\n for i, line in enumerate(file):\n row = []\n for j, char in enumerate(line.split()):\n if char == \"1\":\n start_node = Node(i, j, None)\n row.append(int(char))\n maze.append(row)\n\n return maze, start_node\n\n\ndef print_maze(maze):\n os.system(\"clear\")\n\n for row in maze:\n for cell in row:\n print(f\"{CELL_KEYS[cell]} █\", end=\"\")\n print()\n print()\n\n\ndef get_neighbours(maze, current_node):\n neighbours = []\n\n for direction in DIRECTIONS:\n row = current_node.row + direction[0]\n cell = current_node.cell + direction[1]\n\n if (\n row < 0\n or row >= len(maze)\n or cell < 0\n or cell >= len(maze[current_node.row])\n ):\n continue\n\n if maze[row][cell] < 0:\n continue\n\n neighbours.append(Node(row, cell, current_node))\n\n return neighbours\n\n\ndef backtrack(maze, current_node):\n path = [current_node]\n\n while current_node.parent:\n maze[current_node.row][current_node.cell] = 3\n path.append(current_node.parent)\n current_node = current_node.parent\n\n maze[current_node.row][current_node.cell] = 3\n\n return path\n\n\ndef bfs(maze, current_node):\n \"\"\"\n Iteratively solve the maze by adding the current nodes neighbours to a FIFO queue then enumerating the queue and\n repeating the process.\n\n Nodes keep track of their parent node i.e., the node that discovered them. Once the target\n node is reached backtrack by visiting each nodes' parent until a node with no parent (the starting node) is found\n\n :return: a generator function that yields the maze after each update\n \"\"\"\n q = collections.deque()\n\n q.append(current_node)\n\n while len(q) > 0:\n current_node = q.popleft()\n maze[current_node.row][current_node.cell] = 1\n yield maze\n\n for neighbour in get_neighbours(maze, current_node):\n if maze[neighbour.row][neighbour.cell] == 2:\n backtrack(maze, neighbour)\n yield maze\n return\n else:\n q.append(neighbour)\n maze[neighbour.row][neighbour.cell] = -2\n\n yield maze\n maze[current_node.row][current_node.cell] = -3\n time.sleep(args.speed)\n\n\ndef dfs(maze, current_node):\n \"\"\"\n Iteratively solve the maze by adding the current nodes neighbours to a LIFO queue then enumerating the queue and\n repeating the process.\n\n Nodes keep track of their parent node i.e., the node that discovered them. 
Once the target\n node is reached backtrack by visiting each nodes' parent until a node with no parent (the starting node) is found\n\n :return: a generator function that yields the maze after each update\n \"\"\"\n\n q = collections.deque()\n\n q.append(current_node)\n\n while len(q) > 0:\n current_node = q.pop()\n maze[current_node.row][current_node.cell] = 1\n yield maze\n\n for neighbour in get_neighbours(maze, current_node):\n if maze[neighbour.row][neighbour.cell] == 2:\n backtrack(maze, neighbour)\n yield maze\n return\n else:\n q.append(neighbour)\n maze[neighbour.row][neighbour.cell] = -2\n\n yield maze\n maze[current_node.row][current_node.cell] = -3\n\n\ndef main(maze_file, algorithm, speed):\n maze, start_node = read_maze(maze_file)\n\n if algorithm == \"bfs\":\n for iteration in bfs(maze, start_node):\n print_maze(iteration)\n time.sleep(speed)\n elif algorithm == \"dfs\":\n for iteration in dfs(maze, start_node):\n print_maze(iteration)\n time.sleep(speed)\n\n\nif __name__ == \"__main__\":\n init() # for colorama\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"maze_file\", type=argparse.FileType(\"r\"))\n parser.add_argument(\"algorithm\", type=str, choices=[\"bfs\", \"dfs\"])\n parser.add_argument(\n \"--speed\", type=float, help=\"delay in seconds; defaults to 0.3\", default=0.3\n )\n\n args = parser.parse_args()\n\n main(args.maze_file, args.algorithm, args.speed)\n", "id": "8590163", "language": "Python", "matching_score": 1.3467339277267456, "max_stars_count": 0, "path": "main.py" }, { "content": "import json\nimport os\nimport random\n\n\ndef get_adjectives():\n dirname = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(dirname, \"adjectives.json\")\n with open(path, \"r\") as f:\n return json.load(f)\n\ndef get_nouns():\n dirname = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(dirname, \"nouns.json\")\n with open(path, \"r\") as f:\n return json.load(f)\n\n\nadjective = random.choice(get_adjectives())\nnoun = random.choice(get_nouns())\nnumber = random.randint(1, 10000)\n\nprint(f\"{adjective}-{noun}-{number}\")\n", "id": "4333350", "language": "Python", "matching_score": 0.017911052331328392, "max_stars_count": 0, "path": "sobriquet/__main__.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\"\"\"\nExecute AWS Lambda functions.\n\"\"\"\n\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\nfrom airflow.contrib.hooks.aws_lambda_hook import AwsLambdaHook\n\n\nclass AwsLambdaExecutionError(Exception):\n \"\"\"\n Raised when there is an error executing the function.\n \"\"\"\n\n\nclass AwsLambdaPayloadError(Exception):\n \"\"\"\n Raised when there is an error with the Payload object in the response.\n \"\"\"\n\n\nclass AwsLambdaInvokeFunctionOperator(BaseOperator):\n \"\"\"\n Invoke AWS Lambda functions with a JSON payload.\n\n The check_success_function signature should be a single param which will receive a dict.\n The dict will be the \"Response Structure\" described in\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.invoke.\n It may be necessary to read the Payload see the actual response from the Lambda function e.g.,\n\n ```\n def succeeded(response):\n payload = json.loads(response['Payload'].read())\n # do something with payload\n ```\n\n :param function_name: The name of the Lambda function.\n :type function_name: str\n :param region_name: AWS region e.g., eu-west-1, ap-southeast-1, etc.\n :type region_name: str\n :param payload: The JSON to submit as input to a Lambda function.\n :type payload: str\n :param check_success_function: A function to check the Lambda response and determine success or failure.\n :type check_success_function: function\n :param log_type: Set to Tail to include the execution log in the response. Otherwise, set to \"None\".\n :type log_type: str\n :param qualifier: A version or alias name for the Lambda.\n :type qualifier: str\n :param aws_conn_id: connection id of AWS credentials / region name. 
If None,\n credential boto3 strategy will be used\n (http://boto3.readthedocs.io/en/latest/guide/configuration.html).\n :type aws_conn_id: str\n \"\"\"\n\n @apply_defaults\n def __init__(\n self,\n function_name,\n region_name,\n payload,\n check_success_function,\n log_type=\"None\",\n qualifier=\"$LATEST\",\n aws_conn_id=None,\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.function_name = function_name\n self.region_name = region_name\n self.payload = payload\n self.log_type = log_type\n self.qualifier = qualifier\n self.check_success_function = check_success_function\n self.aws_conn_id = aws_conn_id\n\n def get_hook(self):\n \"\"\"\n Initialises an AWS Lambda hook\n\n :return: airflow.contrib.hooks.AwsLambdaHook\n \"\"\"\n return AwsLambdaHook(\n self.function_name,\n self.region_name,\n self.log_type,\n self.qualifier,\n aws_conn_id=self.aws_conn_id,\n )\n\n def execute(self, context):\n self.log.info(\"AWS Lambda: invoking %s\", self.function_name)\n\n response = self.get_hook().invoke_lambda(self.payload)\n\n try:\n self._validate_lambda_api_response(response)\n self._validate_lambda_response_payload(response)\n except (AwsLambdaExecutionError, AwsLambdaPayloadError) as e:\n self.log.error(response)\n raise e\n\n self.log.info(\"AWS Lambda: %s succeeded!\", self.function_name)\n\n def _validate_lambda_api_response(self, response):\n \"\"\"\n Check whether the AWS Lambda function executed without errors.\n\n :param response: HTTP Response from AWS Lambda.\n :type response: dict\n :return: None\n \"\"\"\n\n if \"FunctionError\" in response or response[\"StatusCode\"] >= 300:\n raise AwsLambdaExecutionError(\"AWS Lambda: error occurred during execution\")\n\n def _validate_lambda_response_payload(self, response):\n \"\"\"\n Call a user provided function to validate the Payload object for errors.\n\n :param response: HTTP Response from AWS Lambda.\n :type response: dict\n :return: None\n \"\"\"\n if not self.check_success_function(response):\n raise AwsLambdaPayloadError(\n \"AWS Lambda: error validating response payload!\"\n )\n", "id": "1263391", "language": "Python", "matching_score": 5.544848442077637, "max_stars_count": 0, "path": "airflow/contrib/operators/aws_lambda_operator.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\nimport json\nimport unittest\nfrom unittest import mock\n\nfrom airflow.contrib.operators.aws_lambda_operator import (\n AwsLambdaInvokeFunctionOperator,\n AwsLambdaExecutionError,\n AwsLambdaPayloadError,\n)\n\n\nclass TestAwsLambdaInvokeFunctionOperator(unittest.TestCase):\n @mock.patch(\n \"airflow.contrib.operators.aws_lambda_operator.AwsLambdaHook.invoke_lambda\",\n return_value={\"StatusCode\": 200},\n )\n def test__execute__given_check_success_function_fails__raises_an_exception(\n self, mock_aws_lambda_hook\n ):\n operator = AwsLambdaInvokeFunctionOperator(\n task_id=\"foo\",\n function_name=\"foo\",\n region_name=\"eu-west-1\",\n payload=json.dumps({\"foo\": \"bar\"}),\n check_success_function=lambda r: False,\n )\n\n with self.assertRaises(AwsLambdaPayloadError):\n operator.execute(None)\n\n @mock.patch(\n \"airflow.contrib.operators.aws_lambda_operator.AwsLambdaHook.invoke_lambda\"\n )\n def test__execute__given_an_invalid_lambda_api_response__raises_an_exception(\n self, mock_aws_lambda_hook\n ):\n responses = [{\"StatusCode\": 500}, {\"FunctionError\": \"foo\"}]\n\n for response in responses:\n mock_aws_lambda_hook.return_value = response\n\n operator = AwsLambdaInvokeFunctionOperator(\n task_id=\"foo\",\n function_name=\"foo\",\n region_name=\"eu-west-1\",\n payload=json.dumps({\"foo\": \"bar\"}),\n check_success_function=lambda r: True,\n )\n\n with self.assertRaises(AwsLambdaExecutionError):\n operator.execute(None)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "id": "3835303", "language": "Python", "matching_score": 4.462345123291016, "max_stars_count": 0, "path": "tests/contrib/operators/test_aws_lambda_operator.py" } ]
2.90454
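The bfs/dfs generators in the maze solver above describe their approach in the docstrings: push neighbours onto a FIFO (or LIFO) queue, record each node's parent (the node that discovered it), and once the target is reached walk the parent chain back to the start. A self-contained sketch of that idea on a toy grid is given below; the grid, the coordinate tuples and the helper name are illustrative and not taken from the repository.

# Breadth-first search on a small grid with parent backtracking (illustrative data, not from the repo).
import collections

GRID = [  # 0 = free cell, -1 = wall
    [0, 0, 0, -1],
    [-1, -1, 0, -1],
    [0, 0, 0, 0],
]
START, TARGET = (0, 0), (2, 3)


def bfs_path(grid, start, target):
    parents = {start: None}               # node -> node that discovered it
    queue = collections.deque([start])
    while queue:
        row, col = queue.popleft()        # FIFO: popleft() gives breadth-first order
        if (row, col) == target:
            # Backtrack through the parents to rebuild the path.
            path, node = [], (row, col)
            while node is not None:
                path.append(node)
                node = parents[node]
            return list(reversed(path))
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nxt = (row + dr, col + dc)
            if (0 <= nxt[0] < len(grid) and 0 <= nxt[1] < len(grid[0])
                    and grid[nxt[0]][nxt[1]] != -1 and nxt not in parents):
                parents[nxt] = (row, col)
                queue.append(nxt)
    return None  # no route to the target


print(bfs_path(GRID, START, TARGET))  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 3)]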
jeffi
[ { "content": "# Copyright 2019 U.C. Berkeley RISE Lab\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom anna.anna_pb2 import (\n # Anna's lattice types as an enum\n LWW, SET, ORDERED_SET, SINGLE_CAUSAL, MULTI_CAUSAL,\n # Serialized representations of Anna's lattices\n LWWValue, SetValue, SingleKeyCausalValue, MultiKeyCausalValue\n)\n\n\nclass Lattice:\n def __init__(self):\n raise NotImplementedError\n\n def __str__(self):\n return str(self.reveal())\n\n def __eq__(self, other):\n if other is None:\n return False\n\n if type(other) != type(self):\n return False\n\n return self.reveal() == other.reveal()\n\n def reveal(self):\n '''\n The reveal method returns an unwrapped version of the data underlying\n data structure stored by the lattice.\n '''\n raise NotImplementedError\n\n def assign(self, value):\n '''\n Assigns a new value to the lattice -- this must be the same as the type\n expected when creating an instance of a particular lattice.\n '''\n raise NotImplementedError\n\n def merge(self, other):\n '''\n Merge two lattices into one. How the merge function works is contingent\n on what the underlying data structure us.\n '''\n raise NotImplementedError\n\n def serialize(self):\n '''\n Serializes the underlying data structure, including metadata relevant\n to the lattice, into a protobuf and returns a protobuf object along\n with an enum tag indicating the type of this lattice.\n '''\n raise NotImplementedError\n\n\nclass LWWPairLattice(Lattice):\n def __init__(self, timestamp, value):\n if type(timestamp) != int or type(value) != bytes:\n raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')\n\n self.ts = timestamp\n self.val = value\n\n def reveal(self):\n return self.val\n\n def assign(self, value):\n if type(value) == str:\n value = bytes(value, 'utf-8')\n\n if type(value) != tuple or type(value[0]) != int \\\n or type(value[1]) != bytes:\n raise ValueError('LWWPairLattice must be a timestamp-bytes pair.')\n\n self.ts = value[0]\n self.val = value[1]\n\n def merge(self, other):\n if other.ts > self.ts:\n return other\n else:\n return self\n\n def serialize(self):\n res = LWWValue()\n res.timestamp = self.ts\n res.value = self.val\n\n return res, LWW\n\n\nclass SetLattice(Lattice):\n def __init__(self, value=set()):\n if type(value) != set:\n raise ValueError('SetLattice can only be formed from a set.')\n\n self.val = value\n\n def reveal(self):\n return self.val\n\n def assign(self, value):\n if type(value) != set:\n raise ValueError('SetLattice can only be formed from a set.')\n\n self.val = value\n\n def merge(self, other):\n if type(other) != SetLattice:\n raise ValueError('Cannot merge SetLattice with invalid type ' +\n str(type(other)) + '.')\n\n new_set = set()\n\n for v in other.val:\n new_set.insert(v)\n\n for v in self.val:\n new_set.insert(v)\n\n return SetLattice(new_set)\n\n def serialize(self):\n res = SetValue()\n\n for v in self.val:\n if type(v) != bytes:\n raise ValueError('Unsupported type %s in SetLattice!' 
%\n (str(type(v))))\n\n res.values.append(v)\n\n return res, SET\n\n\n# A wrapper class that implements some convenience OrderedSet operations on\n# top of a list. # We use this because it is way cheaper to deserialize into,\n# at the cost of having expensive reordering operations (e.g. random insert),\n# which we expect to be rare for our use cases (we will almost always be\n# inserting at the end).\nclass ListBasedOrderedSet:\n # Preconditions: iterable's elements are unique and sorted ascending.\n # Behaviour is undefined if it is not.\n def __init__(self, iterable=[]):\n self.lst = []\n for val in iterable:\n self.insert(val)\n\n # Inserts a value, maintaining sorted order.\n def insert(self, value):\n # Microoptimization for the common case.\n if len(self.lst) == 0:\n self.lst.append(value)\n elif value > self.lst[-1]:\n self.lst.append(value)\n else:\n idx, present = self._index_of(value)\n if not present:\n self.lst.insert(idx, value)\n\n # Finds the index of an element, or where to insert it if you want to\n # maintain sorted order.\n # Returns (int index, bool present).\n # E.g. _index_of(lst, 'my-value') -> (42, true)\n # => lst[42] = 'my-value'\n # _index_of(lst, 'my-value') -> (42, false)\n # => lst[41] < 'my-value' < lst[42]\n def _index_of(self, value):\n low = 0\n high = len(self.lst)\n while low < high:\n middle = low + int((high - low) / 2)\n pivot = self.lst[middle]\n if value == pivot:\n return (middle, True)\n elif value < pivot:\n high = middle\n elif pivot < value:\n low = middle + 1\n return (low, False)\n\n\nclass OrderedSetLattice(Lattice):\n def __init__(self, value=ListBasedOrderedSet()):\n if type(value) != ListBasedOrderedSet:\n raise ValueError('OrderedSetLattice can only be formed from a '\n + 'ListBasedOrderedSet.')\n self.val = value\n\n def reveal(self):\n return self.val.lst\n\n def assign(self, value):\n if type(value) != ListBasedOrderedSet:\n raise ValueError('OrderedSetLattice can only be formed from a' +\n ' ListBasedOrderedSet.')\n self.val = value\n\n def merge(self, other):\n if type(other) != OrderedSetLattice:\n raise ValueError('Cannot merge OrderedSetLattice with type ' +\n str(type(other)) + '.')\n\n # Merge the two sorted lists by lockstep merge.\n # Note that reconstruction is faster than in-place merge.\n new_lst = []\n\n other = other.reveal().lst\n us = self.val.lst\n i, j = 0, 0 # Earliest unmerged indices.\n while i < len(us) or j < len(other):\n if i == len(us):\n new_lst.extend(other[j:])\n break\n elif j == len(other):\n new_lst.extend(us[i:])\n break\n else:\n a = us[i]\n b = other[j]\n if a == b:\n new_lst.append(a)\n i += 1\n j += 1\n elif a < b:\n new_lst.append(a)\n i += 1\n elif b < a:\n new_lst.append(b)\n j += 1\n\n return OrderedSetLattice(ListBasedOrderedSet(new_lst))\n\n def serialize(self):\n res = SetValue()\n res.values.extend(self.val.lst)\n\n return res, ORDERED_SET\n\n\nclass MaxIntLattice(Lattice):\n def __init__(self, value):\n if type(value) != int:\n raise ValueError('MaxIntLattice only accepts integers.')\n\n self.value = value\n\n def reveal(self):\n return self.value\n\n def assign(self, value):\n if type(value) != int:\n raise ValueError('MaxIntLattice only accepts integers.')\n\n self.value = value\n\n def merge(self, other):\n if type(other) != MaxIntLattice:\n raise ValueError('Cannot merge MaxIntLattice with type ' +\n str(type(other)) + '.')\n\n if other.value > self.value:\n self.value = other.value\n\n\nclass MapLattice(Lattice):\n def __init__(self, mp):\n if type(mp) != dict:\n raise 
ValueError('MapLattice only accepts dict data structures.')\n\n self.mp = mp\n\n def reveal(self):\n return self.mp\n\n def assign(self, mp):\n if type(mp) != dict:\n raise ValueError('MapLattice only accepts dict data structures.')\n\n self.mp = mp\n\n def merge(self, other):\n if type(other) != MapLattice:\n raise ValueError('Cannot merge MapLattice with type ' +\n str(type(other)) + '.')\n\n for key in other.mp.keys:\n if key in self.mp:\n if (not isinstance(self.mp[key], Lattice) or not\n isinstance(other.mp[key], Lattice)):\n raise ValueError('Cannot merge a MapLattice with values' +\n ' that are not lattice types.')\n self.mp[key].merge(other.mp[key])\n else:\n self.mp[key] = other.mp[key]\n\n def copy(self):\n return MapLattice(self.mp.copy())\n\n\nclass VectorClock(MapLattice):\n def __init__(self, mp, deserialize=False):\n if type(mp) != dict:\n raise ValueError(f'VectorClock must be a dict, not {type(mp)}.')\n\n if deserialize:\n self.mp = VectorClock._deserialize(mp)\n else:\n VectorClock._validate_vc(mp)\n self.mp = mp\n\n def _deserialize(mp):\n result = {}\n\n for key in mp:\n if type(mp[key]) != int:\n raise ValueError('Cannot deserialize VectorClock from'\n + ' non-integer values.')\n\n result[key] = MaxIntLattice(mp[key])\n\n return result\n\n def _validate_vc(mp):\n for val in mp.values():\n if type(val) != MaxIntLattice:\n raise ValueError(('VectorClock values must be MaxIntLattices,'\n + ' not %s.') % str(type(val)))\n\n def assign(self, mp):\n if type(mp) != dict:\n raise ValueError('VectorClock must be a dict.')\n\n VectorClock._validate_vc(mp)\n self.mp = mp\n\n def update(self, key, count):\n if key in self.mp:\n lattice = MaxIntLattice(count)\n self.mp[key].merge(lattice)\n\n def serialize(self, pobj):\n for key in self.mp:\n pobj[key] = self.mp[key].reveal()\n\n\nclass SingleKeyCausalLattice(Lattice):\n def __init__(self, vector_clock, value):\n if type(vector_clock) != VectorClock:\n raise ValueError('Vector clock of SingleKeyCausalLattice must be a'\n + ' VectorClock.')\n if type(value) != SetLattice:\n raise ValueError('Value of SingleKeyCausalLattice must be a'\n + ' SetLattice.')\n\n self.vector_clock = vector_clock\n self.value = value\n\n def reveal(self):\n return list(self.value.reveal())\n\n def assign(self, value):\n if type(value) != SetLattice:\n raise ValueError('Value of SingleKeyCausalLattice must be a'\n + ' SetLattice.')\n self.value = value\n\n def merge(self, other):\n if type(other) != SingleKeyCausalLattice:\n raise ValueError('Cannot merge SingleKeyCausalLattice with type ' +\n str(type(other)) + '.')\n\n previous = self.vector_clock.copy()\n self.vector_clock.merge(other.vector_clock)\n\n if self.vector_clock == other.vector_clock:\n # The other version dominates this version.\n self.value = other.value\n elif self.vector_clock != previous:\n # The versions are concurrent.\n self.value.merge(other.value)\n else:\n # This version dominates, so we do nothing.\n pass\n\n def serialize(self):\n skcv = SingleKeyCausalValue()\n\n # Serialize the vector clock for this particular lattice by adding each\n # key-counter pair.\n self.vector_clock.serialize(skcv.vector_clock)\n\n # Add the value(s) stored by this lattice.\n for v in self.value:\n skcv.values.add(v)\n\n return skcv, SINGLE_CAUSAL\n\n\nclass MultiKeyCausalLattice(Lattice):\n def __init__(self, vector_clock, dependencies, value):\n if type(vector_clock) != VectorClock:\n raise ValueError('Vector clock of MultiKeyCausalLattice must be a'\n + ' VectorClock.')\n if type(dependencies) != 
MapLattice:\n raise ValueError('Dependency set of MultiKeyCausalLattice must be'\n + ' a MapLattice.')\n if type(value) != SetLattice:\n raise ValueError('Value of MultiKeyCausalLattice must be a'\n + ' SetLattice.')\n\n self.vector_clock = vector_clock\n self.dependencies = dependencies\n self.value = value\n\n def reveal(self):\n return list(self.value.reveal())\n\n def assign(self, value):\n if type(value) != SetLattice:\n raise ValueError('Value of MultiKeyCausalLattice must be a'\n + ' SetLattice.')\n self.value = value\n\n def merge(self, other):\n if type(other) != MultiKeyCausalLattice:\n raise ValueError('Cannot merge MultiKeyCausalLattice with type ' +\n str(type(other)) + '.')\n\n previous = self.vector_clock.copy()\n self.vector_clock.merge(other.vector_clock)\n\n if self.vector_clock == other.vector_clock:\n # other version dominates this version\n self.dependencies = other.dependencies\n self.value = other.value\n elif self.vector_clock != previous:\n # versions are concurrent\n self.dependencies.merge(other.dependencies)\n self.value.merge(other.value)\n else:\n # this version dominates, so we do nothing\n pass\n\n def serialize(self):\n mkcv = MultiKeyCausalValue()\n\n # Serialize the vector clock for this particular lattice by adding each\n # key-counter pair.\n self.vector_clock.serialize(mkcv.vector_clock)\n\n # Serialize the vector clocks for each of the keys this lattice depends\n # on.\n for key in self.dependencies:\n kv = mkcv.add_dependences()\n kv.key = key\n self.dependencies[key].serialize(kv.vector_clock)\n\n # Add the value(s) stored by this lattice.\n for v in self.value:\n mkcv.values.add(v)\n\n return mkcv, MULTI_CAUSAL\n", "id": "11650624", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "client/python/anna/lattices.py" } ]
0
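OrderedSetLattice.merge in the anna entry above is documented as a "lockstep merge": walk two already-sorted, duplicate-free lists with one index each, emit the smaller head, advance both indices on a tie, and note that rebuilding a new list this way is cheaper than repeated in-place inserts. A stand-alone sketch of that merge, with made-up input lists, looks like this:

# Lockstep merge of two sorted, duplicate-free lists into one sorted union (illustrative inputs).
def lockstep_merge(a, b):
    merged, i, j = [], 0, 0
    while i < len(a) or j < len(b):
        if i == len(a):            # a exhausted: take the rest of b
            merged.extend(b[j:])
            break
        if j == len(b):            # b exhausted: take the rest of a
            merged.extend(a[i:])
            break
        if a[i] == b[j]:           # common element: emit once, advance both
            merged.append(a[i])
            i += 1
            j += 1
        elif a[i] < b[j]:
            merged.append(a[i])
            i += 1
        else:
            merged.append(b[j])
            j += 1
    return merged


print(lockstep_merge([1, 3, 5, 9], [2, 3, 8]))  # [1, 2, 3, 5, 8, 9]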
unrealdever
[ { "content": "import carla\nimport random\nimport time\nfrom tqdm import tqdm\n\nclient = carla.Client('localhost', 2000)\nclient.set_timeout(2.0)\n\nfor mid, map_name in enumerate(client.get_available_maps()):\n world = client.load_world(map_name)\n blueprint_library = world.get_blueprint_library()\n print('load map', map_name)\n\n bp_rgb = blueprint_library.find('sensor.camera.rgb')\n bp_rgb.set_attribute('sensor_tick', '0.1')\n bp_rgb.set_attribute('image_size_x', '1024')\n bp_rgb.set_attribute('image_size_y', '1024')\n bp_seg = blueprint_library.find('sensor.camera.semantic_segmentation')\n bp_seg.set_attribute('sensor_tick', '0.1')\n bp_seg.set_attribute('image_size_x', '1024')\n bp_seg.set_attribute('image_size_y', '1024')\n cc_rgb = carla.ColorConverter.Raw\n cc_seg = carla.ColorConverter.CityScapesPalette\n actors = []\n\n for i, transform in tqdm(enumerate(world.get_map().get_spawn_points())):\n transform.location.z += 3.0\n transform.rotation.pitch = -45.0\n\n camera_rgb = world.spawn_actor(bp_rgb, transform)\n actors.append(camera_rgb)\n camera_rgb.listen(lambda image: image.save_to_disk('_out/%02d_%05d_rgb_%06d.png' % (mid, i, image.frame), cc_rgb))\n time.sleep(0.15)\n for actor in actors:\n actor.destroy()\n # 通过存取list的方式让destory方法可调用,直接调用可能报错\n actors = []\n\n camera_seg = world.spawn_actor(bp_seg, transform)\n actors.append(camera_seg)\n camera_seg.listen(lambda image: image.save_to_disk('_out/%02d_%05d_seg_%06d.png' % (mid, i, image.frame), cc_seg))\n time.sleep(0.15)\n for actor in actors:\n actor.destroy()\n actors = []\n \n time.sleep(1)\n print('all %d point done.' % len(world.get_map().get_spawn_points()))", "id": "51980", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "snap_seg.py" } ]
0
vonmaehlen
[ { "content": "from setuptools import setup, find_packages\n\nsetup(\n name='facebook-ads-performance-pipeline',\n version='1.0.1',\n description=\"A data integration pipeline that imports downloaded Facebook Ads performance data into a data warehouse\",\n\n install_requires=[\n 'facebook-ads-performance-downloader>=2.0.0',\n 'mara-etl-tools>=1.1.0',\n 'mara-pipelines>=3.1.0',\n ],\n\n dependency_links=[\n 'git+https://github.com/mara/[email protected]#egg=facebook-ads-performance-downloader-2.0.0',\n 'git+https://github.com/mara/[email protected]#egg=mara-etl-tools-1.1.0',\n 'git+https://github.com/mara/[email protected]#egg=mara-pipelines-3.1.1'\n ],\n\n packages=find_packages(),\n\n author='<NAME>',\n license='MIT'\n)\n", "id": "4655598", "language": "Python", "matching_score": 3.1301777362823486, "max_stars_count": 0, "path": "setup.py" }, { "content": "from setuptools import setup, find_packages\n\nsetup(\n name='facebook-ads-performance-downloader',\n version='3.2.0',\n\n description=(\"Downloads data from the Facebook Ads API to local files\"),\n\n install_requires=[\n 'facebook_business==10.0.0',\n 'click>=6.0',\n 'wheel>=0.29'\n ],\n\n packages=find_packages(),\n\n author='<NAME>',\n license='MIT',\n\n entry_points={\n 'console_scripts': [\n 'download-facebook-performance-data=facebook_downloader.cli:download_data'\n ]\n }\n)\n", "id": "7661284", "language": "Python", "matching_score": 1.1699632406234741, "max_stars_count": 38, "path": "setup.py" }, { "content": "import pathlib\nimport mara_db.postgresql\nfrom facebook_ads_performance_pipeline import config\n\nfrom mara_pipelines.commands.files import ReadSQLite\nfrom mara_pipelines.commands.sql import ExecuteSQL\nfrom mara_pipelines.parallel_tasks.files import ReadMode, ParallelReadSqlite\nfrom mara_pipelines.parallel_tasks.sql import ParallelExecuteSQL\nfrom mara_pipelines.pipelines import Pipeline, Task\nfrom mara_pipelines.config import default_db_alias\n\npipeline = Pipeline(\n id=\"facebook\",\n description=\"Processes the data downloaded from the FacebookAds API\",\n base_path=pathlib.Path(__file__).parent,\n labels={\"Schema\": \"fb_dim\"})\n\npipeline.add_initial(\n Task(\n id=\"initialize_schemas\",\n description=\"Recreates the tmp and dim_next schemas\",\n commands=[\n ExecuteSQL(sql_statement=\"DROP SCHEMA IF EXISTS fb_dim_next CASCADE; CREATE SCHEMA fb_dim_next;\"),\n ExecuteSQL(sql_file_name=\"create_data_schema.sql\", echo_queries=False,\n file_dependencies=[\"create_data_schema.sql\"]),\n ExecuteSQL(sql_file_name=\"recreate_schemas.sql\", echo_queries=False)\n ]))\n\npipeline.add(\n Task(\n id=\"read_campaign_structure\",\n description=\"Loads the adwords campaign structure\",\n commands=[\n ExecuteSQL(sql_file_name='create_campaign_structure_data_table.sql', echo_queries=False),\n ReadSQLite(sqlite_file_name='facebook-account-structure-{}.sqlite3'.format(config.input_file_version()),\n sql_file_name='read_campaign_structure.sql',\n target_table='fb_data.campaign_structure')]))\n\npipeline.add(\n ParallelReadSqlite(\n id=\"read_ad_performance\",\n description=\"Loads ad performance data from json files\",\n file_pattern=\"*/*/*/facebook/ad-performance-*-{}.sqlite3\".format(config.input_file_version()),\n read_mode=ReadMode.ONLY_CHANGED,\n sql_file_name='read_ad_performance.sql',\n target_table=\"fb_data.ad_performance_upsert\",\n date_regex=\"^(?P<year>\\d{4})\\/(?P<month>\\d{2})\\/(?P<day>\\d{2})/\",\n file_dependencies=['create_ad_performance_data_table.sql'],\n commands_before=[\n 
ExecuteSQL(sql_file_name=\"create_ad_performance_data_table.sql\", echo_queries=False,\n file_dependencies=['create_ad_performance_data_table.sql'])\n ],\n commands_after=[\n ExecuteSQL(sql_statement='SELECT fb_data.upsert_ad_performance()')\n ]))\n\npipeline.add(\n Task(\n id=\"preprocess_ad\",\n description=\"Creates the different ad dimensions\",\n commands=[\n ExecuteSQL(sql_file_name=\"preprocess_ad.sql\")\n ]),\n [\"read_campaign_structure\", \"read_ad_performance\"])\n\npipeline.add(\n Task(\n id=\"transform_ad\",\n description=\"Creates the ad dimension table\",\n commands=[\n ExecuteSQL(sql_file_name=\"transform_ad.sql\")\n ]),\n [\"preprocess_ad\"])\n\n\ndef index_ad_parameters():\n with mara_db.postgresql.postgres_cursor_context(default_db_alias()) as cursor:\n cursor.execute('''select util.get_columns('fb_dim_next', 'ad', '%_name');''')\n return cursor.fetchall()\n\n\npipeline.add(\n ParallelExecuteSQL(\n id=\"index_ad\",\n description=\"Adds indexes to all columns of the ad dimension\",\n sql_statement='''SELECT util.add_index('fb_dim_next', 'ad', column_names:=ARRAY[''@@param_1@@'']);''',\n parameter_function=index_ad_parameters,\n parameter_placeholders=[\"'@@param_1@@'\"]),\n [\"transform_ad\"])\n\npipeline.add(\n Task(\n id=\"transform_ad_performance\",\n description=\"Creates the fact table of the facebook cube\",\n commands=[\n ExecuteSQL(sql_file_name=\"transform_ad_performance.sql\")\n ]),\n [\"read_ad_performance\"])\n\n\ndef index_ad_performance_parameters():\n with mara_db.postgresql.postgres_cursor_context(default_db_alias()) as cursor:\n cursor.execute('''SELECT util.get_columns('fb_dim_next', 'ad_performance', '%_fk');''')\n return cursor.fetchall()\n\n\npipeline.add(\n ParallelExecuteSQL(\n id=\"index_ad_performance\",\n description=\"Adds indexes to all fk columns of the ad performance fact table\",\n sql_statement='''SELECT util.add_index('fb_dim_next', 'ad_performance',column_names := ARRAY [''@@param_1@@'']);''',\n parameter_function=index_ad_performance_parameters,\n parameter_placeholders=[\"'@@param_1@@'\"]),\n [\"transform_ad_performance\"])\n\npipeline.add(\n Task(\n id=\"transform_ad_attribute\",\n description=\"Creates the ad_attribute dimension table\",\n commands=[\n ExecuteSQL(sql_file_name=\"transform_ad_attribute.sql\")\n ]),\n [\"preprocess_ad\"])\n\npipeline.add_final(\n Task(\n id=\"replace_dim_schema\",\n description=\"Replaces the current dim schema with the contents of dim_next\",\n commands=[\n ExecuteSQL(sql_statement=\"SELECT fb_tmp.constrain_ad_performance();\"),\n ExecuteSQL(sql_statement=\"SELECT fb_tmp.constrain_ad_attribute_mapping();\"),\n ExecuteSQL(sql_statement=\"SELECT util.replace_schema('fb_dim','fb_dim_next');\")\n ]))\n", "id": "8129749", "language": "Python", "matching_score": 2.9407639503479004, "max_stars_count": 0, "path": "facebook_ads_performance_pipeline/__init__.py" }, { "content": "\"\"\"\nConfigures facebook ads performance pipeline\n\"\"\"\n\n\ndef input_file_version():\n \"\"\"A suffix that is added to input files, denoting a version of the data format\"\"\"\n return 'v2'\n", "id": "10716866", "language": "Python", "matching_score": 0.045129794627428055, "max_stars_count": 0, "path": "facebook_ads_performance_pipeline/config.py" }, { "content": "\"\"\"Command line interface for facebook downloader\"\"\"\n\nimport logging\nfrom functools import partial\n\nimport click\nfrom facebook_downloader import config\n\n\ndef config_option(config_function):\n \"\"\"Helper decorator that turns an option function into a cli 
option\"\"\"\n return (lambda function: click.option('--' + config_function.__name__,\n help=config_function.__doc__.strip() + '. Example: \"' +\n str(config_function()) + '\"')(function))\n\n\ndef apply_options(kwargs):\n \"\"\"Applies passed cli parameters to config.py\"\"\"\n for key, value in kwargs.items():\n if value: setattr(config, key, partial(lambda v: v, value))\n\n\[email protected]()\n@config_option(config.app_id)\n@config_option(config.app_secret)\n@config_option(config.access_token)\n@config_option(config.data_dir)\n@config_option(config.first_date)\n@config_option(config.download_today)\n@config_option(config.redownload_window)\n@config_option(config.target_accounts)\n@config_option(config.number_of_ad_performance_threads)\ndef download_data(**kwargs):\n \"\"\"\n Downloads data.\n When options are not specified, then the defaults from config.py are used.\n \"\"\"\n from facebook_downloader import downloader\n\n apply_options(kwargs)\n logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n downloader.download_data()\n", "id": "10788767", "language": "Python", "matching_score": 0.2830202579498291, "max_stars_count": 0, "path": "facebook_downloader/cli.py" }, { "content": "import os\nimport sys\nfrom locust import FastHttpUser, task, between, constant\nfrom bs4 import BeautifulSoup\nimport threading\n\nsys.path.append(os.path.dirname(__file__) + '/src')\nfrom storefront import Storefront\nfrom context import Context\nfrom erp import ERP\nfrom api import Api\n\ncontext = Context()\n\ndef run_erp():\n erp = ERP(context)\n erp.run()\n\ntimer = threading.Timer(5.0, run_erp)\ntimer.start()\n\nclass Customer(FastHttpUser):\n wait_time = between(2, 10)\n\n @task(4)\n def short_time_listing_visitor(self):\n page = Storefront(self.client, context)\n page = page.go_to_listing()\n page = page.view_products(2)\n\n page = page.go_to_next_page()\n page = page.view_products(3)\n\n @task(4)\n def short_time_search_visitor(self):\n page = Storefront(self.client, context)\n page = page.do_search()\n page = page.view_products(2)\n\n page = page.go_to_next_page()\n page = page.view_products(2)\n page = page.go_to_next_page()\n\n page = page.add_manufacturer_filter()\n page = page.select_sorting()\n page = page.view_products(3)\n\n @task(3)\n def long_time_visitor(self):\n page = Storefront(self.client, context)\n\n # search products over listings\n page = page.go_to_listing()\n\n # take a look to the first two products\n page = page.view_products(2)\n page = page.go_to_next_page()\n\n # open two different product pages\n page = page.view_products(2)\n\n # sort listing and use properties to filter\n page = page.select_sorting()\n page = page.add_property_filter()\n\n page = page.view_products(1)\n page = page.go_to_next_page()\n\n # switch to search to find products\n page = page.do_search()\n page = page.view_products(2)\n\n # use property filter to find products\n page = page.add_property_filter()\n\n # take a look to the top three hits\n page = page.view_products(3)\n page = page.go_to_next_page()\n\n @task(3)\n def short_time_buyer(self):\n page = Storefront(self.client, context)\n page = page.register() #instead of login, we register\n page = page.go_to_account_orders()\n\n page = page.go_to_listing()\n page = page.view_products(2)\n page = page.add_product_to_cart()\n page = page.add_product_to_cart()\n page = page.instant_order()\n page = page.logout()\n\n @task(2)\n def long_time_buyer(self):\n page = Storefront(self.client, context)\n page = 
page.register() #instead of login, we register\n page = page.go_to_account()\n page = page.go_to_account_orders()\n\n # search products over listings\n page = page.go_to_listing()\n\n # take a look to the first two products\n page = page.view_products(2)\n page = page.add_product_to_cart()\n page = page.go_to_next_page()\n\n # open two different product pages\n page = page.view_products(2)\n page = page.add_product_to_cart()\n\n # sort listing and use properties to filter\n page = page.select_sorting()\n page = page.add_property_filter()\n page = page.view_products(1)\n page = page.go_to_next_page()\n page = page.add_product_to_cart()\n page = page.instant_order()\n\n # switch to search to find products\n page = page.do_search()\n page = page.view_products(2)\n\n # use property filter to find products\n page = page.add_property_filter()\n\n # take a look to the top three hits\n page = page.view_products(3)\n page = page.add_product_to_cart()\n page = page.add_product_to_cart()\n page = page.go_to_next_page()\n\n page = page.view_products(2)\n page = page.add_product_to_cart()\n page = page.add_product_to_cart()\n page = page.add_product_to_cart()\n\n page = page.instant_order()\n page = page.logout()\n", "id": "3831149", "language": "Python", "matching_score": 1.3351635932922363, "max_stars_count": 0, "path": "src/Core/DevOps/Locust/locustfile.py" }, { "content": "from api import Api\nimport time\n\nclass ERP:\n def __init__(self, context):\n self.context = context\n self.api = Api(context)\n\n def run(self):\n while True:\n self.api.update_stock()\n time.sleep(10)\n self.api.update_prices()\n time.sleep(10)\n # refresh token\n self.context.token = self.context.get_token()\n", "id": "8426176", "language": "Python", "matching_score": 0.4239497184753418, "max_stars_count": 0, "path": "src/Core/DevOps/Locust/src/erp.py" } ]
1.169963
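The facebook_downloader CLI in the entry above explains its pattern in the config_option docstring: each setting lives in a small function in config.py, a decorator turns that function into a click option (reusing its name, docstring and current return value for --help), and apply_options patches any supplied value back into the config module. A reduced sketch of the same idea is given below; the config stand-in class and its data_dir function are invented for illustration.

# Sketch of the "config function -> click option" pattern (the config class and data_dir() are invented here).
from functools import partial
import click


class config:
    """Stand-in for a config.py module: one function per setting, returning the default."""
    @staticmethod
    def data_dir():
        """Directory where downloaded files are stored"""
        return '/tmp/data'


def config_option(config_function):
    """Turns a config function into a --<name> click option with its docstring as help text."""
    return click.option('--' + config_function.__name__,
                        help=config_function.__doc__.strip()
                        + '. Example: "' + str(config_function()) + '"')


def apply_options(kwargs):
    """Overrides config functions with any values passed on the command line."""
    for key, value in kwargs.items():
        if value:
            setattr(config, key, partial(lambda v: v, value))


@click.command()
@config_option(config.data_dir)
def download(**kwargs):
    apply_options(kwargs)
    click.echo('data_dir is now ' + config.data_dir())


if __name__ == '__main__':
    download()  # e.g. `python sketch.py --data_dir /srv/files`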
lucyhjiang
[ { "content": "import numpy as np\nimport fcsparser\nimport csv\nimport matplotlib.pyplot as plt\nfrom scipy.stats import gaussian_kde\n\npath='/home/lucy/test.fcs'\n##print(path)\nmeta,data=fcsparser.parse(path,reformat_meta=True)\n##print(data)\n#plt.scatter(data['SSC-A'],data['SSC-H'], alpha=0.8,color='gray')\n#plt.show()\n\n#print(data['SSC-A'])\n#print(data['SSC-H'])\nSSC = np.vstack([data['SSC-A'],data['SSC-H']])\n##print(SSC)\nSSC_z = gaussian_kde(SSC)(SSC)\nidx = SSC_z.argsort()\nx,y,z = data['SSC-A'][idx],data['SSC-H'][idx],SSC_z[idx]\n\nN = round(len(x)/2.5)\nfig,ax=plt.subplots()\n#ax.scatter(data['SSC-A'],data['SSC-H'],c=SSC_z,s=100, edgecolor='')\n#ax.scatter(x,y, c=z,s=50, edgecolor='')\nax.scatter(x[N:],y[N:], c=z[N:],s=50, edgecolor='')\nax.scatter(x[:N],y[:N], c='r',s=50, edgecolor='')\nplt.show()\n\n", "id": "6114572", "language": "Python", "matching_score": 1.7039130926132202, "max_stars_count": 0, "path": "flow.py" }, { "content": "#Program to solve heat equations using Forwarrd time centered space scje,e\n\nimport numpy\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\npp = PdfPages('finite_difference.pdf')\n\n#Initialize parameters\ntau = float(input ('Enter time step size: '))\nN = int(input (\"Enter the number of grid points: \"))\n\n#Analyze if the solution is stable with the current gridsize\nukappa = 1. #diffusion coefficient for u\nvkappa = 10. #diffusion coefficient for v\n\nL = 10. #total size \nh = L/(N) #grid size\ncoeff = vkappa * tau/h**2\nprint(coeff)\n\nif coeff <= 0.5:\n\tprint(\"Solution is stable with currect gridsize\")\nelse:\n\tprint(\"Solution is not stable with the current gridsize\")\n\n\nnstep = int(input (\"Enter the number of steps: \")) \nplotstep = int(input(\"Enter the plotstep: \"))\ngamma = float(input(\"Enter the scaling parameter: \"))\n\n#gamma = 70. 
#scaling parameter\n\n\n\na = 92.\nb = 64.\nK = 0.1\nalpha = 1.5\nrho = 18.5\n\n\n#Define X/Y grid\nX = numpy.arange(0, 10, h)\n##print(\"Size of X: \", len(X))\n\n\n\n\n#Set initial condition\n##uu = numpy.zeros((N,N))\n##uu[round((N-1)/2),round((N-1)/2)] = 1/h #delta function\nuu = numpy.random.rand(N,N)\n##vv = numpy.zeros((N,N));\n##vv[round((N-1)/2),round((N-1)/2)] = 1/h #delta function\nvv = numpy.random.rand(N,N)\n\n##print(uu)\n##print(vv)\n##print(\"Size of uu: \", len(uu))\n\n\n\n#Loop through time\nfor istep in range(nstep):\n\t#initiate temp storage matrix\n\tuu_new = numpy.zeros((N,N));\n\tvv_new = numpy.zeros((N,N));\n\t#Loop through x\n\tfor ix in range(N):\n\t\tfor iy in range(N):\n\t\t\tif ix == 0 and iy ==0:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix+1,iy] + uu[ix,iy+1] - 2*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix+1,iy] + vv[ix,iy+1] - 2*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\telif ix == 0 and iy == N-1:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix+1,iy] + uu[ix,iy-1] - 2*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix+1,iy] + vv[ix,iy-1] - 2*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\telif ix == N-1 and iy == 0:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix-1,iy] + uu[ix,iy+1] - 2*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix-1,iy] + vv[ix,iy+1] - 2*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\telif ix == N-1 and iy == N-1:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix-1,iy] + uu[ix,iy-1] - 2*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix-1,iy] + vv[ix,iy-1] - 2*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\telif ix == 0:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix+1,iy] + uu[ix,iy+1] + uu[ix,iy-1] - 3*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix+1,iy] + vv[ix,iy+1] + vv[ix,iy-1] - 3*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\telif iy == 0:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix+1,iy] + uu[ix-1,iy] + uu[ix,iy+1] - 3*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix+1,iy] + vv[ix-1,iy] + vv[ix,iy+1] - 3*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\telif ix == N-1:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix-1,iy] + uu[ix,iy+1] + uu[ix,iy-1] - 3*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 
)))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix-1,iy] + vv[ix,iy+1] + vv[ix,iy-1] - 3*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\telif iy == N-1:\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix+1,iy] + uu[ix-1,iy] + uu[ix,iy-1] - 3*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix+1,iy] + vv[ix-1,iy] + vv[ix,iy-1] - 3*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\t##uu_new[ix,iy] = 6\n\t\t\telse:\n\t\t\t\t##uu_new[ix,iy] = 8\n\t\t\t\tuu_new[ix,iy] = uu[ix,iy] + tau * ( ukappa/h**2 * ( uu[ix+1,iy] + uu[ix-1,iy] + uu[ix,iy+1] + uu[ix,iy-1] - 4*uu[ix,iy]) + gamma * (a - uu[ix,iy] - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\tvv_new[ix,iy] = vv[ix,iy] + tau * ( ukappa/h**2 * ( vv[ix+1,iy] + vv[ix-1,iy] + vv[ix,iy+1] + vv[ix,iy-1] - 4*vv[ix,iy]) + gamma * (alpha * (b-vv[ix,iy]) - (rho*uu[ix,iy]*vv[ix,iy])/(1 + uu[ix,iy] + K * uu[ix,iy]**2 )))\n\t\t\t\t\n\t##print(uu_new)\n\t##print(vv_new)\n\n\tuu = uu_new\n\tvv = vv_new\n\tprint(\"Now is at step: \", istep)\n\tif (istep+1)%plotstep == 0:\n\t\tfig = plt.figure()\n\t\tplt.clf()\n\t\ttime = (istep+1) * tau\n\t\tplt.contourf(X,X,uu)\n\t\tplt.title('Time = %i' %time)\n\t\tpp.savefig()\n\t\t\n\nprint(uu)\nprint(vv)\n\npp.close()\n\n\n\n\n\t\n\n\t \n", "id": "12266253", "language": "Python", "matching_score": 0.9033676981925964, "max_stars_count": 0, "path": "dftcs.py" }, { "content": "# Discrete Fourier transform demonstration (python v3)\n\n# Initialize the since wave time series to be transformed\n# Pesudo code\n# 1. Input: get user input of original vector\n# 1a: Input vector manually\n# 1b: Input vector by a function: size of vector, frequency and phase of function\n# 2. Fourier transform the vector into another vector into frequency space\n# 3. 
Plot out original vector, and transformed vector\n\nimport cmath\n\n# get user-input vector\nv = [complex(x) for x in input().split()]\n# count size of vector\nN = len(v)\n\n\n# initialize transformed vector\nvft = list(range(N))\n\n\n# calculate transformed vector using for loop\nfor y in range(N):\n\tsum = 0\n\tfor x in range(N):\n\t\tpsum =v[x]*cmath.exp(complex(0, -2*cmath.pi*x*y/N))\n\t\tpsum = complex(round(psum.real),round(psum.imag))\n\t\tsum = sum + psum\n\t\t# print (psum)\n\tvft[y] = 1/N*sum\n\t\n# Print calculated loop\nprint(vft)\n", "id": "11422337", "language": "Python", "matching_score": 1.9301891326904297, "max_stars_count": 0, "path": "ft.py" }, { "content": "# This code convert number from decimal form into binary (python v3)\n\n# Input number (integer or fraction part)\n# Output converted binary number\n\nimport math \nimport sys\n\n# Getting user input\n# n = eval(input (\" Please enter your number: \"))\nn = eval(sys.argv[1])\nprint (n)\n\n\n# Separate into integer and fraction part\na = math.trunc(n)\nb = n - a\n\n\n# initialize variable\nc = 10000\nv = []\n\n\n# Calculate binary conversion for integer part\nwhile c != 0:\n\tc = math.floor (a/2)\n\td = a - c * 2\n\tv.extend([str(d)])\n\ta = c\nv.reverse()\n\n# Calculate binary conversion for fraction part\n\nif b == 0:\n\tprint (''.join(v))\nelse:\n\tv.extend(\".\")\n\twhile c <= 20 and b != 0: \n\t\te = b * 2\n\t\tf = math.trunc(e)\n\t\tv.extend([str(f)])\n\t\tb = e - f\n\tprint (''.join(v))\n\n\n\n", "id": "3076189", "language": "Python", "matching_score": 1.8644636869430542, "max_stars_count": 0, "path": "decimal_to_binary.py" } ]
1.784188
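ft.py in the entry above spells out the discrete Fourier transform as nested loops over X[k] = (1/N) * sum_n x[n] * exp(-2*pi*i*k*n/N), folding the 1/N normalisation into the forward transform. The short check below, using a made-up input vector, compares that direct sum against numpy.fft.fft under the same normalisation.

# Naive O(N^2) DFT compared against numpy's FFT (input vector invented for the check).
import cmath
import numpy as np

x = [1, 2 + 1j, -1, 0.5]          # arbitrary test signal
N = len(x)

# Direct evaluation of X[k] = (1/N) * sum_n x[n] * exp(-2*pi*i*k*n/N)
naive = [sum(x[n] * cmath.exp(-2j * cmath.pi * k * n / N) for n in range(N)) / N
         for k in range(N)]

# numpy computes the unnormalised sum, so divide by N to match the convention above.
fast = np.fft.fft(x) / N

print(np.allclose(naive, fast))   # True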
davismartin
[ { "content": "import requests\n\nurl = \"http://mockbin.com/har\"\n\nquerystring = {\"foo\":[\"bar\",\"baz\"],\"baz\":\"abc\",\"key\":\"value\"}\n\npayload = \"foo=bar\"\nheaders = {\n \"cookie\": \"foo=bar; bar=baz\",\n \"accept\": \"application/json\",\n \"content-type\": \"application/x-www-form-urlencoded\"\n}\n\nresponse = requests.request(\"POST\", url, data=payload, headers=headers, params=querystring)\n\nprint(response.text)\n", "id": "5801362", "language": "Python", "matching_score": 0, "max_stars_count": 557, "path": "test/fixtures/output/python/requests/full.py" } ]
0
SofSei
[ { "content": "import argparse\nfrom posixpath import join\nimport subprocess\nimport threading\nfrom pathlib import Path\n\n\ndef assemble (forward,reverse,output):\n return subprocess.call (f'metaspades.py -1 {forward} -2 {reverse} -o {output}',shell = True)\ndef prodigal (folder, output):\n Path(output).mkdir(parents=True, exist_ok=True)\n return subprocess.call(f\"prodigal -a {output}/contigs.aa.fasta -d {output}/contigs.nuc.fasta -i {folder}/contigs.fasta -f gff -p meta > {output}/contigs.gff\")\n\n\n\nparser = argparse.ArgumentParser(description='Give row data')\nparser.add_argument('-f', '--input1', help='forward sequence')\nparser.add_argument('-r', '--input2', help=' reverse sequnece')\nparser.add_argument('-o', '--output', help='Output folder')\narguments = parser.parse_args()\narguments = arguments.__dict__\nprint(arguments)\nforward = arguments['input1']\nreverse = arguments['input2']\noutput = arguments['output']\ntry:\n assembly = threading.Thread(target=assemble, args=(forward, reverse, output))\n assembly.start()\n assembly.join()\n annotation = threading.Thread (target = prodigal, args = (output, f\"{output}\"))\n annotation.start()\n annotation.join()\n\nexcept:\n print('Error')\n", "id": "1718178", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "meta.py" } ]
0
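In meta.py above, each threading.Thread is started and then joined immediately, so assembly and annotation still run one after the other and the threads add nothing; since prodigal needs metaspades' contigs anyway, the pipeline can simply be sequential. A minimal sketch under that reading, with the tool invocations taken from the record; the function name run_pipeline, the asm_dir/ann_dir parameters, and the exit-code checks are illustrative additions.

import subprocess
from pathlib import Path

def run_pipeline(forward, reverse, asm_dir, ann_dir):
    # metaspades must finish before prodigal can read its contigs, so run the steps in order.
    rc = subprocess.call(f"metaspades.py -1 {forward} -2 {reverse} -o {asm_dir}", shell=True)
    if rc != 0:
        raise RuntimeError(f"metaspades.py exited with code {rc}")
    Path(ann_dir).mkdir(parents=True, exist_ok=True)
    rc = subprocess.call(
        f"prodigal -a {ann_dir}/contigs.aa.fasta -d {ann_dir}/contigs.nuc.fasta "
        f"-i {asm_dir}/contigs.fasta -f gff -p meta > {ann_dir}/contigs.gff",
        shell=True,  # the command needs the shell for the > redirection
    )
    if rc != 0:
        raise RuntimeError(f"prodigal exited with code {rc}")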
mwangel
[ { "content": "#!/usr/bin/python3\nwith open('data') as f:\n lines = f.readlines()\n\nn = 0\nfor i in range(len(lines)-1):\n\tif int(lines[i]) < int(lines[i+1]):\n\t\tn += 1\n\nprint('python3, day 1, part 1 :', n)\n", "id": "364142", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "01/y1.py" }, { "content": "#!/usr/bin/python3\n\nwith open('data') as f: lines = f.readlines()\n\ndata = list( map( lambda s: int(s), lines ) )\nresult = 0\nfor n in range( len(data)-3 ):\n\ta = data[n] + data[n+1] + data[n+2]\n\tb = data[n+1] + data[n+2] + data[n+3]\n\tif a < b: result += 1\n\nprint(result) # 1471\n", "id": "9614791", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "01/y2.py" }, { "content": "#!/usr/bin/python3\nlines = []\nwith open('data.txt') as f:\n lines = f.readlines()\n\nx = 0\nd = 0\nfor s in lines:\n\t[dir,n] = s.split()\n\tif dir == 'forward': x = x + int(n)\n\telif dir == 'up' : d = d - int(n)\n\telif dir == 'down' : d = d + int(n)\n\telse : raise Error('apa')\n\nprint(x*d)\n", "id": "9224950", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "02/y1.py" } ]
0
komalsharma037
[ { "content": "import os,mysql.connector as conn\r\nfrom tabulate import tabulate\r\n\r\nusername=input(\"Enter Username : \")\r\npassword=input(\"Enter Password : \")\r\n\r\nmydb=conn.connect(host=\"Your Host Name\",user=\"Your Username\",password=\"<PASSWORD>\",database=\"Your Database\")\r\nmyconn=mydb.cursor()\r\nmyconn.execute(\"SELECT * FROM admin WHERE username='\"+username+\"' and password='\"+password+\"'\")\r\nadmin_data=myconn.fetchone()\r\nif myconn.rowcount==1 :\r\n print(\"\\n\\n===== Welcome, \"+admin_data[1]+\"=======\")\r\n print(\"---------------------------\\n\")\r\n print(\"1. Add User\\n2. Delete User\\n3. See User Result\\n4. Add Quiz\\n5. Delete Quiz\\n\")\r\n first=input(\"Choose: \")\r\n if first=='1':\r\n print(\"\\n===== Add User =======\")\r\n user_name=input(\"Enter Name: \")\r\n user_un=input(\"Enter Username: \")\r\n user_pass=input(\"Enter Password: \")\r\n myconn.execute(\"INSERT INTO users(name,username,password) VALUES('\"+user_name+\"','\"+user_un+\"','\"+user_pass+\"')\")\r\n mydb.commit()\r\n print(\"Inserted\")\r\n elif first=='2':\r\n print(\"\\n===== Delete User =======\")\r\n user_id=input(\"Enter User's ID : \")\r\n myconn.execute(\"DELETE FROM users WHERE id='\"+user_id+\"'\")\r\n mydb.commit()\r\n print(\"Deleted\")\r\n elif first=='3':\r\n print(\"\\n============= User Results =============\\n\")\r\n myconn.execute(\"SELECT name,quiz,marks,date_time FROM users,marks WHERE users.id=marks.user_id\")\r\n result=myconn.fetchall()\r\n print(tabulate(result, headers=[\"Name\",\"Quiz\",\"Marks\",\"Quiz Time\"], tablefmt=\"pretty\"))\r\n elif first=='4':\r\n print(\"\\n===== Add Quiz =======\")\r\n quiz_name=input(\"Enter Quiz Name : \")\r\n f=open(quiz_name+\".json\",\"w\")\r\n x='{\"name\":\"'+quiz_name+'\",\"quiz\":[]}'\r\n f.write(x)\r\n f.close()\r\n myconn.execute(\"INSERT INTO quiz(quiz_name,file) VALUES('\"+quiz_name+\"','\"+quiz_name+\".json')\")\r\n mydb.commit()\r\n print(\"Inserted\") \r\n elif first=='5':\r\n print(\"\\n===== Delete Quiz =======\")\r\n quiz_n=input(\"Enter Quiz's Name : \")\r\n os.remove(quiz_n+'.json')\r\n myconn.execute(\"DELETE FROM quiz WHERE quiz_name='\"+quiz_n+\"'\")\r\n mydb.commit()\r\n print(\"Deleted\") \r\n else:\r\n pass\r\nelse:\r\n print(\"Wrong Username & Password\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ", "id": "2191462", "language": "Python", "matching_score": 0, "max_stars_count": 6, "path": "admin.py" }, { "content": "import requests,json,mysql.connector as conn\r\nfrom tabulate import tabulate\r\n\r\nmarks=0\r\nusername=input(\"Enter Username : \")\r\npassword=input(\"Enter Password : \")\r\n\r\nmydb=conn.connect(host=\"Your Host Name\",user=\"Your Username\",password=\"<PASSWORD>\",database=\"Your Database\")\r\nmyconn=mydb.cursor()\r\n\r\nmyconn.execute(\"SELECT * FROM quiz\")\r\nall_quiz = myconn.fetchall()\r\n\r\nmyconn.execute(\"SELECT * FROM users WHERE username='\"+username+\"' and password='\"+password+\"'\")\r\nuser_data = myconn.fetchone()\r\n\r\n\r\nif myconn.rowcount==1 :\r\n print(\"===== Welcome, \"+user_data[1]+\"=======\\n\")\r\n print(\"1. Start Quiz\\n2. See Results\")\r\n first=input(\"Choose: \")\r\n if first=='1':\r\n print(\"\\n==== Select Your Quiz ====\\n\")\r\n for x in range(len(all_quiz)):\r\n print(str(x+1)+\". 
\"+all_quiz[x][1]+\"\")\r\n quiz_n=input(\"Enter Your Choice (Quiz Name):\")\r\n \r\n r=requests.get(\"http://your_domain/\"+quiz_n+\".json\")\r\n b=r.json()\r\n for i in range(len(b[\"quiz\"])) :\r\n print(\"\\n\")\r\n print(\"Q\"+str(i+1)+\". \"+b[\"quiz\"][i][\"name\"])\r\n print(\"\\t1.\"+b[\"quiz\"][i][\"1\"])\r\n print(\"\\t2.\"+b[\"quiz\"][i][\"2\"])\r\n print(\"\\t3.\"+b[\"quiz\"][i][\"3\"]) \r\n print(\"\\t4.\"+b[\"quiz\"][i][\"4\"])\r\n ans=input(\"Enter Your Choose Answer No.: \")\r\n if ans==b[\"quiz\"][i][\"answer\"] :\r\n print(\" Answer is Correct\")\r\n marks=marks+1\r\n else :\r\n print(\" Answer is Not Correct:\")\r\n total=marks*100/len(b[\"quiz\"])\r\n print(\"\\nPercenatge is: \"+str(total)+\"%\")\r\n print(\"Total Marks is: \"+str(marks)+\" By \"+str(len(b[\"quiz\"]))+\"\")\r\n myconn.execute(\"INSERT INTO marks(quiz,marks,user_id) VALUES('\"+b[\"name\"]+\"','\"+str(marks)+\"','\"+str(user_data[0])+\"')\")\r\n mydb.commit()\r\n else:\r\n myconn.execute(\"SELECT quiz,marks,date_time FROM marks WHERE user_id='\"+str(user_data[0])+\"'\")\r\n result_data = myconn.fetchall()\r\n print(tabulate(result_data, headers=[\"Quiz\",\"Marks\",\"Quiz Time\"], tablefmt=\"pretty\")) \r\nelse :\r\n print(\"Wrong Username & Password\")\r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "id": "2398725", "language": "Python", "matching_score": 0, "max_stars_count": 6, "path": "project.py" } ]
0
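Both admin.py and project.py in the record above build SQL by concatenating raw user input, which breaks on quotes and is open to SQL injection; mysql-connector accepts %s placeholders with a parameter tuple, so the same lookups can be written safely. A minimal sketch of the login query in that style, reusing the placeholder connection values from the original; the helper name find_user is illustrative.

import mysql.connector as conn

def find_user(cursor, username, password):
    # %s placeholders let the driver escape the values instead of string concatenation.
    cursor.execute(
        "SELECT * FROM users WHERE username = %s AND password = %s",
        (username, password),
    )
    return cursor.fetchone()

mydb = conn.connect(host="Your Host Name", user="Your Username",
                    password="<PASSWORD>", database="Your Database")
cur = mydb.cursor()
user = find_user(cur, input("Enter Username : "), input("Enter Password : "))
print(("Welcome, " + user[1]) if user else "Wrong Username & Password")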
stgztsw
[ { "content": "#!/usr/bin/env python\n# encoding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport logging\nfrom concurrent import futures\n\nimport grpc\n\nimport function_service_pb2\nimport function_service_pb2_grpc\nimport types_pb2\nimport sys\n\n\nclass FunctionServerDemo(function_service_pb2_grpc.PFunctionServiceServicer):\n def fn_call(self, request, context):\n response = function_service_pb2.PFunctionCallResponse()\n status = types_pb2.PStatus()\n status.status_code = 0\n response.status.CopyFrom(status)\n\n if request.function_name == \"add_int\":\n result = types_pb2.PValues()\n result.has_null = False\n result_type = types_pb2.PGenericType()\n result_type.id = types_pb2.PGenericType.INT32\n result.type.CopyFrom(result_type)\n result.int32_value.extend([x + y for x, y in zip(request.args[0].int32_value, request.args[1].int32_value)])\n response.result.CopyFrom(result)\n return response\n\n def check_fn(self, request, context):\n response = function_service_pb2.PCheckFunctionResponse()\n status = types_pb2.PStatus()\n status.status_code = 0\n response.status.CopyFrom(status)\n return response\n\n def hand_shake(self, request, context):\n response = types_pb2.PHandShakeResponse()\n if request.HasField(\"hello\"):\n response.hello = request.hello\n status = types_pb2.Pstatus()\n status.status_code = 0\n response.status.CopyFrom(status)\n return response\n\n\ndef serve(port):\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n function_service_pb2_grpc.add_PFunctionServiceServicer_to_server(FunctionServerDemo(), server)\n server.add_insecure_port(\"0.0.0.0:%d\" % port)\n server.start()\n server.wait_for_termination()\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n port = 9000\n if len(sys.argv) > 1:\n port = sys.argv[1]\n serve(port)\n", "id": "10559569", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "samples/doris-demo/remote-udf-python-demo/function_server_demo.py" } ]
0
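In function_server_demo.py above, hand_shake constructs types_pb2.Pstatus(), which does not match the PStatus message used by fn_call and check_fn in the same file, and the port taken from sys.argv is left as a string. A small sketch of just those two spots, assuming the demo's generated protobuf modules (types_pb2, function_service_pb2_grpc) are on the path.

import sys
import types_pb2                      # generated protobuf module from the demo
import function_service_pb2_grpc


class FunctionServerDemo(function_service_pb2_grpc.PFunctionServiceServicer):
    def hand_shake(self, request, context):
        response = types_pb2.PHandShakeResponse()
        if request.HasField("hello"):
            response.hello = request.hello
        status = types_pb2.PStatus()  # PStatus, matching fn_call/check_fn
        status.status_code = 0
        response.status.CopyFrom(status)
        return response


if __name__ == "__main__":
    # keep the port numeric; add_insecure_port formats it into "0.0.0.0:%d"
    port = int(sys.argv[1]) if len(sys.argv) > 1 else 9000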
MMichael-S
[ { "content": "from .filters import filters\n", "id": "5000119", "language": "Python", "matching_score": 0, "max_stars_count": 9, "path": "_python/__init__.py" }, { "content": "def dateformat(value, format=\"%d-%b-%Y\"):\n return value.strftime(format)\n\nfilters = {}\nfilters['dateformat'] = dateformat\n", "id": "10323199", "language": "Python", "matching_score": 0, "max_stars_count": 9, "path": "_python/filters.py" } ]
0
Whitomtit
[ { "content": "from pyswip import Prolog\nfrom pyswip import Functor\nfrom pyswip import Variable\nfrom pyswip.prolog import PrologError\n\nDEFAULT_LIMIT = 10\n\ndef format_value(value):\n output = \"\"\n if isinstance(value, list):\n output = \"[ \" + \", \".join([format_value(val) for val in value]) + \" ]\"\n elif isinstance(value, Functor) and value.arity == 2:\n output = \"{0}{1}{2}\".format(value.args[0], value.name, value.args[1])\n else:\n output = \"{}\".format(value)\n return output\n\ndef format_functor(functor):\n if not isinstance(functor, Functor):\n return str(functor)\n return \"{}{}{}\".format(format_functor(functor.args[0]), str(functor.name), format_functor(functor.args[1]))\n\ndef format_result(result, request, maxresults, prolog):\n result = list(result)\n\n if len(result) == 0:\n return \"false.\"\n output = \"\"\n for i, res in enumerate(result):\n if len(res) == 0:\n output += \"true;\\n\"\n continue\n tmpVarOutput = []\n vars = {}\n add_run = []\n for var in res:\n if isinstance(res[var], Variable):\n if not res[var].chars:\n add_run.append(var)\n continue\n id = res[var].chars\n if id in vars:\n vars[id].append(var)\n else:\n vars[id] = [var]\n tmpVarOutput.append(res[var])\n else:\n tmpVarOutput.append(var + \" = \" + format_value(res[var]))\n tmpOutput = []\n for line in tmpVarOutput:\n if isinstance(line, Variable):\n id = line.chars\n if len(vars[id]) == 1:\n tmpOutput.append(vars[id][0] + \" = \" + str(line))\n else:\n tmpOutput.append(\" = \".join(vars[id]))\n else:\n tmpOutput.append(line)\n if add_run:\n request = \"{}, copy_term([{}], [{}], __ADD_INFO).\".format(request, \", \".join(add_run), \", \".join(add_run))\n add_data = list(prolog.query(request, maxresult=maxresults))[i]['__ADD_INFO']\n for vi, v in enumerate(add_run):\n functor = add_data[vi].args[1]\n sub_functor = functor.args[1]\n v_res = \"{} {} {}\".format(v, str(functor.name), format_functor(functor.args[1]))\n tmpOutput.append(v_res)\n output += \", \".join(tmpOutput) + \";\\n\"\n output = output[:-2] + \".\"\n\n return output\n\ndef run(code):\n prolog = Prolog()\n\n output = []\n ok = True\n\n tmp = \"\"\n isQuery = False\n for line in code.split(\"\\n\"):\n line = line.split(\"%\", 1)[0]\n line = line.strip()\n if line == \"\" or line[0] == \"%\":\n continue\n\n if line[:2] == \"?-\" or line[:2] == \":-\":\n isQuery = True\n isSilent = line[:2] == \":-\"\n line = line[2:]\n tmp += \" \" + line\n\n if tmp[-1] == \".\":\n # End of statement\n tmp = tmp[:-1] # Removes \".\"\n maxresults = DEFAULT_LIMIT\n # Checks for maxresults\n if tmp[-1] == \"}\":\n tmp = tmp[:-1] # Removes \".\"\n limitStart = tmp.rfind('{')\n if limitStart == -1:\n ok = False\n output.append(\"ERROR: Found '}' before '.' 
but opening '{' is missing!\")\n else:\n limit = tmp[limitStart+1:]\n try:\n maxresults = int(limit)\n except:\n ok = False\n output.append(\"ERROR: Invalid limit {\" + limit + \"}!\")\n tmp = tmp[:limitStart]\n\n try:\n if isQuery:\n result = prolog.query(tmp, maxresult=maxresults)\n formatted = format_result(result, tmp, maxresults, prolog)\n if not isSilent:\n output.append(formatted)\n result.close()\n else:\n prolog.assertz('(' + tmp + ')')\n except PrologError as error:\n ok = False\n output.append(\"ERROR: {}\".format(error))\n\n tmp = \"\"\n isQuery = False\n\n return output, ok\n", "id": "6872344", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "swipl.py" }, { "content": "#!/bin/env python3\n\nimport tempfile\nimport swipl\n\neurope_pl = \"\"\"\nneighbours(austria , [czech_republic, germany, hungary, italy,\n slovenia, slovakia]).\nneighbours(belgium , [france, netherlands, luxemburg, germany,\n united_kingdom]).\nneighbours(bulgaria , [romania, greece]).\nneighbours(croatia , [slovenia, hungary]).\nneighbours(cyprus , [greece]).\nneighbours(czech_republic , [germany, poland, slovakia, austria]).\nneighbours(denmark , [germany, sweden]).\nneighbours(estonia , [finland, latvia, lithuania]).\nneighbours(finland , [estonia, sweden]).\nneighbours(france , [spain, belgium, luxemburg, germany, italy,\n united_kingdom]).\nneighbours(germany , [netherlands, belgium, luxemburg, denmark,\n france, austria, poland]).\nneighbours(greece , [bulgaria, cyprus]).\nneighbours(hungary , [austria, slovakia, romania, croatia,\n slovenia]).\nneighbours(ireland , [united_kingdom]).\nneighbours(italy , [france, austria, slovenia]).\nneighbours(latvia , [estonia, lithuania]).\nneighbours(luxemburg , [belgium, france, germany]).\nneighbours(malta , []).\nneighbours(netherlands , [belgium, germany , united_kingdom]).\nneighbours(poland , [germany, czech_republic, slovakia,\n lithuania]).\nneighbours(portugal , [spain]).\nneighbours(romania , [hungary, bulgaria]).\nneighbours(slovakia , [czech_republic, poland, hungary, austria]).\nneighbours(slovenia , [austria, italy, hungary, croatia]).\nneighbours(spain , [france, portugal]).\nneighbours(sweden , [finland, denmark]).\nneighbours(united_kingdom , [ireland, netherlands, belgium, france]).\n\ncolour_countries(Colours) :-\nsetof(Country/_, X^neighbours(Country,X), Colours),\ncolours(Colours).\n\ncolours([]).\ncolours([Country/Colour | Rest]):-\ncolours(Rest),\nmember(Colour, [green, yellow, red, purple]),\n\\+ (member(CountryA/Colour, Rest), neighbour(Country, CountryA)).\n\nneighbour(Country, CountryA):-\nneighbours(Country, Neighbours),\nmember(CountryA, Neighbours).\n\nmember(X, [X|_]).\nmember(X, [_|Tail]) :- member(X, Tail).\n\n?- colour_countries(Map){1}.\n\"\"\"\n\nsocrates_pl = \"\"\"\nman(socrates).\nman(bob).\nmortal(X) :- man(X).\n\n?- mortal(socrates).\n?- mortal(X).\n?- mortal(socrates2).\n\"\"\"\n\ndef main():\n output, ok = swipl.run(socrates_pl)\n output, ok = swipl.run(socrates_pl)\n\n if ok:\n print(\"OK\")\n else:\n print(\"NOT OK\")\n\n print(\"\\n\".join(output))\n\nif __name__ == \"__main__\":\n main()\n", "id": "5106044", "language": "Python", "matching_score": 0, "max_stars_count": 14, "path": "test.py" } ]
0
nanjalaruth
[ { "content": "#!/usr/bin/python3\nfrom optparse import OptionParser\nimport numpy as np\nimport pandas as pd\nimport gzip\n\nparser = OptionParser()\nparser.add_option(\"-v\", \"--vcf\", dest=\"filename\",\n help=\"Input: Gzip compressed vcf file\", type=\"string\")\nparser.add_option(\"-o\", \"--output\", dest=\"output\",\n help=\"Output: csv file with SNP counts per individual\", type=\"string\")\n(options, args) = parser.parse_args()\n\nvcf_file = options.filename\noutput = options.output\n\ndef get_counts(vcf_file):\n total_SNPS = 0\n '''\n :param vcf_file: Whole Genome vcf file compressed with gzip\n :return: np array with SNP count, unique/private SNPs and total imputed/genotyped SNPs of each individual\n '''\n with gzip.open(vcf_file, 'rt') as fp:\n for line in fp:\n line = line.rstrip()\n if not line.startswith('#'):\n total_SNPS += 2\n columns = line.split('\\t')\n num_indiv = len(columns) - 9\n indices_hetero = [i for i, item in enumerate(columns) if item.startswith('1|0') or item.startswith('0|1')]\n indices_homo = [i for i, item in enumerate(columns) if item.startswith('1|1')]\n all_indices = indices_hetero + indices_homo\n\n if len(all_indices) < 1:\n continue\n elif len(indices_hetero) == 1 and len(indices_homo) == 0:\n index = indices_hetero[0]\n Singleton[index] += 1\n elif len(indices_homo) == 1 and len(indices_hetero) == 0:\n index = indices_homo[0]\n Singleton[index] += 2\n for i in indices_hetero:\n SNP_count[i] += 1\n for j in indices_homo:\n SNP_count[j] += 2\n elif line.startswith(\"##\"):\n continue\n elif line.startswith('#'):\n headerlist = line.split('\\t')\n SNP_count = [0] * len(headerlist)\n total = [0] * len(headerlist)\n Singleton = [0] * len(headerlist)\n\n for i in range(len(total)):\n total[i] = total_SNPS\n all = np.column_stack((headerlist, total, SNP_count, Singleton))\n return(all)\n\nSNP_counts = get_counts(vcf_file)\n\nSNP_counts_df = pd.DataFrame(SNP_counts)\nSNP_counts_df = SNP_counts_df.drop(SNP_counts_df.index[0:9])\nSNP_counts_df.columns = [\"Indiv\", \"total_SNPs\", \"SNP_count\", \"Singleton_count\"]\n\nSNP_counts_df.to_csv(output, index=False, header=True)\n", "id": "5873451", "language": "Python", "matching_score": 3.5918936729431152, "max_stars_count": 12, "path": "templates/Individual_SNP_count.py" }, { "content": "#!/usr/bin/python3\nimport numpy as np\nimport pandas\nfrom optparse import OptionParser\n\n# example for concatenating all files together into one file(INPUT):\n'''\n#!/usr/bin/bash\nhead -1 ./all.10.prep.withlabels.maf_only.with_coord > all.txt\nfor i in ./all.*.prep.withlabels.maf_only.with_coord\ndo\ntail -n +2 \"$i\" >> all.txt\ndone\n'''\n\nparser = OptionParser()\nparser.add_option(\"-i\", \"--input\", dest=\"filename\",\n help=\"tab delimited text file with SNP MAFs of different Population\", type=\"string\")\nparser.add_option(\"-o\", \"--output\", dest=\"output\",\n help=\"Output: csv file with SNP counts per individual\", type=\"string\")\n(options, args) = parser.parse_args()\n\n\ninput = options.filename\noutput = options.output\n\ndef get_counts(filename):\n '''\n :param filename: SNP MAFs of different Populations in a tab delimited text file:\n :return: np array with unique/private SNPs of every population\n '''\n\n first_line = True\n num_SNPs = 0\n\n with open(filename, 'rt') as fp:\n for line in fp:\n line = line.rstrip()\n list = line.split('\\t')\n del list[0]\n if first_line:\n length = len(list)\n header = list\n num_unique = [0] * length\n num_snps = [0] * length\n first_line = False\n else:\n num_SNPs 
+= 1\n index = [i for i, item in enumerate(list) if float(item) > 0]\n if len(index) > 1:\n continue\n elif len(index) == 1:\n num_unique[index[0]] += 1\n\n for i in range(len(num_snps)):\n num_snps[i] = num_SNPs\n\n return(np.column_stack((header, num_unique, num_snps)))\n\n\ndf = pandas.DataFrame(get_counts(input))\ndf.to_csv(\"Population_private_SNPs.csv\", index=False, header=True)\n", "id": "5209591", "language": "Python", "matching_score": 1.3805285692214966, "max_stars_count": 12, "path": "templates/Population_private_SNPs.py" }, { "content": "#!/usr/bin/env python2.7\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--csv_counts\", default=\"${csv_counts}\", help=\"\")\nparser.add_argument(\"--label\", default=\"${dataset}\", help=\"\")\nparser.add_argument(\"--csv_chrms\", default=\"${csv_chrms}\", help=\"\")\nparser.add_argument(\"--csv_out\", default=\"${csv_out}\", help=\"\")\n\nargs = parser.parse_args()\n\ndef combine_csv_counts(csv_counts, label, csv_chrms, csv_out):\n \"\"\"\n :param pop_freqs:\n :return:\n \"\"\"\n files = csv_counts.split(',')\n # labels = csv_labels.split(',')\n chrms = csv_chrms.split(',')\n classes = ['Group', 'CHROM']\n datas = {}\n for i in range(len(sorted(chrms))):\n data = []\n # pop = labels[i]\n chrm = chrms[i]\n datas[i] = [label, chrm]\n for line in open(files[i]):\n line = line.split(';')\n classe = line[0].strip()\n count = line[1]\n datas[i].append(count.strip())\n if classe not in classes:\n classes.append(classe)\n\n out = open(csv_out, 'w')\n out.writelines('\\\\t'.join(classes)+'\\\\n')\n for i in datas:\n out.writelines('\\\\t'.join(datas[i])+'\\\\n')\n out.close()\n\nif __name__ == '__main__':\n combine_csv_counts(args.csv_counts, args.label,\n args.csv_chrms, args.csv_out)\n\n", "id": "10497270", "language": "Python", "matching_score": 1.082145094871521, "max_stars_count": 0, "path": "templates/combine_counts.py" }, { "content": "#!/usr/bin/env python2.7\n\nimport argparse,sys\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infoFiles\", help=\"\")\nparser.add_argument(\"--outWell_imputed\", help=\"\")\nparser.add_argument(\"--outSNP_acc\", help=\"\")\nparser.add_argument(\"--infoCutoff\", help=\"\")\nparser.add_argument(\"--inWell_imputed\", help=\"\")\nparser.add_argument(\"--inSNP_acc\", help=\"\")\nparser.add_argument(\"--report_acc\", help=\"\")\nparser.add_argument(\"--ldFiles\", help=\"\")\nparser.add_argument(\"--report_ld\", help=\"\")\n\nargs = parser.parse_args()\n\ndef filter_info(infoFiles, infoCutoff, outWell_imputed, outSNP_acc):\n \"\"\"\n Return:\n well_imputed: certainy >= 1\n SNP_concordance: concord_type0 != -1\n \"\"\"\n well_imputed = {}\n SNP_concordance = {}\n count = 0\n infoFiles = infoFiles.split(',')\n datas = {}\n header = []\n outWell_imputed_out = open(outWell_imputed, 'w')\n outWell_imputed_snp_out = open(outWell_imputed+\"_snp\", 'w')\n outSNP_accuracy_out = open(outSNP_acc, 'w')\n for infoFile in infoFiles:\n infoFile = infoFile.strip().split('==')\n dataset = infoFile[0]\n info = infoFile[1]\n well_imputed[dataset] = []\n SNP_concordance[dataset] = []\n # outWell_imputed_out_dataset = open(dataset+'_'+outWell_imputed, 'w')\n for line in open(info):\n data = line.strip().split()\n if \"snp_id\" in line and \"info\" in line:\n if len(header) == 0:\n header = data\n info_idx = header.index(\"info\")\n conc_idx = header.index(\"concord_type0\")\n outWell_imputed_out.writelines(' '.join([dataset]+data)+'\\n')\n # 
outWell_imputed_out_dataset.writelines(' '.join([dataset]+data)+'\\n')\n outWell_imputed_snp_out.writelines(data[1]+'\\n')\n outSNP_accuracy_out.writelines(' '.join([dataset]+data)+'\\n')\n else:\n if float(data[info_idx]) >= float(infoCutoff):\n outWell_imputed_out.writelines(' '.join([dataset]+data)+'\\n')\n # outWell_imputed_out_dataset.writelines(' '.join([dataset]+data)+'\\n')\n outWell_imputed_snp_out.writelines(data[1]+'\\n')\n if float(data[conc_idx]) != float(-1):\n outSNP_accuracy_out.writelines(' '.join([dataset]+data)+'\\n')\n count += 1\n # outWell_imputed_out_dataset.close()\n outWell_imputed_out.close()\n outWell_imputed_snp_out.close()\n outSNP_accuracy_out.close()\n\ndef well_imputed_by_maf(inWell_imputed, outWell_imputed):\n \"\"\"\n\n :return:\n \"\"\"\n datas = {}\n outWell_imputed_out = open(outWell_imputed, 'w')\n outWell_imputed_out.writelines('\\t'.join(['CHRM', 'MAF>=5%', 'MAF>=1%', 'MAF 1-5%', 'TOTAL'])+'\\n')\n info_datas = open(inWell_imputed).readlines()\n for line in info_datas:\n data = line.strip().split()\n dataset = data[0]\n if dataset not in datas:\n datas[dataset] = {}\n datas[dataset]['1_5'] = []\n datas[dataset]['1'] = []\n datas[dataset]['5'] = []\n datas[dataset]['total'] = 0\n if \"snp_id\" in line and \"info\" in line:\n idx_exp_freq_a1 = data.index('exp_freq_a1')\n else:\n maf = float(data[idx_exp_freq_a1])\n datas[dataset]['total'] += 1\n if maf >= 0.5:\n maf = 1-maf\n if maf >= 0.01:\n datas[dataset]['1'].append(maf)\n if maf >= 0.05:\n datas[dataset]['5'].append(maf)\n if maf <= 0.05 and maf >= 0.01:\n datas[dataset]['1_5'].append(maf)\n for dataset in sorted(datas):\n tot = datas[dataset]['total']\n if tot == 0:\n outWell_imputed_out.write(\"dataset {} is empty (tot=0)\".format(dataset))\n else:\n outWell_imputed_out.writelines('\\t'.join([dataset, str(format(len(datas[dataset]['5'])/1000000., '0,.1f'))+'M ('+str(len(datas[dataset]['5']) * 100/tot)+'%)', str(format(len(datas[dataset]['1'])/1000000., '0,.1f'))+'M ('+str(len(datas[dataset]['1']) * 100/tot)+'%)', str(format(len(datas[dataset]['1_5'])/1000000., '0,.1f'))+'M ('+str(len(datas[dataset]['1_5']) * 100/tot)+'%)', str(format(tot, '0,.0f'))])+'\\n')\n outWell_imputed_out.close()\n\ndef acc_by_maf(inSNP_acc, outSNP_acc):\n \"\"\"\n :return:\n \"\"\"\n datas = {}\n\n outSNP_acc_out = open(outSNP_acc, 'w')\n outSNP_acc_out.writelines('\\t'.join(['CHRM', 'MAF>=5%', 'MAF>=1%', 'MAF 1-5%', 'TOTAL'])+'\\n')\n info_datas = open(inSNP_acc).readlines()\n for line in info_datas:\n data = line.strip().split()\n dataset = data[0]\n if dataset not in datas:\n datas[dataset] = {}\n datas[dataset]['1_5'] = []\n datas[dataset]['1'] = []\n datas[dataset]['5'] = []\n datas[dataset]['total'] = 0\n if \"snp_id\" in line and \"info\" in line:\n idx_exp_freq_a1 = data.index('exp_freq_a1')\n # idx_conc = data.index(\"concord_type0\")\n idx_conc = data.index(\"r2_type0\")\n else:\n maf = float(data[idx_exp_freq_a1])\n acc = float(data[idx_conc])\n datas[dataset]['total'] += 1\n if maf >= 0.5:\n maf = 1-maf\n if maf >= 0.01:\n datas[dataset]['1'].append(acc)\n if maf >= 0.05:\n datas[dataset]['5'].append(acc)\n if maf <= 0.05 and maf >= 0.01:\n datas[dataset]['1_5'].append(acc)\n for dataset in sorted(datas):\n tot = datas[dataset]['total']\n print tot\n\n if len(datas[dataset]['5'])==0:\n maf_5 = 0.0\n else:\n maf_5 = sum(datas[dataset]['5'])/float(len(datas[dataset]['5']))\n if len(datas[dataset]['1'])==0:\n maf_1 = 0.0\n else:\n maf_1 = sum(datas[dataset]['1'])/float(len(datas[dataset]['1']))\n if 
len(datas[dataset]['1_5'])==0:\n maf_1_5 = 0.0\n else:\n maf_1_5 = sum(datas[dataset]['1_5'])/float(len(datas[dataset]['1_5']))\n\n outSNP_acc_out.write(\"{0:.3f}\\t{0:.3f}\\t{0:.3f}\\n\".format(maf_5,maf_1, maf_1_5))\n #outSNP_acc_out.writelines('\\t'.join([dataset, str(format(maf_5), '0,.3f')), str(format(maf_1), '0,.3f')), str(format(maf_1_5), '0,.3f')), str(format(tot, '0,.0f'))])+'\\n')\n\n outSNP_acc_out.close()\n\ndef ld_by_maf(ldFiles, report_ld, inWell_imputed, infoCutoff):\n \"\"\"\n\n :return:\n \"\"\"\n maf_data = {}\n print 'Reading', inWell_imputed\n # inWell_imputed_data = open(inWell_imputed).readlines()\n for line in open(inWell_imputed):\n data = line.split(' ')\n dataset = data[0]\n if \"snp_id\" in line and \"info\" in line:\n idx_exp_freq_a1 = data.index('exp_freq_a1')\n rs_id = data.index('rs_id')\n if dataset not in maf_data:\n maf_data[dataset] = {}\n maf_data[dataset][data[rs_id]] = data[idx_exp_freq_a1]\n datas = {}\n ld_data = {}\n report_ld_out = open(report_ld, 'w')\n report_ld_out.writelines('\\t'.join(['Population', 'MAF>=5%', 'MAF>=1%', 'MAF 1-5%', 'TOTAL'])+'\\n')\n ldFiles = ldFiles.split(',')\n not_ = 0\n in_ = 0\n header = []\n infoCutoff = float(infoCutoff)\n for ldFile in ldFiles:\n ldFile = ldFile.strip().split('==')\n dataset =ldFile[0]\n if dataset not in datas:\n datas[dataset] = {}\n datas[dataset]['1_5'] = 0\n datas[dataset]['1'] = 0\n datas[dataset]['5'] = 0\n datas[dataset]['ALL'] = set()\n datas[dataset]['total'] = len(maf_data[dataset])\n ld = ldFile[1]\n print 'Reading', ld\n ld_data[dataset] = []\n for line in open(ld):\n data = line.strip().split()\n if \"SNP_A\" in line and \"SNP_B\" in line:\n if len(header) == 0:\n header = data\n snpA_idx = header.index(\"SNP_A\")\n snpB_idx = header.index(\"SNP_B\")\n r2_idx = header.index(\"R2\")\n else:\n if float(data[r2_idx]) >= infoCutoff:\n snpA = data[snpA_idx]\n snpB = data[snpB_idx]\n for snp in [snpA, snpB]:\n if snp not in datas[dataset]['ALL']:\n try:\n datas[dataset]['ALL'].add(snp)\n maf = float(maf_data[dataset][snp])\n if maf >= 0.5:\n maf = 1-maf\n if maf >= 0.01:\n datas[dataset]['1'] += 1\n if maf >= 0.05:\n datas[dataset]['5'] += 1\n if maf <= 0.05 and maf >= 0.01:\n datas[dataset]['1_5'] += 1\n # in_ += 1\n except:\n continue\n # not_ += 1\n for dataset in sorted(datas):\n tot = datas[dataset]['total']\n # tot = len(datas[dataset]['ALL'])\n # print len(datas[dataset]['ALL']), tot\n if tot == 0:\n report_ld_out.write(\"dataset {} is empty (tot=0)\".format(dataset))\n else:\n report_ld_out.writelines('\\t'.join([dataset, str(format(datas[dataset]['5']/1000000., '0,.1f'))+'M ('+str(datas[dataset]['5'] * 100/tot)+'%)', str(format(datas[dataset]['1']/1000000., '0,.1f'))+'M ('+str(datas[dataset]['1'] * 100/tot)+'%)', str(format(datas[dataset]['1_5']/1000000., '0,.1f'))+'M ('+str(datas[dataset]['1_5'] * 100/tot)+'%)', str(format(tot, '0,.0f'))])+'\\n')\n report_ld_out.close()\n\nif args.infoFiles and args.infoCutoff:\n filter_info(args.infoFiles, args.infoCutoff, args.outWell_imputed, args.outSNP_acc)\nif args.inWell_imputed and args.outWell_imputed:\n well_imputed_by_maf(args.inWell_imputed, args.outWell_imputed)\nif args.inSNP_acc and args.outSNP_acc:\n acc_by_maf(args.inSNP_acc, args.outSNP_acc)\nif args.report_ld and args.ldFiles:\n ld_by_maf(args.ldFiles, args.report_ld, args.inWell_imputed, args.infoCutoff)\n", "id": "149610", "language": "Python", "matching_score": 4.1213297843933105, "max_stars_count": 12, "path": "templates/report.py" }, { "content": "#!/usr/bin/env 
python2.7\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--infoFiles\", help=\"\")\nparser.add_argument(\"--outWell_imputed\", help=\"\")\nparser.add_argument(\"--outSNP_acc\", help=\"\")\nparser.add_argument(\"--infoCutoff\", help=\"\")\n\nargs = parser.parse_args()\n\n\ndef filter_info(infoFiles, infoCutoff, outWell_imputed, outSNP_acc):\n \"\"\"\n Return:\n well_imputed: certainy >= 1\n SNP_concordance: concord_type0 != -1\n \"\"\"\n well_imputed = {}\n SNP_concordance = {}\n count = 0\n infoFiles = infoFiles.split(',')\n header = []\n outWell_imputed_out = open(outWell_imputed + \".tsv\", 'w')\n outWell_imputed_snp_out = open(outWell_imputed + \"_snp.tsv\", 'w')\n outSNP_accuracy_out = open(outSNP_acc + \".tsv\", 'w')\n for infoFile in infoFiles:\n infoFile = infoFile.strip().split('==')\n dataset = infoFile[0]\n info = infoFile[1]\n well_imputed[dataset] = []\n SNP_concordance[dataset] = []\n print info\n for line in open(info):\n data = line.strip().split()\n if \"SNP\" in line and \"Rsq\" in line:\n if len(header) == 0:\n header = data\n info_idx = header.index(\"Rsq\")\n conc_idx = header.index(\"EmpRsq\")\n outWell_imputed_out.writelines(' '.join([dataset] + data) + '\\\\n')\n outWell_imputed_snp_out.writelines(data[1] + '\\\\n')\n outSNP_accuracy_out.writelines(' '.join([dataset] + data) + '\\\\n')\n else:\n print info_idx, data\n if float(data[info_idx]) >= float(infoCutoff):\n outWell_imputed_out.writelines(' '.join([dataset] + data) + '\\\\n')\n outWell_imputed_snp_out.writelines(data[1] + '\\\\n')\n if data[conc_idx] != '-':\n outSNP_accuracy_out.writelines(' '.join([dataset] + data) + '\\\\n')\n count += 1\n outWell_imputed_out.close()\n outWell_imputed_snp_out.close()\n outSNP_accuracy_out.close()\n\n\nargs.infoFiles = \"${infos}\"\nargs.infoCutoff = \"${impute_info_cutoff}\"\nargs.outWell_imputed = \"${well_out}\"\nargs.outSNP_acc = \"${acc_out}\"\nif args.infoFiles and args.infoCutoff:\n filter_info(args.infoFiles, args.infoCutoff, args.outWell_imputed, args.outSNP_acc)\n\n", "id": "7591343", "language": "Python", "matching_score": 0.864104151725769, "max_stars_count": 0, "path": "templates/filter_info_minimac.py" }, { "content": "#!/usr/bin/env python2.7\n\nimport argparse,sys\nimport time\nimport gzip\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--legend\", help=\"legend file\")\nparser.add_argument(\"--hap\", help=\"hap file\")\nparser.add_argument(\"--outfile\", help=\"output file pattern (will end with .hap.gz and .legend.gz)\")\nparser.add_argument(\"--start\", type=int, help=\"start position\")\nparser.add_argument(\"--stop\", type=int, help=\"stop position\")\nargs = parser.parse_args()\n\nWINDOW = 500000\n\n\ndef extract_region(legend_file, hap_file, outfile, start, stop):\n '''\n Return: extract a region of impute reference set\n '''\n # first filter the legend fileread the legend file and negnd in the\n fin_legend = gzip.open(legend_file)\n head = fin_legend.readline()\n fin_hap = gzip.open(hap_file)\n\n fout_leg = gzip.open(outfile+\".legend.gz\",\"wt\")\n fout_hap = gzip.open(outfile+\".hap.gz\",\"wt\")\n fout_leg.write(head)\n\n skipped = 0\n total = 0\n\n for line in fin_legend:\n total += 1\n lrow = line.strip().split()\n hline = fin_hap.readline()\n if start <= int(lrow[1]) <= stop:\n fout_leg.write(line)\n fout_hap.write(hline)\n else:\n skipped+=1\n if total % 1000 == 0:\n print \"\\b.\",\n\n kept = total - skipped\n\n fout_hap.close()\n fout_leg.close()\n print \"Done. 
{} kept, {} skipped.\".format(kept,skipped)\n\n\nif __name__ == '__main__':\n extract_region(args.legend, args.hap, args.outfile, args.start-WINDOW, args.stop+WINDOW)\n", "id": "562735", "language": "Python", "matching_score": 2.1621456146240234, "max_stars_count": 12, "path": "templates/extract_region_from_refs.py" }, { "content": "#!/usr/bin/env python2.7\n\nimport argparse,sys\nimport time\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--legend_file_list\", help=\"Searated by comma\")\nparser.add_argument(\"--outfile\", help=\"\")\nparser.add_argument(\"--chunk_size\", help=\"\")\nparser.add_argument(\"--chrm\", help=\"chromosome\")\nargs = parser.parse_args()\n\n\ndef chunk_split(legend_file_list, outfile, chunk_size='', chrm=''):\n '''\n Return: chunk files in the output folder\n '''\n pop_samples={}\n print 'Generating chunk files from reference data ...'\n print \"Reading file(s) \"+legend_file_list\n print \"Writing file \"+outfile\n POS = set()\n for legend_file in legend_file_list.split(','):\n for line in open(legend_file):\n if \"id\" not in line and 'position' not in line:\n line = line.strip().split()\n try:\n pos = int(line[1])\n POS.add(pos)\n except:\n pass\n max_POS = max(POS)\n chunk_size = int(chunk_size)\n out=open(outfile,\"wt\")\n # out.writelines(\"start\" +\" \"+\"end\"+\"\\n\")\n for pos in list(range(1, max_POS+chunk_size, chunk_size)):\n start_ = pos\n end_ = start_ + chunk_size - 1\n out.writelines(str(chrm)+\",\"+str(start_) +\",\"+str(end_)+\"\\n\")\n out.close()\n\nchunk_split(args.legend_file_list, args.outfile, args.chunk_size, args.chrm)", "id": "322925", "language": "Python", "matching_score": 1.4739021062850952, "max_stars_count": 12, "path": "templates/generate_chunks_2refs.py" } ]
1.473902
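In report.py above, acc_by_maf writes its output row with "{0:.3f}\t{0:.3f}\t{0:.3f}\n", so the first MAF bin is printed three times while the header written earlier in that function has five columns (CHRM, MAF>=5%, MAF>=1%, MAF 1-5%, TOTAL); positional fields need distinct indices. A one-function sketch of the intended row formatting; the helper name write_acc_row is illustrative.

def write_acc_row(out, dataset, maf_5, maf_1, maf_1_5, total):
    # One positional field per value, so each MAF bin gets its own column.
    out.write("{0}\t{1:.3f}\t{2:.3f}\t{3:.3f}\t{4}\n".format(dataset, maf_5, maf_1, maf_1_5, total))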
foundations
[ { "content": "# -*- coding: utf-8 -*-\n\nimport re\nfrom .config import config\nimport sqlite3\n\n\nclass SqliteInterface:\n def __init__(self):\n self.database = config.database + '.db'\n conn = sqlite3.connect(self.database) # 创建sqlite.db数据库\n print(\"open database success\")\n self.create_table_setting()\n self.create_table_price()\n print(\"Table created successfully\")\n\n # setting 表\n def create_table_setting(self):\n conn = sqlite3.connect(self.database) # 创建数据库\n query = \"\"\"CREATE TABLE IF NOT EXISTS setting(\n openid VARCHAR(100) PRIMARY KEY,\n premium_rate DOUBLE,\n warning_threshold DOUBLE,\n single_deal_threshold DOUBLE,\n warning_limit INT\n );\"\"\"\n conn.execute(query)\n conn.close()\n print(\"Table {0} is ready\".format('setting'))\n\n def update_setting(self, open_id, parameter_list):\n conn = sqlite3.connect(self.database)\n statement = \"INSERT OR REPLACE INTO {0} VALUES(?,?,?,?,?)\".format('setting')\n data = [(open_id, parameter_list[0], parameter_list[1], parameter_list[2], parameter_list[3])]\n conn.executemany(statement, data)\n conn.commit()\n\n # price 表\n def create_table_price(self):\n conn = sqlite3.connect(self.database) # 创建数据库\n query = \"\"\"CREATE TABLE IF NOT EXISTS price(stockid VARCHAR(25) PRIMARY KEY, preprice DOUBLE)\"\"\"\n conn.execute(query)\n conn.close()\n print(\"Table {0} is ready\".format('price'))\n\n def update_price(self, stock_id, price):\n conn = sqlite3.connect(self.database)\n statement = \"INSERT OR REPLACE INTO {0} VALUES(?,?)\".format('price')\n data = [(stock_id, price)]\n conn.executemany(statement, data)\n conn.commit()\n\n # 通用\n def print_table(self, table_name):\n conn = sqlite3.connect(self.database)\n sql = \"select * from {0}\".format(table_name)\n curson = conn.execute(sql)\n conn.commit()\n rows = curson.fetchall()\n print(rows)\n\n conn.close()\n\n def delete_table(self, table_name):\n conn = sqlite3.connect(self.database)\n sql = \"drop table IF EXISTS {0}\".format(table_name)\n conn.execute(sql)\n conn.close()\n\n def get_values_by_id(self, table_name, id_name, id_value):\n con = sqlite3.connect(self.database)\n cur = con.cursor()\n cur.execute('select * from {0} where {1} = \"{2}\"'.format(table_name, id_name, id_value))\n results = cur.fetchall()\n con.close()\n return results\n\n def change_tuple_to_string(self, parameter_tuple):\n res = ''\n # change tuple to string\n length = len(parameter_tuple)\n res = ''\n for i in range(1, length - 1):\n res += str(parameter_tuple[i]) + \" \"\n res += str(parameter_tuple[length - 1])\n return res\n\n def check_setting_parameter(self, openid, content):\n parameter_list = re.split(r\" +\", content)\n if len(parameter_list) < 5:\n return False, \"参数数量不足,需要的参数分别为:\\n越价率,越价+大单的大单阈值,单笔大单阈值,一分钟内的预警次数限制\", None\n if len(parameter_list) > 5:\n return False, \"参数数量过多,请重新输入.需要的参数分别为:\\n越价率,越价+大单的大单阈值,单笔大单阈值,一分钟内的预警次数限制\", None\n for i in range(1, len(parameter_list)):\n try:\n if i < len(parameter_list) - 1:\n f = float(parameter_list[i])\n else:\n f = int(parameter_list[i])\n except ValueError:\n if i < len(parameter_list) - 1:\n return False, \"第{0}个参数不是数值.\".format(i), None\n else:\n return False, \"第{0}个参数不是整数.\".format(i), None\n if i < len(parameter_list) - 1:\n parameter_list[i] = float(parameter_list[i])\n else:\n parameter_list[i] = int(parameter_list[i])\n msg = ''\n premium_rate = parameter_list[1]\n warning_threshold = parameter_list[2]\n large_threshold = parameter_list[3]\n warning_limit = parameter_list[4]\n if premium_rate > 1:\n msg += 
\"越价率请设成小于0到1之间的数,如设置成0.005,即0.5%\\n\"\n if premium_rate <= 1e-8:\n msg += \"越价率请大于1e-8\\n\"\n if warning_threshold <= 0:\n msg += \"越价+大单组合预警的大单阈值,只能为正数.\\n\"\n if large_threshold <= 0:\n msg += \"大单预警的阈值,只能为正数.\"\n if warning_limit > 15 or warning_limit < 1:\n msg += \"一分钟内的预警次数限制,最少设置为1,最多设置为15.\\n\"\n if msg != '':\n return False, msg + \"请重新输入.\", None\n\n msg += \"参数输入正确.\\n\"\n old_parameter_tuple = self.get_values_by_id('setting', 'openid', openid)\n if not old_parameter_tuple: # 如果数据库没有存在这个\n msg += \"这是您首次设置.\"\n else:\n if self.check_old_and_new(old_parameter_tuple[0], parameter_list): # 如果不相同\n old_parameter_string = self.change_tuple_to_string(old_parameter_tuple[0])\n msg += \"您上次的设置是:\\n{0}\".format(old_parameter_string)\n else:\n msg += \"您的设置与上次一致,无需设置.\"\n return False, msg, None\n\n return True, msg, parameter_list[1:]\n\n def check_old_and_new(self, old_parameter_tuple, new_parameter_list):\n for i in range(1, len(old_parameter_tuple)):\n if old_parameter_tuple[i] != new_parameter_list[i]:\n return True # 不相同\n return False\n\n\n# ---- 单元测试代码\nif __name__ == '__main__':\n mi = SqliteInterface()\n mi.check_setting_parameter(\"djkfhkdhsf\",\"设置 0.005 1000000 2000000 10\")\n mi.print_table('setting')\n print(mi.get_values_by_id('setting', 'openid', 'djkfhkdhsf'))\n\n mi.delete_table('setting')\n mi.update_price('HK.00001', 15.6)\n mi.print_table('price')\n mi.delete_table('price')", "id": "937995", "language": "Python", "matching_score": 2.240183115005493, "max_stars_count": 5, "path": "futuquant/examples/app/stock_alarm/sqlite_interface.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\nclass Config:\n def __init__(self):\n # wechat\n self.appid = '' # AppID\n self.secrect = '' # Secret\n\n # test_user_list\n self.test_user_list = {\n '' # lpt\n }\n\n # test_user_nickname\n self.test_user_nickname = {'lpt'}\n\n # wechat token\n self.token = '' # token\n\n # parameter: 越价率\n self.premium_rate = 0.005\n self.warning_threshold = 1000000\n self.large_threshold = 5000000\n self.warning_limit = 5\n\n # template_id\n self.template_id = \"\"\n\n # mysql\n # self.host = '127.0.0.1'\n # self.port = 3306\n # self.user = ''\n # self.passwd = ''\n # self.database = 'stock_alarm'\n\n # sqlite\n self.database = 'stock_alarm'\n\n # FutuOpenD\n self.host = '127.0.0.1'\n self.port = 11111\n\n\nconfig = Config()", "id": "6733288", "language": "Python", "matching_score": 0.6671314239501953, "max_stars_count": 5, "path": "futuquant/examples/app/stock_alarm/config.py" }, { "content": "# encoding: UTF-8\n\n# 重载sys模块,设置默认字符串编码方式为utf8\ntry:\n reload # Python 2\nexcept NameError: # Python 3\n from importlib import reload\nimport sys\nreload(sys)\n# sys.setdefaultencoding('utf8')\n\n\n# vn.trader模块\nfrom vnpy.event import EventEngine\nfrom vnpy.trader.vtEngine import MainEngine\nfrom vnpy.trader.uiQt import createQApp\nfrom vnpy.trader.uiMainWindow import MainWindow\n\n# 加载底层接口\nfrom vnpy.trader.gateway import futuGateway\n\n# 加载上层应用\nfrom vnpy.trader.app import (riskManager, ctaStrategy, spreadTrading)\n\n\n#----------------------------------------------------------------------\ndef main():\n \"\"\"主程序入口\"\"\"\n # 创建Qt应用对象\n qApp = createQApp()\n \n # 创建事件引擎\n ee = EventEngine()\n \n # 创建主引擎\n me = MainEngine(ee)\n \n # 添加交易接口\n me.addGateway(futuGateway)\n\n # 添加上层应用\n me.addApp(riskManager)\n me.addApp(ctaStrategy)\n me.addApp(spreadTrading)\n \n # 创建主窗口\n mw = MainWindow(me, ee)\n mw.showMaximized()\n \n # 在主线程中启动Qt事件循环\n sys.exit(qApp.exec_())\n\n\nif __name__ == '__main__':\n main()\n", "id": 
"413573", "language": "Python", "matching_score": 2.1741034984588623, "max_stars_count": 5, "path": "futuquant/examples/vnpy/vnTrader/run.py" }, { "content": "# encoding: UTF-8\n\n\"\"\"\n导入MC导出的CSV历史数据到MongoDB中\n\"\"\"\n\nfrom vnpy.trader.app.ctaStrategy.ctaBase import MINUTE_DB_NAME\nfrom vnpy.trader.app.ctaStrategy.ctaHistoryData import loadMcCsv\n\n\nif __name__ == '__main__':\n loadMcCsv('HK.00700_1min.csv', MINUTE_DB_NAME, 'HK.00700')\n\n", "id": "9919854", "language": "Python", "matching_score": 0.46847087144851685, "max_stars_count": 8, "path": "futuquant/examples/vnpy/CtaBacktesting/loadCsv.py" }, { "content": "# encoding: UTF-8\n\n\"\"\"\n基于King Keltner通道的交易策略,适合用在股指上,\n展示了OCO委托和5分钟K线聚合的方法。\n\n注意事项:\n1. 作者不对交易盈利做任何保证,策略代码仅供参考\n2. 本策略需要用到talib,没有安装的用户请先参考www.vnpy.org上的教程安装\n3. 将IF0000_1min.csv用ctaHistoryData.py导入MongoDB后,直接运行本文件即可回测策略\n\"\"\"\n\nfrom __future__ import division\n\nfrom vnpy.trader.vtObject import VtBarData\nfrom vnpy.trader.vtConstant import EMPTY_STRING\nfrom vnpy.trader.app.ctaStrategy.ctaTemplate import (CtaTemplate, \n BarGenerator, \n ArrayManager)\n\n\n########################################################################\nclass KkStrategyTest(CtaTemplate):\n \"\"\"基于King Keltner通道的交易策略\"\"\"\n className = 'KkStrategyTest'\n author = u'用Python的交易员'\n\n # 策略参数\n kkLength = 11 # 计算通道中值的窗口数\n kkDev = 1.6 # 计算通道宽度的偏差\n trailingPrcnt = 0.8 # 移动止损\n initDays = 10 # 初始化数据所用的天数\n fixedSize = 1 # 每次交易的数量\n\n # 策略变量\n kkUp = 0 # KK通道上轨\n kkDown = 0 # KK通道下轨\n intraTradeHigh = 0 # 持仓期内的最高点\n intraTradeLow = 0 # 持仓期内的最低点\n\n buyOrderIDList = [] # OCO委托买入开仓的委托号\n shortOrderIDList = [] # OCO委托卖出开仓的委托号\n orderList = [] # 保存委托代码的列表\n\n # 参数列表,保存了参数的名称\n paramList = ['name',\n 'className',\n 'author',\n 'vtSymbol',\n 'kkLength',\n 'kkDev'] \n\n # 变量列表,保存了变量的名称\n varList = ['inited',\n 'trading',\n 'pos',\n 'kkUp',\n 'kkDown']\n \n # 同步列表,保存了需要保存到数据库的变量名称\n syncList = ['pos',\n 'intraTradeHigh',\n 'intraTradeLow'] \n\n #----------------------------------------------------------------------\n def __init__(self, ctaEngine, setting):\n \"\"\"Constructor\"\"\"\n super(KkStrategyTest, self).__init__(ctaEngine, setting)\n \n self.bg = BarGenerator(self.onBar, 5, self.onFiveBar) # 创建K线合成器对象\n self.am = ArrayManager()\n \n self.buyOrderIDList = []\n self.shortOrderIDList = []\n self.orderList = []\n \n #----------------------------------------------------------------------\n def onInit(self):\n \"\"\"初始化策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略初始化' %self.name)\n \n # 载入历史数据,并采用回放计算的方式初始化策略数值\n initData = self.loadBar(self.initDays)\n for bar in initData:\n self.onBar(bar)\n\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onStart(self):\n \"\"\"启动策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略启动' %self.name)\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onStop(self):\n \"\"\"停止策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略停止' %self.name)\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onTick(self, tick):\n \"\"\"收到行情TICK推送(必须由用户继承实现)\"\"\" \n self.bg.updateTick(tick)\n\n #----------------------------------------------------------------------\n def onBar(self, bar):\n \"\"\"收到Bar推送(必须由用户继承实现)\"\"\"\n self.bg.updateBar(bar)\n \n #----------------------------------------------------------------------\n def onFiveBar(self, bar):\n \"\"\"收到5分钟K线\"\"\"\n # 撤销之前发出的尚未成交的委托(包括限价单和停止单)\n for orderID in self.orderList:\n 
self.cancelOrder(orderID)\n self.orderList = []\n \n # 保存K线数据\n am = self.am\n am.updateBar(bar)\n if not am.inited:\n return\n \n # 计算指标数值\n self.kkUp, self.kkDown = am.keltner(self.kkLength, self.kkDev)\n \n # 判断是否要进行交易\n \n # 当前无仓位,发送OCO开仓委托\n if self.pos == 0:\n self.intraTradeHigh = bar.high\n self.intraTradeLow = bar.low \n self.sendOcoOrder(self.kkUp, self.kkDown, self.fixedSize)\n \n # 持有多头仓位\n elif self.pos > 0:\n self.intraTradeHigh = max(self.intraTradeHigh, bar.high)\n self.intraTradeLow = bar.low\n \n l = self.sell(self.intraTradeHigh*(1-self.trailingPrcnt/100), \n abs(self.pos), True)\n self.orderList.extend(l)\n \n # 持有空头仓位\n elif self.pos < 0:\n self.intraTradeHigh = bar.high\n self.intraTradeLow = min(self.intraTradeLow, bar.low)\n \n l = self.cover(self.intraTradeLow*(1+self.trailingPrcnt/100), \n abs(self.pos), True)\n self.orderList.extend(l)\n \n # 同步数据到数据库\n self.saveSyncData() \n \n # 发出状态更新事件\n self.putEvent() \n\n #----------------------------------------------------------------------\n def onOrder(self, order):\n \"\"\"收到委托变化推送(必须由用户继承实现)\"\"\"\n pass\n\n #----------------------------------------------------------------------\n def onTrade(self, trade):\n if self.pos != 0:\n # 多头开仓成交后,撤消空头委托\n if self.pos > 0:\n for shortOrderID in self.shortOrderIDList:\n self.cancelOrder(shortOrderID)\n # 反之同样\n elif self.pos < 0:\n for buyOrderID in self.buyOrderIDList:\n self.cancelOrder(buyOrderID)\n \n # 移除委托号\n for orderID in (self.buyOrderIDList + self.shortOrderIDList):\n if orderID in self.orderList:\n self.orderList.remove(orderID)\n \n # 发出状态更新事件\n self.putEvent()\n \n #----------------------------------------------------------------------\n def sendOcoOrder(self, buyPrice, shortPrice, volume):\n \"\"\"\n 发送OCO委托\n \n OCO(One Cancel Other)委托:\n 1. 主要用于实现区间突破入场\n 2. 包含两个方向相反的停止单\n 3. 
一个方向的停止单成交后会立即撤消另一个方向的\n \"\"\"\n # 发送双边的停止单委托,并记录委托号\n self.buyOrderIDList = self.buy(buyPrice, volume, True)\n self.shortOrderIDList = self.short(shortPrice, volume, True)\n \n # 将委托号记录到列表中\n self.orderList.extend(self.buyOrderIDList)\n self.orderList.extend(self.shortOrderIDList)\n\n #----------------------------------------------------------------------\n def onStopOrder(self, so):\n \"\"\"停止单推送\"\"\"\n pass", "id": "5022521", "language": "Python", "matching_score": 2.2937893867492676, "max_stars_count": 8, "path": "futuquant/examples/vnpy/CtaTrading/strategyKingKeltnerTest.py" }, { "content": "# encoding: UTF-8\n\n'''\n 南方东英杠反ETF策略,回测数据见\n https://act.futunn.com/south-etf\n'''\nimport talib\nimport time\nfrom futuquant.examples.TinyQuant.TinyStrateBase import *\n\nclass TinyStrateSouthETF(TinyStrateBase):\n \"\"\"策略名称, setting.json中作为该策略配置的key\"\"\"\n name = 'tiny_strate_south_etf'\n\n \"\"\"策略需要用到行情数据的股票池\"\"\"\n symbol_pools = []\n\n def __init__(self):\n super(TinyStrateSouthETF, self).__init__()\n \"\"\"请在setting.json中配置参数\"\"\"\n self.symbol_ref = None\n self.ref_idx = None\n self.cta_call = None\n self.cta_put = None\n\n self.trade_qty = None\n self.trade_price_idx = None\n self._last_dt_process = 0\n\n def on_init_strate(self):\n \"\"\"策略加载完配置\"\"\"\n\n # 添加必要的股票,以便能得到相应的股票行情数据\n self.symbol_pools.append(self.symbol_ref)\n if self.cta_call[\"enable\"]:\n self.symbol_pools.append(self.cta_call[\"symbol\"])\n\n if self.cta_put[\"enable\"]:\n self.symbol_pools.append(self.cta_put[\"symbol\"])\n\n # call put 的持仓量以及持仓天数\n self.cta_call['pos'] = 0\n self.cta_call['days'] = 0\n self.cta_put['pos'] = 0\n self.cta_put['days'] = 0\n\n # call put 一天只操作一次,记录当天是否已经操作过\n self.cta_call['done'] = False\n self.cta_put['done'] = False\n\n # 记录当天操作的订单id\n self.cta_call['order_id'] = ''\n self.cta_put['order_id'] = ''\n\n # 检查参数: 下单的滑点 / 下单的数量\n if self.trade_price_idx < 1 or self.trade_price_idx > 5:\n raise Exception(\"conifg trade_price_idx error!\")\n if self.trade_qty < 0:\n raise Exception(\"conifg trade_qty error!\")\n\n def on_start(self):\n \"\"\"策略启动入口\"\"\"\n # 读取用户现有帐户持仓信息, 数量不超过config中指定的交易数量 'trade_qty'\n for cta in [self.cta_call, self.cta_put]:\n pos = self.get_tiny_position(cta['symbol'])\n if pos is not None:\n valid_pos = pos.position - pos.frozen\n valid_pos = valid_pos if valid_pos > 0 else 0\n valid_pos = self.trade_qty if valid_pos > self.trade_qty else valid_pos\n cta['pos'] = valid_pos\n\n self.log(\"on_start\")\n\n def on_quote_changed(self, tiny_quote):\n \"\"\"报价、摆盘实时数据变化时,会触发该回调\"\"\"\n\n # TinyQuoteData\n if tiny_quote.symbol != self.symbol_ref:\n return\n\n # 减少计算频率,每x秒一次\n dt_now = time.time()\n if dt_now - self._last_dt_process < 2:\n return\n self._last_dt_process = dt_now\n\n # 执行策略\n self._process_cta(self.cta_call)\n self._process_cta(self.cta_put)\n\n def _process_cta(self, cta):\n if not cta['enable'] or cta['done']:\n return\n\n cta_symbol = cta['symbol']\n\n # 是否要卖出\n if cta['pos'] > 0 and cta['days'] >= cta['days_sell']:\n # TO SELL\n price = self._get_splip_sell_price(cta_symbol)\n volume = cta['pos']\n if price > 0:\n ret, data = self.sell(price, volume, cta_symbol)\n # 安全起见,一天只做一次交易操作, 失败也忽略\n cta['done'] = True\n if 0 == ret:\n cta['order_id'] = data\n self.log(\"sell price=%s volume=%s symbol=%s ret=%s , err=%s\" % (price, volume, cta_symbol, ret, (data if 0 != ret else \"\")))\n return\n\n # 计算触发值\n is_call = cta is self.cta_call\n to_buy = False\n if self.ref_idx == 0:\n # 指标参数 0:涨跌幅 1:移动平均线\n quote = self.get_rt_tiny_quote(self.symbol_ref)\n if not 
quote or quote.preClosePrice <= 0 or quote.lastPrice <= 0:\n return\n if is_call:\n trigger = (quote.lastPrice - quote.preClosePrice)/float(quote.preClosePrice)\n else:\n trigger = (quote.preClosePrice - quote.lastPrice) /float(quote.preClosePrice)\n if trigger >= cta['trigger_per']:\n to_buy = True\n else:\n # 移动平均线\n am = self.get_kl_day_am(self.symbol_ref)\n array_close = am.close\n short = self.ema(array_close, cta['trigger_ema_short'], True)\n long = self.ema(array_close, cta['trigger_ema_long'], True)\n\n if is_call:\n if (short[-2] < long[-2]) and (short[-1] > long[-1]):\n to_buy = True\n else:\n if (short[-2] > long[-2]) and (short[-1] < long[-1]):\n to_buy = True\n\n if to_buy and 0 == cta['pos']: # 空仓才买入\n # TO BUY\n price = self._get_splip_buy_price(cta_symbol)\n volume = self.trade_qty\n if price > 0:\n ret, data = self.buy(price, volume, cta_symbol)\n # 安全起见,一天只做一次交易操作, 失败也忽略\n cta['done'] = True\n if 0 == ret:\n cta['order_id'] = data\n self.log(\"buy price=%s volume=%s symbol=%s ret=%s , err=%s\" %(price, volume, cta_symbol, ret, (data if 0!=ret else \"\")))\n\n def on_bar_min1(self, tiny_bar):\n \"\"\"每一分钟触发一次回调\"\"\"\n bar = tiny_bar\n str_dt = bar.datetime.strftime(\"%Y%m%d %H:%M:%S\")\n str_log = \"on_bar_min1 symbol=%s dt=%s open=%s high=%s close=%s low=%s vol=%s\" % (\n bar.symbol, str_dt, bar.open, bar.high, bar.close, bar.low, bar.volume)\n self.log(str_log)\n\n def on_bar_day(self, tiny_bar):\n \"\"\"收盘时会触发一次日k回调\"\"\"\n pass\n\n def on_before_trading(self, date_time):\n \"\"\"开盘的时候检查,如果有持仓,就把持有天数 + 1\"\"\"\n\n if self.cta_call['pos'] > 0:\n self.cta_call['days'] += 1\n if self.cta_put['pos'] > 0:\n self.cta_put['days'] += 1\n\n self.cta_call['done'] = False\n self.cta_put['done'] = False\n\n def on_after_trading(self, date_time):\n \"\"\"收盘的时候更新持仓信息\"\"\"\n\n self._update_cta_pos(self.cta_call)\n self._update_cta_pos(self.cta_put)\n\n def ema(self, np_array, n, array=False):\n \"\"\"移动均线\"\"\"\n if n < 2:\n result = np_array\n else:\n result = talib.EMA(np_array, n)\n if array:\n return result\n return result[-1]\n\n def _get_splip_buy_price(self, symbol):\n quote = self.get_rt_tiny_quote(symbol)\n if quote is None:\n return 0\n index = self.trade_price_idx\n return quote.__dict__['askPrice%s' % index]\n\n def _get_splip_sell_price(self, symbol):\n quote = self.get_rt_tiny_quote(symbol)\n if quote is None:\n return 0\n index = self.trade_price_idx\n return quote.__dict__['bidPrice%s' % index]\n\n def _update_cta_pos(self, cta):\n order_id = cta['order_id']\n if not order_id:\n return\n\n for x in range(3):\n ret, data = self.get_tiny_trade_order(order_id)\n if 0 != ret:\n continue\n if data.direction == TRADE_DIRECT_BUY:\n cta['pos'] = data.trade_volume\n cta['days'] = 0\n cta['order_id'] = ''\n elif data.direction == TRADE_DIRECT_SELL:\n cta['pos'] -= data.trade_volume\n # 如果全部卖出, 将days置为0, 否则第二天继续卖\n if cta['pos'] <= 0:\n cta['days'] = 0\n cta['order_id'] = ''\n else:\n raise Exception(\"_update_cta_pos error!\")\n break\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "id": "5470836", "language": "Python", "matching_score": 5.464410305023193, "max_stars_count": 5, "path": "futuquant/examples/app/tq_south_etf/TinyStrateSouthETF.py" }, { "content": "# encoding: UTF-8\n\n'''\n 实盘策略范例,接口用法见注释及范例代码\n'''\nimport talib\nfrom TinyStrateBase import *\n\nclass TinyStrateSample(TinyStrateBase):\n \"\"\"策略名称, setting.json中作为该策略配置的key\"\"\"\n name = 'tiny_strate_sample'\n\n \"\"\"策略需要用到行情数据的股票池\"\"\"\n symbol_pools = ['HK.00700', 'HK.00001']\n\n def __init__(self):\n super(TinyStrateSample, 
self).__init__()\n\n \"\"\"请在setting.json中配置参数\"\"\"\n self.param1 = None\n self.param2 = None\n\n def on_init_strate(self):\n \"\"\"策略加载完配置\"\"\"\n pass\n\n def on_start(self):\n \"\"\"策略启动入口\"\"\"\n self.log(\"on_start param1=%s param2=%s\" %(self.param1, self.param2))\n\n \"\"\"交易接口测试\n ret, data = self.buy(4.60, 1000, 'HK.03883')\n if 0 == ret:\n order_id = data\n ret, data = self.get_tiny_trade_order(order_id)\n if 0 == ret:\n str_info = ''\n for key in data.__dict__.keys():\n str_info += \"%s='%s' \" % (key, data.__dict__[key])\n print str_info\n\n ret, data = self.sell(11.4, 1000, 'HK.01357')\n if 0 == ret:\n order_id = data\n self.cancel_order(order_id)\n \"\"\"\n\n def on_quote_changed(self, tiny_quote):\n \"\"\"报价、摆盘实时数据变化时,会触发该回调\"\"\"\n # TinyQuoteData\n data = tiny_quote\n symbol = data.symbol\n str_dt = data.datetime.strftime(\"%Y%m%d %H:%M:%S\")\n\n # 得到日k数据的ArrayManager(vnpy)对象\n am = self.get_kl_day_am(data.symbol)\n array_high = am.high\n array_low = am.low\n array_open = am.open\n array_close = am.close\n array_vol = am.volume\n\n n = 5\n ma_high = self.sma(array_high, n)\n ma_low = self.sma(array_low, n)\n ma_open = self.sma(array_open, n)\n ma_close = self.sma(array_close, n)\n ma_vol = self.sma(array_vol, n)\n\n str_log = \"on_quote_changed symbol=%s dt=%s sma(%s) open=%s high=%s close=%s low=%s vol=%s\" % (\n symbol, str_dt, n, ma_open, ma_high, ma_close, ma_low, ma_vol)\n self.log(str_log)\n\n def on_bar_min1(self, tiny_bar):\n \"\"\"每一分钟触发一次回调\"\"\"\n bar = tiny_bar\n symbol = bar.symbol\n str_dt = bar.datetime.strftime(\"%Y%m%d %H:%M:%S\")\n\n # 得到分k数据的ArrayManager(vnpy)对象\n am = self.get_kl_min1_am(symbol)\n array_high = am.high\n array_low = am.low\n array_open = am.open\n array_close = am.close\n array_vol = am.volume\n\n n = 5\n ma_high = self.ema(array_high, n)\n ma_low = self.ema(array_low, n)\n ma_open = self.ema(array_open, n)\n ma_close = self.ema(array_close, n)\n ma_vol = self.ema(array_vol, n)\n\n str_log = \"on_bar_min1 symbol=%s dt=%s ema(%s) open=%s high=%s close=%s low=%s vol=%s\" % (\n symbol, str_dt, n, ma_open, ma_high, ma_close, ma_low, ma_vol)\n self.log(str_log)\n\n def on_bar_day(self, tiny_bar):\n \"\"\"收盘时会触发一次日k回调\"\"\"\n bar = tiny_bar\n symbol = bar.symbol\n str_dt = bar.datetime.strftime(\"%Y%m%d %H:%M:%S\")\n str_log = \"on_bar_day symbol=%s dt=%s open=%s high=%s close=%s low=%s vol=%s\" % (\n symbol, str_dt, bar.open, bar.high, bar.close, bar.low, bar.volume)\n self.log(str_log)\n\n def on_before_trading(self, date_time):\n \"\"\"开盘时触发一次回调, 港股是09:30:00\"\"\"\n str_log = \"on_before_trading - %s\" % date_time.strftime('%Y-%m-%d %H:%M:%S')\n self.log(str_log)\n\n def on_after_trading(self, date_time):\n \"\"\"收盘时触发一次回调, 港股是16:00:00\"\"\"\n str_log = \"on_after_trading - %s\" % date_time.strftime('%Y-%m-%d %H:%M:%S')\n self.log(str_log)\n\n def sma(self, np_array, n, array=False):\n \"\"\"简单均线\"\"\"\n if n < 2:\n result = np_array\n else:\n result = talib.SMA(np_array, n)\n if array:\n return result\n return result[-1]\n\n def ema(self, np_array, n, array=False):\n \"\"\"移动均线\"\"\"\n if n < 2:\n result = np_array\n else:\n result = talib.EMA(np_array, n)\n if array:\n return result\n return result[-1]\n\n", "id": "12554563", "language": "Python", "matching_score": 2.6939001083374023, "max_stars_count": 8, "path": "futuquant/examples/TinyQuant/TinyStrateSample.py" }, { "content": "# encoding: UTF-8\n\n'''\n 实盘策略范例,接口用法见注释及范例代码\n'''\nimport talib\nfrom futuquant.examples.TinyQuant.TinyStrateBase import *\nfrom 
futuquant.examples.TinyQuant.TinyQuantFrame import *\nfrom futuquant.quote.open_quote_context import *\nfrom futuquant.trade.open_trade_context import *\nimport datetime\n\nclass TinyBreakRegion(TinyStrateBase):\n \"\"\"策略名称, setting.json中作为该策略配置的key\"\"\"\n name = 'tiny_break_region_sample'\n\n \"\"\"策略需要用到行情数据的股票池\"\"\"\n symbol_pools = ['HK.00700']\n pwd_unlock = '<PASSWORD>'\n\n def __init__(self):\n super(TinyBreakRegion, self).__init__()\n \"\"\"请在setting.json中配置参数\"\"\"\n #self.money = None\n #self.chicang = None\n #self.up = 0\n #self.down = 0\n self.before_minute_price = 0\n\n\n\n def on_init_strate(self):\n \"\"\"策略加载完配置后的回调\n 1. 可修改symbol_pools 或策略内部其它变量的初始化\n 2. 此时还不能调用futu api的接口\n \"\"\"\n\n def on_start(self):\n\n pass\n\n def on_quote_changed(self, tiny_quote):\n pass\n\n def on_bar_min1(self, tiny_bar):\n \"\"\"每一分钟触发一次回调\"\"\"\n bar = tiny_bar\n symbol = bar.symbol\n price = bar.open\n\n up, down = self.track(symbol)\n now = datetime.datetime.now()\n work_time = now.replace(hour=9, minute=30, second=0)\n if now == work_time:\n self.before_minute_price = price\n return\n if self.before_minute_price == 0:\n self.before_minute_price = price\n return\n if self.before_minute_price < up and price > up:\n self.do_trade(symbol, price, \"buy\")\n elif self.before_minute_price > down and price < down:\n self.do_trade(symbol, price, \"sell\")\n self.before_minute_price = price\n\n\n\n def on_bar_day(self, tiny_bar):\n \"\"\"收盘时会触发一次日k回调\"\"\"\n pass\n\n\n\n\n def on_before_trading(self, date_time):\n \"\"\"开盘时触发一次回调, 脚本挂机切换交易日时,港股会在09:30:00回调\"\"\"\n pass\n\n def on_after_trading(self, date_time):\n \"\"\"收盘时触发一次回调, 脚本挂机时,港股会在16:00:00回调\"\"\"\n str_log = \"on_after_trading - %s\" % date_time.strftime('%Y-%m-%d %H:%M:%S')\n self.log(str_log)\n\n\n def track(self, symbol):\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11122)\n now = datetime.datetime.now()\n end_str = now.strftime('%Y-%m-%d')\n start = now - datetime.timedelta(days=365)\n start_str = start.strftime('%Y-%m-%d')\n _, temp = quote_ctx.get_history_kline(symbol, start=start_str, end=end_str)\n #print(temp)\n high = temp['high'].values\n low = temp['low'].values\n open = temp['open'].values\n \"\"\"确定上下轨\"\"\"\n '''\n y_amplitude = []\n for i in range(len(high)):\n temp = high[i] - low[i]\n y_amplitude.append(temp)\n '''\n y_amplitude = high - low\n print(y_amplitude)\n y_a_r = y_amplitude[-1] / open[-2]\n if y_a_r > 0.05:\n up_ = open[-1] + 0.5 * y_amplitude[-1]\n down_ = open[-1] - 0.5 * y_amplitude[-1]\n else:\n up_ = open[-1] + 2 / 3 * y_amplitude[-1]\n down_ = open[-1] - 2 / 3 * y_amplitude[-1]\n #print(up_, down_)\n return up_, down_\n\n def test(self):\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11122)\n print(quote_ctx.unsubscribe(['HK.00700'], [SubType.QUOTE]))\n print(quote_ctx.get_rt_data('HK.00700'))\n\n quote_ctx.close()\n\n def do_trade(self, symbol, price, trd_side):\n # 获取账户信息\n trd_ctx = OpenHKTradeContext(host='172.24.31.139', port=11111)\n trd_ctx.unlock_trade(self.pwd_unlock)\n result, accinfo = trd_ctx.accinfo_query()\n if result != 0:\n return\n accinfo_cash = accinfo.cash.values[0]\n accinfo_market_val = accinfo.market_val.values[0]\n\n\n if trd_side == 'buy':\n qty = int(accinfo_cash / price)\n trd_ctx.place_order(price=price, qty=qty, code=symbol, trd_side=TrdSide.BUY)\n elif trd_side == 'sell':\n qty = int(accinfo_market_val / price)\n trd_ctx.place_order(price=price, qty=qty, code=symbol, trd_side=TrdSide.SELL)\n\n trd_ctx.close()\n\n\nif __name__ == '__main__':\n my_strate = 
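track() above derives the day's breakout band from the last bar's high-low range: if that range exceeds 5% of the prior bar's open, the band is the latest open plus/minus 0.5 times the range, otherwise plus/minus 2/3 of the range, and on_bar_min1 buys or sells when the price crosses out of the band. A self-contained sketch of just the band arithmetic, assuming plain numpy arrays of the kline history; names are illustrative.

import numpy as np

def breakout_bands(open_, high, low):
    """Upper/lower breakout levels per the rule used in track()."""
    rng = high[-1] - low[-1]              # latest bar's high-low amplitude
    ratio = rng / open_[-2]               # amplitude relative to the prior open
    k = 0.5 if ratio > 0.05 else 2.0 / 3.0
    return open_[-1] + k * rng, open_[-1] - k * rng

if __name__ == '__main__':
    o = np.array([100.0, 101.0, 102.0])
    h = np.array([103.0, 104.0, 105.0])
    l = np.array([99.0, 100.0, 101.0])
    print(breakout_bands(o, h, l))        # amplitude 4 is under 5%, so open +/- 2/3 * 4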
TinyBreakRegion()\n #my_strate.test()\n frame = TinyQuantFrame(my_strate)\n frame.run()\n", "id": "6129508", "language": "Python", "matching_score": 6.439749240875244, "max_stars_count": 5, "path": "futuquant/examples/app/tq_break_region/TinyBreakRegion.py" }, { "content": "# encoding: UTF-8\n\n'''\n双均线策略,通过建立m天移动平均线,n天移动平均线,则两条均线必有交点。若m>n,n天平均线“上穿越”m天均线则为买入点,反之为卖出点。\n该策略基于不同天数均线的交叉点,抓住股票的强势和弱势时刻,进行交易。\n'''\n\nimport talib\nfrom futuquant.examples.TinyQuant.TinyStrateBase import *\nfrom futuquant.examples.TinyQuant.TinyQuantFrame import *\nfrom futuquant.quote.open_quote_context import *\nfrom futuquant.trade.open_trade_context import *\nimport datetime\n\nclass TinyStrateMeanLine(TinyStrateBase):\n name = 'tiny_strate_mean_line'\n symbol_pools = ['HK.00700']\n pwd_unlock = '<PASSWORD>'\n\n def __init__(self):\n super(TinyStrateMeanLine, self).__init__()\n \"\"\"请在setting.json中配置参数\"\"\"\n self.param1 = None\n self.param2 = None\n \"\"\"0: 空仓 1:满仓\"\"\"\n self.flag = 0\n\n def on_init_strate(self):\n \"\"\"策略加载完配置\"\"\"\n pass\n\n def on_quote_changed(self, tiny_quote):\n \"\"\"报价、摆盘实时数据变化时,会触发该回调\"\"\"\n pass\n\n def on_bar_min1(self, tiny_quote):\n \"\"\"每一分钟触发一次回调\"\"\"\n \"\"\"收盘前五分钟,调用\"\"\"\n data = tiny_quote\n symbol = data.symbol\n price = data.open\n #print(price)\n\n now = datetime.datetime.now()\n work_time = now.replace(hour=15, minute=55, second=0)\n\n if now >= work_time:\n ma_20 = self.get_sma(20, symbol)\n ma_60 = self.get_sma(60, symbol)\n if ma_20 >= ma_60 and self.flag==0:\n #金叉买入\n self.do_trade(symbol, price, \"buy\")\n self.flag = 1\n elif ma_20 < ma_60 and self.flag==1:\n #死叉卖出\n self.do_trade(symbol, price, \"sell\")\n self.flag = 0\n\n\n def on_bar_day(self, tiny_quote):\n \"\"\"收盘时会触发一次日k数据推送\"\"\"\n pass\n\n def on_before_trading(self, date_time):\n \"\"\"开盘时触发一次回调, 港股是09:30:00\"\"\"\n pass\n\n def on_after_trading(self, date_time):\n \"\"\"收盘时触发一次回调, 港股是16:00:00\"\"\"\n pass\n\n def get_sma(self, n, symbol):\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11122)\n now = datetime.datetime.now()\n end_str = now.strftime('%Y-%m-%d')\n start = now - datetime.timedelta(days=365)\n start_str = start.strftime('%Y-%m-%d')\n temp = quote_ctx.get_history_kline(symbol, start=start_str, end=end_str)\n temp_data = temp[1]['close']\n result = talib.EMA(temp_data, n)\n quote_ctx.close()\n return result.values[-1]\n\n def do_trade(self, symbol, price, trd_side):\n # 获取账户信息\n trd_ctx = OpenHKTradeContext(host='172.24.31.139', port=11111)\n trd_ctx.unlock_trade(self.pwd_unlock)\n result, accinfo = trd_ctx.accinfo_query()\n if result != 0:\n return\n accinfo_cash = accinfo.cash.values[0]\n accinfo_market_val = accinfo.market_val.values[0]\n\n\n if trd_side == 'buy':\n qty = int(accinfo_cash / price)\n trd_ctx.place_order(price=price, qty=qty, code=symbol, trd_side=TrdSide.BUY)\n elif trd_side == 'sell':\n qty = int(accinfo_market_val / price)\n trd_ctx.place_order(price=price, qty=qty, code=symbol, trd_side=TrdSide.SELL)\n\n trd_ctx.close()\n\n\n def test(self):\n pwd_unlock = '<PASSWORD>' #输入交易密码\n trade_ctx = OpenHKTradeContext(host='127.0.0.1', port=11122)\n _, lock_message = trade_ctx.unlock_trade(pwd_unlock)\n print(lock_message)\n _ , accinfo = trade_ctx.accinfo_query()\n print(trade_ctx.accinfo_query(trd_env='SIMULATE'))\n accinfo_cash = accinfo.cash.values[0]\n accinfo_market_val = accinfo.market_val.values[0]\n print(accinfo_cash, accinfo_market_val)\n trade_ctx.close()\n\nif __name__ == '__main__':\n my_strate = TinyStrateMeanLine()\n #my_strate.test()\n frame = 
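Both do_trade helpers above size the order as int(cash / price) or int(market_val / price), which can produce a quantity that is not a whole multiple of the board lot; the later order examples fetch lot_size from get_market_snapshot and reject odd quantities. Below is a hedged sketch of sizing that respects the lot, with lot_size passed in rather than queried. Note also that get_sma in TinyStrateMeanLine actually calls talib.EMA despite its name, so the crossover it tests is an EMA cross.

def qty_for_cash(cash, price, lot_size):
    """Largest buyable quantity that is a whole multiple of lot_size."""
    if price <= 0 or lot_size <= 0:
        return 0
    lots = int(cash // (price * lot_size))
    return lots * lot_size

if __name__ == '__main__':
    # e.g. 100,000 cash at 350.0 with a 100-share lot -> 200 shares, not 285
    print(qty_for_cash(100000, 350.0, 100))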
TinyQuantFrame(my_strate)\n frame.run()\n", "id": "6870992", "language": "Python", "matching_score": 2.1169657707214355, "max_stars_count": 5, "path": "futuquant/examples/app/tq_mean_line/TinyStrateMeanLine.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n 跟踪止损:跟踪止损是一种更高级的条件单,需要指定如下参数,以便制造出移动止损价\n 跟踪止损的详细介绍:https://www.futu5.com/faq/topic214\n\"\"\"\n\nimport os\nimport sys\nfrom math import floor\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom time import sleep\n\nsys.path.append(os.path.split(os.path.abspath(os.path.pardir))[0])\n\nimport futuquant as ft\nfrom emailplugin import EmailNotification\nfrom stocksell import simple_sell, smart_sell\n\n\nclass TrailingMethod(object):\n DROP_ABS = \"DROP_ABS\" # 绝对值降低\n DROP_PER = \"DROP_PER\" # 百分比降低\n\n\nclass SellMethod(object):\n SIMPLE_SELL = \"SIMPLE_SELL\"\n SMART_SELL = \"SMART_SELL\"\n\n\nclass TrailingStopHandler(ft.StockQuoteHandlerBase):\n \"\"\"\"跟踪止损数据回调类\"\"\"\n\n def __init__(self, quote_ctx, is_hk_trade, method, drop):\n super(ft.StockQuoteHandlerBase, self).__init__()\n self.quote_ctx = quote_ctx\n self.is_hk_trade = is_hk_trade\n self.method = method\n self.drop = drop\n self.finished = False\n self.stop = None\n self.price_lst = []\n self.stop_lst = []\n self.time_lst = []\n\n if self.method not in [TrailingMethod.DROP_ABS, TrailingMethod.DROP_PER]:\n raise Exception(\"trailing method is error!\")\n\n def on_recv_rsp(self, rsp_str):\n \"\"\"数据接收回调函数\"\"\"\n ret, content = super(TrailingStopHandler, self).on_recv_rsp(rsp_str)\n if ret != ft.RET_OK:\n print('StockQuote error {}'.format(content))\n return ret, content\n if self.finished:\n return ret, content\n ret, data = self.quote_ctx.get_global_state()\n if ret != ft.RET_OK:\n print('获取全局状态失败')\n trading = False\n else:\n hk_trading = (data['market_hk'] == ft.MarketState.MORNING or data['market_hk'] == ft.MarketState.AFTERNOON)\n us_trading = (data['market_us'] == ft.MarketState.MORNING)\n trading = hk_trading if self.is_hk_trade else us_trading\n\n if not trading:\n print('不处在交易时间段')\n return ft.RET_OK, content\n last_price = content.iloc[0]['last_price']\n\n if self.stop is None:\n self.stop = last_price - self.drop if self.method == TrailingMethod.DROP_ABS else last_price * (1 - self.drop)\n elif (self.stop + self.drop < last_price) if self.method == TrailingMethod.DROP_ABS else (self.stop < last_price * (1 - self.drop)):\n self.stop = last_price - self.drop if self.method == TrailingMethod.DROP_ABS else last_price * (1 - self.drop)\n elif self.stop >= last_price:\n # 交易己被触发\n self.finished = True\n print('交易被触发')\n\n self.price_lst.append(last_price)\n self.stop_lst.append(self.stop)\n print('last_price is {}, stop is {}'.format(last_price, self.stop))\n\n return ft.RET_OK, content\n\n\ndef trailing_stop(api_svr_ip='127.0.0.1', api_svr_port=11111, unlock_password=\"\", code='<PASSWORD>',\n trade_env=ft.TrdEnv.SIMULATE, method=TrailingMethod.DROP_ABS, drop=1.0, volume=100,\n how_to_sell=SellMethod.SMART_SELL, diff=0, rest_time=2,\n enable_email_notification=False, receiver=''):\n \"\"\"\n 止损策略函数\n :param api_svr_ip: (string)ip\n :param api_svr_port: (int)port\n :param unlock_password: (string)交易解锁密码, 必需修改! 
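TrailingStopHandler above ratchets the stop upward as the price makes new highs and flags the trade once the price falls back to the stop: under DROP_ABS the stop trails by a fixed price amount, under DROP_PER by a fixed fraction. A compact standalone sketch of that update rule with the same two modes; the class and method names here are illustrative, not part of the example's API.

class TrailingStop:
    """Raise the stop on new highs; report a hit when price falls back to it."""
    def __init__(self, drop, percent=False):
        self.drop = drop                  # absolute amount, or fraction if percent
        self.percent = percent
        self.stop = None

    def update(self, last_price):
        candidate = last_price * (1 - self.drop) if self.percent else last_price - self.drop
        if self.stop is None or candidate > self.stop:
            self.stop = candidate         # ratchet up, never down
        return last_price <= self.stop    # True once the stop is hit

if __name__ == '__main__':
    ts = TrailingStop(drop=0.03, percent=True)   # 3%, like the trailing_drop configured at the bottom of this example
    for p in [100.0, 102.0, 105.0, 104.0, 101.0]:
        hit = ts.update(p)
        print(p, round(ts.stop, 2), hit)         # triggers at 101.0 (stop 101.85)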
模拟交易设为一个非空字符串即可\n :param code: (string)股票\n :param trade_env: ft.TrdEnv.REAL: 真实交易 ft.TrdEnv.SIMULATE: 模拟交易\n :param method: method == TrailingMethod.DROP_ABS: 股票下跌drop价格就会止损 railingMethod.DROP_PER: 股票下跌drop的百分比就会止损\n :param drop: method == TrailingMethod.DROP_ABS, 股票下跌的价格 method == TrailingMethod.DROP_PER,股票下跌的百分比,0.01表示下跌1%则止损\n :param volume: 需要卖掉的股票数量\n :param how_to_sell: 以何种方式卖出股票, SellMethod 类型\n :param diff: 默认为0,当how_to_sell为SellMethod.DROP_ABS时,以(市价-diff)的价格卖出\n :param rest_time: 每隔REST_TIME秒,会检查订单状态, 需要>=2\n :param enable_email_notification: 激活email功能\n :param receiver: 邮件接收者\n \"\"\"\n EmailNotification.set_enable(enable_email_notification)\n\n if how_to_sell not in [SellMethod.SIMPLE_SELL, SellMethod.SMART_SELL]:\n raise Exception('how_to_sell value error')\n\n if method not in [TrailingMethod.DROP_ABS, TrailingMethod.DROP_PER]:\n raise Exception('method value error')\n\n quote_ctx = ft.OpenQuoteContext(host=api_svr_ip, port=api_svr_port)\n is_hk_trade = 'HK.' in code\n if is_hk_trade:\n trade_ctx = ft.OpenHKTradeContext(host=api_svr_ip, port=api_svr_port)\n else:\n trade_ctx = ft.OpenUSTradeContext(host=api_svr_ip, port=api_svr_port)\n\n if unlock_password == \"\":\n raise Exception('请先配置交易密码')\n\n ret, data = trade_ctx.unlock_trade(unlock_password)\n if ret != ft.RET_OK:\n raise Exception('解锁交易失败')\n\n ret, data = trade_ctx.position_list_query(trd_env=trd_env)\n if ret != ft.RET_OK:\n raise Exception(\"无法获取持仓列表\")\n\n try:\n qty = data[data['code'] == code].iloc[0]['qty']\n except:\n raise Exception('你没有持仓!无法买卖')\n\n qty = int(qty)\n if volume == 0:\n volume = qty\n if volume < 0:\n raise Exception('volume lower than 0')\n elif qty < volume:\n raise Exception('持仓不足')\n\n ret, data = quote_ctx.get_market_snapshot(code)\n if ret != ft.RET_OK:\n raise Exception('获取lot size失败')\n lot_size = data.iloc[0]['lot_size']\n\n if volume % lot_size != 0:\n raise Exception('volume 必须是{}的整数倍'.format(lot_size))\n\n ret, data = quote_ctx.subscribe(code, ft.SubType.QUOTE)\n if ret != ft.RET_OK:\n raise Exception('订阅QUOTE错误: error {}:{}'.format(ret, data))\n\n ret, data = quote_ctx.subscribe(code, ft.SubType.ORDER_BOOK)\n if ret != ft.RET_OK:\n print('error {}:{}'.format(ret, data))\n raise Exception('订阅order book失败: error {}:{}'.format(ret, data))\n\n if diff:\n if is_hk_trade:\n ret, data = quote_ctx.get_order_book(code)\n if ret != ft.RET_OK:\n raise Exception('获取order book失败: cannot get order book'.format(data))\n\n min_diff = round(abs(data['Bid'][0][0] - data['Bid'][1][0]), 3)\n if floor(diff / min_diff) * min_diff != diff:\n raise Exception('diff 应是{}的整数倍'.format(min_diff))\n else:\n if round(diff, 2) != diff:\n raise Exception('美股价差保留2位小数{}->{}'.format(diff, round(diff, 2)))\n\n if method == TrailingMethod.DROP_ABS:\n if is_hk_trade:\n if floor(drop / min_diff) * min_diff != drop:\n raise Exception('drop必须是{}的整数倍'.format(min_diff))\n else:\n if round(drop, 2) != drop:\n raise Exception('drop必须保留2位小数{}->{}'.format(drop, round(drop, 2)))\n\n elif method == TrailingMethod.DROP_PER:\n if drop < 0 or drop > 1:\n raise Exception('drop must in [0, 1] if method is DROP_PER')\n\n trailing_stop_handler = TrailingStopHandler(quote_ctx, is_hk_trade, method, drop)\n quote_ctx.set_handler(trailing_stop_handler)\n quote_ctx.start()\n while True:\n if trailing_stop_handler.finished:\n # sell the stock\n qty = volume\n sell_price = trailing_stop_handler.stop\n while qty > 0:\n if how_to_sell == SellMethod.SIMPLE_SELL:\n data = simple_sell(quote_ctx, trade_ctx, code, sell_price - diff, qty, trade_env, 
ft.OrderType.SPECIAL_LIMIT)\n else:\n data = smart_sell(quote_ctx, trade_ctx, code, qty, trade_env, ft.OrderType.SPECIAL_LIMIT)\n if data is None:\n print('下单失败')\n EmailNotification.send_email(receiver, '下单失败', '股票代码{},数量{}'.format(code, volume))\n sleep(rest_time)\n continue\n\n order_id = data.iloc[0]['order_id']\n sleep(rest_time)\n\n while True:\n ret, data = trade_ctx.order_list_query(order_id=order_id, trd_env=trade_env)\n if ret != ft.RET_OK:\n sleep(rest_time)\n continue\n\n status = data.iloc[0]['order_status']\n dealt_qty = int(data.iloc[0]['dealt_qty'])\n order_price = data.iloc[0]['price']\n qty -= dealt_qty\n\n if status == ft.OrderStatus.FILLED_ALL:\n print('全部成交:股票代码{}, 成交总数{},价格{}'.format(code, dealt_qty, order_price))\n EmailNotification.send_email(receiver, '全部成交', '股票代码{},成交总数{},价格{}'\n .format(code, dealt_qty, order_price))\n break\n elif status == ft.OrderStatus.FILLED_PART:\n print('部分成交:股票代码{},成交总数{},价格{}'.format(code, dealt_qty, order_price))\n EmailNotification.send_email(receiver, '部分成交', '股票代码{},成交总数{},价格{}'\n .format(code, dealt_qty, order_price))\n break\n elif status == ft.OrderStatus.FAILED or status == ft.OrderStatus.SUBMIT_FAILED or \\\n status == ft.OrderStatus.CANCELLED_ALL or status == ft.OrderStatus.DELETED:\n break\n else:\n trade_ctx.modify_order(ft.ModifyOrderOp.CANCEL, order_id, 0, 0)\n sleep(rest_time)\n continue\n\n if how_to_sell == SellMethod.SIMPLE_SELL:\n ret, data = quote_ctx.get_order_book(code)\n if ret != ft.RET_OK:\n raise Exception('获取order_book失败')\n sell_price = data['Bid'][0][0]\n\n # draw price and stop\n price_lst = trailing_stop_handler.price_lst\n plt.plot(np.arange(len(price_lst)), price_lst)\n stop_list = trailing_stop_handler.stop_lst\n plt.plot(np.arange(len(stop_list)), stop_list)\n break\n\n quote_ctx.close()\n trade_ctx.close()\n\n\nif __name__ == '__main__':\n # 全局参数配置\n ip = '127.0.0.1'\n port = 11111\n unlock_pwd = \"<PASSWORD>\"\n code = 'HK.00123' # 'US.BABA' #'HK.00700'\n trd_env = ft.TrdEnv.SIMULATE\n\n trailing_method = TrailingMethod.DROP_PER\n trailing_drop = 0.03 # 3%\n vol = 0\n how_to_sell = SellMethod.SMART_SELL\n diff = 0\n rest_time = 2 # 每隔REST_TIME秒,会检查订单状态, 需要>=2\n\n # 邮件通知参数\n enable_email = True\n receiver_email = 'your receive email'\n\n trailing_stop(ip, port, unlock_pwd, code, trd_env, trailing_method, trailing_drop, vol,\n how_to_sell, diff, rest_time, enable_email, receiver_email)\n\n", "id": "7486054", "language": "Python", "matching_score": 4.274054050445557, "max_stars_count": 5, "path": "futuquant/examples/learn/newtrailingstop.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n验证接口:下单然后立即撤单, 为避免成交损失,买单价格港股放在十档,美股为一档下降10%, 买单数量为1手(美股为1股)\n\"\"\"\nimport os\nimport sys\nfrom time import sleep\n\nsys.path.append(os.path.split(os.path.abspath(os.path.pardir))[0])\n\nimport futuquant as ft\n\n\n'''\n 验证接口:下单然后立即撤单, 为避免成交损失,买单价格港股放在十档,美股为一档下降10%, 买单数量为1手(美股为1股)\n 使用请先配置正确参数:\n api_svr_ip: (string)ip\n api_svr_port: (string)ip\n unlock_password: (string)交易解锁密码, 必需修改!!!\n test_code: (string)股票 'HK.xxxxx' or 'US.xxxx'\n trade_env: ft.TrdEnv.SIMULATE or ft.TrdEnv.REAL\n'''\n\n\ndef make_order_and_cancel(api_svr_ip, api_svr_port, unlock_password, test_code, trade_env, acc_id):\n \"\"\"\n 使用请先配置正确参数:\n :param api_svr_ip: (string) ip\n :param api_svr_port: (string) ip\n :param unlock_password: (string) 交易解锁密码, 必需修改!\n :param test_code: (string) 股票\n :param trade_env: 参见 ft.TrdEnv的定义\n :param acc_id: 交易子账号id\n \"\"\"\n if unlock_password == \"\":\n raise Exception(\"请先配置交易解锁密码!\")\n\n quote_ctx = 
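Both this example and make_order_and_cancel below poll order_list_query until the order reaches a status they treat as final, cancelling and retrying otherwise. A minimal sketch of that polling step factored into a helper; trade_ctx is assumed to be an already-unlocked futu trade context, and the status constants and field names are the ones used above.

from time import sleep
import futuquant as ft

FINAL_STATUSES = (ft.OrderStatus.FILLED_ALL, ft.OrderStatus.FILLED_PART,
                  ft.OrderStatus.FAILED, ft.OrderStatus.SUBMIT_FAILED,
                  ft.OrderStatus.CANCELLED_ALL, ft.OrderStatus.DELETED)

def wait_for_order(trade_ctx, order_id, trd_env, poll_sec=2, max_polls=30):
    """Poll an order until it hits a final status; return (status, dealt_qty) or None."""
    for _ in range(max_polls):
        ret, data = trade_ctx.order_list_query(order_id=order_id, trd_env=trd_env)
        if ret == ft.RET_OK and len(data) > 0:
            status = data.iloc[0]['order_status']
            if status in FINAL_STATUSES:
                return status, int(data.iloc[0]['dealt_qty'])
        sleep(poll_sec)
    return None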
ft.OpenQuoteContext(host=api_svr_ip, port=api_svr_port) # 创建行情api\n quote_ctx.subscribe(test_code, ft.SubType.ORDER_BOOK) # 定阅摆盘\n\n # 创建交易api\n is_hk_trade = 'HK.' in test_code\n if is_hk_trade:\n trade_ctx = ft.OpenHKTradeContext(host=api_svr_ip, port=api_svr_port)\n else:\n trade_ctx = ft.OpenUSTradeContext(host=api_svr_ip, port=api_svr_port)\n\n # 每手股数\n lot_size = 0\n is_unlock_trade = False\n is_fire_trade = False\n while not is_fire_trade:\n sleep(2)\n # 解锁交易\n if not is_unlock_trade and trade_env == ft.TrdEnv.REAL:\n print(\"unlocking trade...\")\n ret_code, ret_data = trade_ctx.unlock_trade(unlock_password)\n is_unlock_trade = (ret_code == ft.RET_OK)\n if not is_unlock_trade:\n print(\"请求交易解锁失败:{}\".format(ret_data))\n break\n\n if lot_size == 0:\n print(\"get lotsize...\")\n ret, data = quote_ctx.get_market_snapshot(test_code)\n lot_size = data.iloc[0]['lot_size'] if ret == ft.RET_OK else 0\n if ret != ft.RET_OK:\n print(\"取不到每手信息,重试中: {}\".format(data))\n continue\n elif lot_size <= 0:\n raise BaseException(\"该股票每手信息错误,可能不支持交易 code ={}\".format(test_code))\n\n print(\"get order book...\")\n ret, data = quote_ctx.get_order_book(test_code) # 得到第十档数据\n if ret != ft.RET_OK:\n continue\n\n # 计算交易价格\n bid_order_arr = data['Bid']\n if is_hk_trade:\n if len(bid_order_arr) != 10:\n continue\n # 港股下单: 价格定为第十档\n price, _, _ = bid_order_arr[9]\n else:\n if len(bid_order_arr) == 0:\n continue\n # 美股下单: 价格定为一档降10%\n price, _, _ = bid_order_arr[0]\n price = round(price * 0.9, 2)\n\n qty = lot_size\n\n # 价格和数量判断\n if qty == 0 or price == 0.0:\n continue\n\n # 下单\n order_id = 0\n print(\"place order : price={} qty={} code={}\".format(price, qty, test_code))\n ret_code, ret_data = trade_ctx.place_order(price=price, qty=qty, code=test_code, trd_side=ft.TrdSide.BUY,\n order_type=ft.OrderType.NORMAL, trd_env=trade_env, acc_id=acc_id)\n is_fire_trade = True\n print('下单ret={} data={}'.format(ret_code, ret_data))\n if ret_code == ft.RET_OK:\n row = ret_data.iloc[0]\n order_id = row['order_id']\n\n # 循环撤单\n sleep(2)\n\n\n if order_id:\n while True:\n ret_code, ret_data = trade_ctx.order_list_query(order_id=order_id, status_filter_list=[], code='',\n start='', end='', trd_env=trade_env, acc_id=acc_id)\n\n if ret_code != ft.RET_OK:\n sleep(2)\n continue\n order_status = ret_data.iloc[0]['order_status']\n if order_status in [ft.OrderStatus.SUBMIT_FAILED, ft.OrderStatus.TIMEOUT, ft.OrderStatus.FILLED_ALL,\n ft.OrderStatus.FAILED, ft.OrderStatus.DELETED]:\n break\n\n print(\"cancel order...\")\n ret_code, ret_data = trade_ctx.modify_order(modify_order_op=ft.ModifyOrderOp.CANCEL, order_id=order_id,\n price=price, qty=qty, adjust_limit=0, trd_env=trade_env, acc_id=acc_id)\n print(\"撤单ret={} data={}\".format(ret_code, ret_data))\n if ret_code == ft.RET_OK:\n break\n else:\n sleep(2)\n # destroy object\n quote_ctx.close()\n trade_ctx.close()\n\n\nif __name__ == \"__main__\":\n ip = '127.0.0.1'\n port = 11111\n unlock_pwd = \"<PASSWORD>\" # 交易密码\n code = 'HK.00123' # 'US.BABA' 'HK.00700'\n trd_env = ft.TrdEnv.SIMULATE # 交易环境:真实或模拟\n acc_id = 0 # get_acc_list可查询交易子账号列表, 默认传0取列表中的第1个\n\n make_order_and_cancel(ip, port, unlock_pwd, code, trd_env, acc_id)\n", "id": "4828627", "language": "Python", "matching_score": 3.1598362922668457, "max_stars_count": 5, "path": "futuquant/examples/learn/make_order_and_cancel.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nExamples for use the python functions: get push data\n\"\"\"\n\nfrom futuquant import *\nfrom time import sleep\n\n\nclass 
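To avoid an accidental fill, the example above prices its test buy far from the market: the 10th bid level for HK, or the best bid less 10% rounded to 2 decimals for US. A small sketch of that selection, assuming the 'Bid' list of (price, volume, order_count) tuples returned by get_order_book as it is unpacked above.

def safe_test_buy_price(bid_levels, is_hk):
    """Pick a deliberately unattractive buy price from the bid side of the book."""
    if is_hk:
        if len(bid_levels) < 10:
            return None                   # need a full 10-level book
        price, _, _ = bid_levels[9]       # 10th bid level
        return price
    if not bid_levels:
        return None
    price, _, _ = bid_levels[0]           # best bid, discounted 10%
    return round(price * 0.9, 2)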
StockQuoteTest(StockQuoteHandlerBase):\n \"\"\"\n 获得报价推送数据\n \"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(StockQuoteTest, self).parse_rsp_pb(rsp_pb)\n if ret_code != RET_OK:\n logger.debug(\"StockQuoteTest: error, msg: %s\" % content)\n return RET_ERROR, content\n print(\"* StockQuoteTest : %s\" % content)\n return RET_OK, content\n\n\nclass CurKlineTest(CurKlineHandlerBase):\n \"\"\" kline push\"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(CurKlineTest, self).parse_rsp_pb(rsp_pb)\n if ret_code == RET_OK:\n print(\"* CurKlineTest : %s\\n\" % content)\n return RET_OK, content\n\n\nclass RTDataTest(RTDataHandlerBase):\n \"\"\" 获取分时推送数据 \"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(RTDataTest, self).parse_rsp_pb(rsp_pb)\n if ret_code != RET_OK:\n print(\"* RTDataTest: error, msg: %s\" % content)\n return RET_ERROR, content\n print(\"* RTDataTest :%s \\n\" % content)\n return RET_OK, content\n\n\nclass TickerTest(TickerHandlerBase):\n \"\"\" 获取逐笔推送数据 \"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(TickerTest, self).parse_rsp_pb(rsp_pb)\n if ret_code != RET_OK:\n print(\"* TickerTest: error, msg: %s\" % content)\n return RET_ERROR, content\n print(\"* TickerTest\\n\", content)\n return RET_OK, content\n\n\nclass OrderBookTest(OrderBookHandlerBase):\n \"\"\" 获得摆盘推送数据 \"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(OrderBookTest, self).parse_rsp_pb(rsp_pb)\n if ret_code != RET_OK:\n print(\"* OrderBookTest: error, msg: %s\" % content)\n return RET_ERROR, content\n print(\"* OrderBookTest\\n\", content)\n return RET_OK, content\n\n\nclass BrokerTest(BrokerHandlerBase):\n \"\"\" 获取经纪队列推送数据 \"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(BrokerTest, self).parse_rsp_pb(rsp_pb)\n if ret_code != RET_OK:\n print(\"* BrokerTest: error, msg: %s \" % content)\n return RET_ERROR, content\n\n stock_code, bid_content, ask_content = content\n print(\"* BrokerTest code \\n\", stock_code)\n print(\"* BrokerTest bid \\n\", bid_content)\n print(\"* BrokerTest ask \\n\", ask_content)\n return RET_OK, content\n\n\nclass SysNotifyTest(SysNotifyHandlerBase):\n \"\"\"sys notify\"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"receive response callback function\"\"\"\n ret_code, content = super(SysNotifyTest, self).parse_rsp_pb(rsp_pb)\n\n if ret_code == RET_OK:\n main_type, sub_type, msg = content\n print(\"* SysNotify main_type='{}' sub_type='{}' msg='{}'\\n\".format(main_type, sub_type, msg))\n else:\n print(\"* SysNotify error:{}\\n\".format(content))\n return ret_code, content\n\n\nclass TradeOrderTest(TradeOrderHandlerBase):\n \"\"\" order update push\"\"\"\n def on_recv_rsp(self, rsp_pb):\n ret, content = super(TradeOrderTest, self).on_recv_rsp(rsp_pb)\n\n if ret == RET_OK:\n print(\"* TradeOrderTest content={}\\n\".format(content))\n\n return ret, content\n\n\nclass TradeDealTest(TradeDealHandlerBase):\n \"\"\" order update push\"\"\"\n def on_recv_rsp(self, rsp_pb):\n ret, content = super(TradeDealTest, self).on_recv_rsp(rsp_pb)\n\n if ret == RET_OK:\n print(\"TradeDealTest content={}\".format(content))\n\n return ret, content\n\n\ndef quote_test():\n '''\n 行情接口调用测试\n :return:\n '''\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n\n # 设置异步回调接口\n quote_ctx.set_handler(StockQuoteTest())\n quote_ctx.set_handler(CurKlineTest())\n 
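Every handler class above follows the same pattern: subclass the matching *HandlerBase, let the base class parse the push inside on_recv_rsp, then act on the parsed content (a DataFrame for most push types). A stripped-down sketch of wiring one such handler into a quote context, using only names that appear in these examples; the host, port and stock code are placeholders.

from time import sleep
from futuquant import OpenQuoteContext, StockQuoteHandlerBase, SubType, RET_OK, RET_ERROR

class PrintQuoteHandler(StockQuoteHandlerBase):
    def on_recv_rsp(self, rsp_pb):
        ret, content = super(PrintQuoteHandler, self).on_recv_rsp(rsp_pb)
        if ret != RET_OK:
            return RET_ERROR, content
        print(content)                    # DataFrame of pushed quote rows
        return RET_OK, content

if __name__ == '__main__':
    ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
    ctx.set_handler(PrintQuoteHandler())  # register before subscribing
    ctx.start()
    ctx.subscribe(['HK.00700'], [SubType.QUOTE])
    sleep(15)                             # let a few pushes arrive
    ctx.close()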
quote_ctx.set_handler(RTDataTest())\n quote_ctx.set_handler(TickerTest())\n quote_ctx.set_handler(OrderBookTest())\n quote_ctx.set_handler(BrokerTest())\n quote_ctx.set_handler(SysNotifyTest())\n quote_ctx.start()\n\n print(\"* get_global_state : {}\\n\".format(quote_ctx.get_global_state()))\n\n # 获取推送数据\n big_sub_codes = ['HK.02318', 'HK.02828', 'HK.00939', 'HK.01093', 'HK.01299', 'HK.00175',\n 'HK.01299', 'HK.01833', 'HK.00005', 'HK.00883', 'HK.00388', 'HK.01398',\n 'HK.01114', 'HK.02800', 'HK.02018', 'HK.03988', 'HK.00386', 'HK.01211',\n 'HK.00857', 'HK.01177', 'HK.02601', 'HK.02628', 'HK_FUTURE.999010']\n big_sub_codes = []\n subtype_list = [SubType.QUOTE, SubType.ORDER_BOOK, SubType.TICKER, SubType.K_DAY, SubType.RT_DATA, SubType.BROKER]\n\n code_list = ['HK.00700', 'HK.00388']\n\n print(\"* get_owner_plate : {}\\n\".format(quote_ctx.get_owner_plate(code_list)))\n print(\"* get_referencestock_list : {}\\n\".format(quote_ctx.get_referencestock_list(\n code_list[0], SecurityReferenceType.WARRANT)))\n print(\"* get_holding_change_list : {}\\n\".format(quote_ctx.get_holding_change_list(\n \"US.AAPL\", StockHolder.EXECUTIVE, \"2018-01-01\", None)))\n\n print(\"* request_history_kline : {}\\n\".format(quote_ctx.request_history_kline(\n code_list[0], \"2018-01-01\", None, KLType.K_1M, AuType.QFQ, [KL_FIELD.ALL], 50000)))\n\n # 测试大量数据定阅\n if len(big_sub_codes):\n print(\"* subscribe : {}\\n\".format(quote_ctx.subscribe(big_sub_codes, subtype_list)))\n\n \"\"\"\n if True:\n print(\"* subscribe : {}\\n\".format(quote_ctx.subscribe(code_list, subtype_list)))\n print(\"* query_subscription : {}\\n\".format(quote_ctx.query_subscription(True)))\n sleep(60.1)\n print(\"* unsubscribe : {}\\n\".format(quote_ctx.unsubscribe(code_list, subtype_list)))\n print(\"* query_subscription : {}\\n\".format(quote_ctx.query_subscription(True)))\n sleep(1)\n \"\"\"\n print(\"* subscribe : {}\\n\".format(quote_ctx.subscribe(code_list, subtype_list)))\n\n # \"\"\"\n print(\"* get_stock_basicinfo : {}\\n\".format(quote_ctx.get_stock_basicinfo(Market.HK, SecurityType.ETF)))\n print(\"* get_cur_kline : {}\\n\".format(quote_ctx.get_cur_kline(code_list[0], 10, SubType.K_DAY, AuType.QFQ)))\n\n print(\"* get_rt_data : {}\\n\".format(quote_ctx.get_rt_data(code_list[0])))\n print(\"* get_rt_ticker : {}\\n\".format(quote_ctx.get_rt_ticker(code_list[0], 10)))\n\n print(\"* get_broker_queue : {}\\n\".format(quote_ctx.get_broker_queue(code_list[0])))\n print(\"* get_order_book : {}\\n\".format(quote_ctx.get_order_book(code_list[0])))\n print(\"* get_history_kline : {}\\n\".format(quote_ctx.get_history_kline('HK.00700', start='2017-06-20', end='2017-06-22')))\n # \"\"\"\n\n # \"\"\"\n print(\"* get_multi_points_history_kline : {}\\n\".format(quote_ctx.get_multi_points_history_kline(code_list, ['2017-06-20', '2017-06-22', '2017-06-23'], KL_FIELD.ALL,\n KLType.K_DAY, AuType.QFQ)))\n print(\"* get_autype_list : {}\\n\".format(quote_ctx.get_autype_list(\"HK.00700\")))\n\n print(\"* get_trading_days : {}\\n\".format(quote_ctx.get_trading_days(Market.HK, '2018-11-01', '2018-11-20')))\n\n print(\"* get_market_snapshot : {}\\n\".format(quote_ctx.get_market_snapshot('HK.21901')))\n print(\"* get_market_snapshot : {}\\n\".format(quote_ctx.get_market_snapshot(code_list)))\n\n print(\"* get_plate_list : {}\\n\".format(quote_ctx.get_plate_list(Market.HK, Plate.ALL)))\n print(\"* get_plate_stock : {}\\n\".format(quote_ctx.get_plate_stock('HK.BK1001')))\n # \"\"\"\n\n # \"\"\"\n sleep(10)\n quote_ctx.close()\n # \"\"\"\n\n\ndef 
trade_hkcc_test():\n \"\"\"\n A股通交易测试\n :return:\n \"\"\"\n trd_ctx = OpenHKCCTradeContext(host='127.0.0.1', port=11111)\n trd_ctx.set_handler(TradeOrderTest())\n trd_ctx.set_handler(TradeDealTest())\n trd_ctx.start()\n\n # 交易请求必须先解锁 !!!\n pwd_unlock = '<PASSWORD>'\n print(\"* unlock_trade : {}\\n\".format(trd_ctx.unlock_trade(pwd_unlock)))\n\n print(\"* accinfo_query : {}\\n\".format(trd_ctx.accinfo_query()))\n print(\"* position_list_query : {}\\n\".format(trd_ctx.position_list_query(pl_ratio_min=-50, pl_ratio_max=50)))\n print(\"* order_list_query : {}\\n\".format(trd_ctx.order_list_query(status_filter_list=[OrderStatus.DISABLED])))\n print(\"* get_acc_list : {}\\n\".format(trd_ctx.get_acc_list()))\n print(\"* order_list_query : {}\\n\".format(trd_ctx.order_list_query(status_filter_list=[OrderStatus.SUBMITTED])))\n\n ret_code, ret_data = trd_ctx.place_order(0.1, 100, \"SZ.000979\", TrdSide.BUY)\n print(\"* place_order : {}\\n\".format(ret_data))\n if ret_code == RET_OK:\n order_id = ret_data['order_id'][0]\n print(\"* modify_order : {}\\n\".format(trd_ctx.modify_order(ModifyOrderOp.CANCEL, order_id, 0, 0)))\n\n print(\"* deal_list_query : {}\\n\".format(trd_ctx.deal_list_query(code=\"000979\")))\n print(\"* history_order_list_query : {}\\n\".format(trd_ctx.history_order_list_query(status_filter_list=[OrderStatus.FILLED_ALL, OrderStatus.FILLED_PART],\n code=\"512310\", start=\"\", end=\"2018-2-1\")))\n\n print(\"* history_deal_list_query : {}\\n\".format(trd_ctx.history_deal_list_query(code=\"\", start=\"\", end=\"2018-6-1\")))\n\n sleep(10)\n trd_ctx.close()\n\n\ndef trade_hk_test():\n '''\n 港股交易测试\n :return:\n '''\n trd_ctx = OpenHKTradeContext(host='127.0.0.1', port=11111)\n trd_ctx.set_handler(TradeOrderTest())\n trd_ctx.set_handler(TradeDealTest())\n trd_ctx.start()\n\n # 交易请求必须先解锁 !!!\n pwd_unlock = '<PASSWORD>'\n print(\"* unlock_trade : {}\\n\".format(trd_ctx.unlock_trade(pwd_unlock)))\n\n # \"\"\"\n print(\"* accinfo_query : {}\\n\".format(trd_ctx.accinfo_query()))\n print(\"* position_list_query : {}\\n\".format(trd_ctx.position_list_query(pl_ratio_min=-50, pl_ratio_max=50)))\n print(\"* order_list_query : {}\\n\".format(trd_ctx.order_list_query(status_filter_list=[OrderStatus.DISABLED])))\n print(\"* get_acc_list : {}\\n\".format(trd_ctx.get_acc_list()))\n print(\"* order_list_query : {}\\n\".format(trd_ctx.order_list_query(status_filter_list=[OrderStatus.SUBMITTED])))\n\n ret_code, ret_data = trd_ctx.place_order(700.0, 100, \"HK.00700\", TrdSide.SELL)\n print(\"* place_order : {}\\n\".format(ret_data))\n if ret_code == RET_OK:\n order_id = ret_data['order_id'][0]\n print(\"* modify_order : {}\\n\".format(trd_ctx.modify_order(ModifyOrderOp.CANCEL, order_id, 0, 0)))\n\n print(\"* deal_list_query : {}\\n\".format(trd_ctx.deal_list_query(code=\"00700\")))\n print(\"* history_order_list_query : {}\\n\".format(trd_ctx.history_order_list_query(status_filter_list=[OrderStatus.FILLED_ALL, OrderStatus.FILLED_PART],\n code=\"00700\", start=\"\", end=\"2018-2-1\")))\n\n print(\"* history_deal_list_query : {}\\n\".format(trd_ctx.history_deal_list_query(code=\"\", start=\"\", end=\"2018-6-1\")))\n # \"\"\"\n\n sleep(10)\n trd_ctx.close()\n\n\nif __name__ ==\"__main__\":\n '''\n 默认rsa密钥在futuquant.common下的conn_key.txt\n 注意同步配置FutuOpenD的FTGateway.xml中的 rsa_private_key 字段\n '''\n # SysConfig.set_init_rsa_file()\n\n ''' 是否启用协议加密 '''\n # SysConfig.enable_proto_encrypt(False)\n\n '''设置通讯协议格式 '''\n # SysConfig.set_proto_fmt(ProtoFMT.Json)\n\n '''设置client信息'''\n # 
SysConfig.set_client_info('sample', 0)\n\n ''' 行情api测试 '''\n quote_test()\n\n ''' 交易api测试 '''\n # trade_hk_test()\n\n # trade_hkcc_test()\n\n\n\n", "id": "8632404", "language": "Python", "matching_score": 5.56673002243042, "max_stars_count": 5, "path": "futuquant/examples/learn/check_all_get_push.py" }, { "content": "#-*-coding:utf-8-*-\n\nimport futuquant\nfrom futuquant.quote.quote_response_handler import *\nfrom futuquant.common.constant import *\nfrom logUtil import Logs\nimport time\n\n\n# port=11118 285706压测\nclass QoutationAsynPush(object):\n '''\n gateway负载测试:订阅3大市场各类股票的实时行情数据,求最大可订阅股票个数\n '''\n timestamp = (int)(time.time())\n dir = 'QoutationAsynPush_'+str(timestamp)\n\n def __init__(self):\n # 加密通道\n # SysConfig.enable_proto_encrypt(True)\n # SysConfig.enable_proto_encrypt(True)\n\n self.quote_ctx = quote_ctx = futuquant.OpenQuoteContext(host='127.0.0.1',port=11111)\n self.quote_ctx.start()\n\n\n def allStockQoutation(self):\n '''\n 订阅多只股票的行情数据\n :return:\n '''\n logger = Logs().getNewLogger('allStockQoutation', QoutationAsynPush.dir)\n markets= [Market.HK,Market.US,Market.SH,Market.SZ] #,Market.HK_FUTURE,Market.US_OPTION\n stockTypes = [SecurityType.STOCK,SecurityType.WARRANT,SecurityType.IDX,SecurityType.BOND,SecurityType.ETF]\n\n for stockType in stockTypes:\n for market in markets:\n ret_code_stock_basicinfo ,ret_data_stock_basicinfo = self.quote_ctx.get_stock_basicinfo(market,stockType)\n codes = ret_data_stock_basicinfo['code'].tolist()\n codes_len = len(codes)\n code_sub = 0\n code_sub_succ = 0\n for code in codes:\n ret_code = self.aStockQoutation(code)\n code_sub += 1\n if ret_code is RET_OK:\n code_sub_succ += 1\n logger.info('市场 = %s,股票类型 = %s, 股票总数 = %d, 已发起订阅 = %d,订阅成功 = %d' % (market, stockType, codes_len, code_sub,code_sub_succ)) # 记录\n logger.info('end-------->市场 = %s,股票类型 = %s, 股票总数 = %d, 已发起订阅 = %d,订阅成功 = %d' % ( market, stockType, codes_len, code_sub,code_sub_succ)) # 记录\n\n time.sleep(5)\n self.quote_ctx.stop()\n self.quote_ctx.close()\n\n def aStockQoutation(self,code):\n '''\n 订阅一只股票的实时行情数据,接收推送\n :param code: 股票代码\n :return:\n '''\n\n #设置监听-->订阅-->调用接口\n\n # 分时\n self.quote_ctx.set_handler(RTDataTest())\n self.quote_ctx.subscribe(code, SubType.RT_DATA)\n ret_code_rt_data, ret_data_rt_data = self.quote_ctx.get_rt_data(code)\n # 逐笔\n self.quote_ctx.set_handler(TickerTest())\n self.quote_ctx.subscribe(code, SubType.TICKER)\n ret_code_rt_ticker, ret_data_rt_ticker = self.quote_ctx.get_rt_ticker(code)\n # 报价\n self.quote_ctx.set_handler(StockQuoteTest())\n self.quote_ctx.subscribe(code, SubType.QUOTE)\n ret_code_stock_quote, ret_data_stock_quote = self.quote_ctx.get_stock_quote([code])\n # 实时K线\n self.quote_ctx.set_handler(CurKlineTest())\n kTypes = [SubType.K_1M, SubType.K_5M, SubType.K_15M, SubType.K_30M, SubType.K_60M, SubType.K_DAY,\n SubType.K_WEEK, SubType.K_MON]\n auTypes = [AuType.NONE, AuType.QFQ, AuType.HFQ]\n num = 10\n ret_code_cur_kline = RET_OK\n for kType in kTypes:\n self.quote_ctx.subscribe(code, kType)\n for auType in auTypes:\n ret_code_cur_kline_temp, ret_data_cur_kline = self.quote_ctx.get_cur_kline(code, num, kType, auType)\n if ret_code_cur_kline_temp is RET_ERROR:\n ret_code_cur_kline = RET_ERROR\n # 摆盘\n self.quote_ctx.set_handler(OrderBookTest())\n self.quote_ctx.subscribe(code, SubType.ORDER_BOOK)\n ret_code_order_book, ret_data_order_book = self.quote_ctx.get_order_book(code)\n # 经纪队列\n self.quote_ctx.set_handler(BrokerTest())\n self.quote_ctx.subscribe(code, SubType.BROKER)\n ret_code_broker_queue, bid_frame_table, ask_frame_table = 
self.quote_ctx.get_broker_queue(code)\n\n return ret_code_rt_data+ret_code_rt_ticker+ret_code_stock_quote+ret_code_cur_kline+ret_code_order_book+ret_code_broker_queue\n\n\nclass BrokerTest(BrokerHandlerBase):\n logger = Logs().getNewLogger('BrokerTest', QoutationAsynPush.dir)\n def on_recv_rsp(self, rsp_pb):\n ret_code,stock_code,ret_data = super(BrokerTest, self).on_recv_rsp(rsp_pb)\n #打印日志\n BrokerTest.logger.info('BrokerTest')\n BrokerTest.logger.info(stock_code)\n BrokerTest.logger.info(ret_code)\n BrokerTest.logger.info(ret_data)\n\n return RET_OK,ret_data\n\nclass CurKlineTest(CurKlineHandlerBase):\n '''获取实时K线 get_cur_kline 和 CurKlineHandlerBase'''\n logger = Logs().getNewLogger('CurKlineTest', QoutationAsynPush.dir)\n def on_recv_rsp(self, rsp_pb):\n ret_code, ret_data = super(CurKlineTest, self).on_recv_rsp(rsp_pb)\n # 打印,记录日志\n CurKlineTest.logger.info('CurKlineTest')\n CurKlineTest.logger.info(ret_code)\n CurKlineTest.logger.info(ret_data)\n return RET_OK, ret_data\n\nclass OrderBookTest(OrderBookHandlerBase):\n logger = Logs().getNewLogger('OrderBookTest', QoutationAsynPush.dir)\n\n def on_recv_rsp(self, rsp_pb):\n ret_code ,ret_data = super(OrderBookTest, self).on_recv_rsp(rsp_pb)\n #打印\n OrderBookTest.logger.info('OrderBookTest')\n OrderBookTest.logger.info(ret_code)\n OrderBookTest.logger.info(ret_data)\n return RET_OK, ret_data\n\nclass RTDataTest(RTDataHandlerBase):\n logger = Logs().getNewLogger('RTDataTest', QoutationAsynPush.dir)\n def on_recv_rsp(self, rsp_pb):\n ret_code,ret_data = super(RTDataTest, self).on_recv_rsp(rsp_pb)\n #打印信息\n RTDataTest.logger.info('RTDataTest')\n RTDataTest.logger.info(ret_code)\n RTDataTest.logger.info(ret_data)\n\n return RET_OK,ret_data\n\nclass TickerTest(TickerHandlerBase):\n '''获取逐笔 get_rt_ticker 和 TickerHandlerBase'''\n logger = Logs().getNewLogger('TickerTest', QoutationAsynPush.dir)\n def on_recv_rsp(self, rsp_pb):\n ret_code, ret_data = super(TickerTest, self).on_recv_rsp(rsp_pb)\n # 打印,记录日志\n TickerTest.logger.info('TickerTest')\n TickerTest.logger.info(ret_code)\n TickerTest.logger.info(ret_data)\n return RET_OK, ret_data\n\nclass StockQuoteTest(StockQuoteHandlerBase):\n # 获取报价get_stock_quote和StockQuoteHandlerBase\n logger = Logs().getNewLogger('StockQuoteTest', QoutationAsynPush.dir)\n def on_recv_rsp(self, rsp_str):\n ret_code, ret_data = super(StockQuoteTest,self).on_recv_rsp(rsp_str) # 基类的on_recv_rsp方法解包返回了报价信息,格式与get_stock_quote一样\n #打印,记录日志\n StockQuoteTest.logger.info('StockQuoteTest')\n StockQuoteTest.logger.info(ret_code)\n StockQuoteTest.logger.info(ret_data)\n return RET_OK, ret_data\n\nif __name__ =='__main__':\n ta = QoutationAsynPush()\n ta.allStockQoutation()\n\n", "id": "12378364", "language": "Python", "matching_score": 3.238499402999878, "max_stars_count": 5, "path": "futuquant/examples/learn/max_sub.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nExamples for use the python functions: get push data\n\"\"\"\n\nfrom futuquant import *\nfrom time import sleep\nfrom futuquant.common.ft_logger import logger\n\n\nclass TickerTest(TickerHandlerBase):\n \"\"\" 获取逐笔推送数据 \"\"\"\n def on_recv_rsp(self, rsp_pb):\n \"\"\"数据响应回调函数\"\"\"\n ret_code, content = super(TickerTest, self).on_recv_rsp(rsp_pb)\n if ret_code != RET_OK:\n print(\"* TickerTest: error, msg: %s\" % content)\n return RET_ERROR, content\n\n dt_cur = datetime.now()\n for ix, item in content.iterrows():\n time = item['time']\n\n dt_tick = datetime.strptime(time, \"%Y-%m-%d %H:%M:%S\")\n delay_sec = (dt_cur.minute * 60 + dt_cur.second) - (dt_tick.minute * 
60 + dt_tick.second)\n if delay_sec > 15:\n logger.critical(\"* Ticker cirtical :{}\".format(item))\n elif delay_sec > 5:\n logger.error(\"* Ticker error :{}\".format(item))\n\n return RET_OK, content\n\n\n\ndef main_thread_do_something(quote_ctx):\n code = 'HK.00700'\n last_time = time.time()\n data = None\n vol = 0\n last_seq = 0\n while True:\n if data is None or time.time() - last_time > 10:\n last_time = time.time()\n ret, data = quote_ctx.get_rt_ticker(code)\n if ret != RET_OK:\n data = None\n logger.debug(\"total vol:{}\".format(vol))\n\n sleep(0.1)\n if data is None:\n continue\n for ix, row in data.iterrows():\n seq = row['sequence']\n if seq > last_seq:\n vol += row['volume']\n last_seq = seq\n\ndef quote_test_tick():\n quote_ctx = OpenQuoteContext(host='192.168.127.12', port=12111)\n\n # 设置异步回调接口\n quote_ctx.set_handler(TickerTest())\n quote_ctx.start()\n\n code_list = ['HK.00700']\n ret, data = quote_ctx.get_stock_basicinfo(Market.HK, SecurityType.STOCK)\n if ret != RET_OK:\n exit(0)\n codes = list(data['code'])\n max_len = len(data)\n\n # 定阅的最大数量\n sub_len = 2000\n if sub_len > max_len:\n sub_len = max_len\n import random\n random.seed(time.time())\n while True:\n if len(code_list) >= sub_len:\n break\n rnd_idx = random.randint(0, len(codes) - 1)\n code_tmp = codes[rnd_idx]\n if code_tmp not in code_list:\n code_list.append(code_tmp)\n\n print(quote_ctx.subscribe(code_list, SubType.TICKER))\n\n main_thread_do_something(quote_ctx)\n\n quote_ctx.close()\n\n\n\nfrom multiprocessing import Process, Queue, Manager\n\ndef f(val, q, ns, share_list):\n q.put([42, None, 'hello'])\n q.put({'code': '123',\n 'vol': 1,\n 'price': 3.2,\n })\n ns.xx = 200\n ns.yy = '3f'\n val = 1000\n\n share_list.append(200)\n\nif __name__ == '__main__':\n q = Queue()\n mg = Manager()\n ns = mg.Namespace()\n\n share_list = mg.list()\n share_list.append(100)\n ns.xx = 100\n ns.yy = 'abc'\n val = 100\n p = Process(target=f, args=(val, q, ns, share_list))\n p.start()\n sleep(3)\n\n while not q.empty():\n print(q.get()) # prints \"[42, None, 'hello']\"\n\n print(ns)\n print(share_list)\n\n share_list.remove(100)\n print(share_list)\n print(val)\n\n p.join()\n\n # quote_test_tick()\n\n\n\n\n", "id": "3122785", "language": "Python", "matching_score": 1.1267019510269165, "max_stars_count": 5, "path": "futuquant/examples/learn/BUG_ticker_delay.py" }, { "content": "import errno\nimport datetime\nimport threading\nfrom time import sleep\nfrom futu.common.utils import *\nfrom futu.quote.quote_query import parse_head\nfrom .err import Err\nfrom .sys_config import SysConfig\nfrom .ft_logger import *\nimport enum\n\n\nif IS_PY2:\n import selectors2 as selectors\n import Queue as queue\nelse:\n import queue\n import selectors\n\n\nclass ConnStatus:\n Start = 0\n Connecting = 1\n Connected = 2\n Closed = 3\n\n\nclass ConnectErr(enum.Enum):\n Ok = 0\n Fail = 1\n Timeout = 2\n\n\nclass CloseReason(enum.Enum):\n Close = 0\n RemoteClose = 1\n ReadFail = 2\n SendFail = 3\n ConnectFail = 4\n\n\nclass PacketErr(enum.Enum):\n Ok = 0\n Timeout = 1\n Invalid = 2\n Disconnect = 3\n SendFail = 4\n\n\nclass SyncReqRspInfo:\n def __init__(self):\n self.event = threading.Event()\n self.ret = RET_OK\n self.msg = ''\n self.data = None\n\n\nclass ConnectInfo:\n def __init__(self, is_sync):\n self.start_time = None\n self.event = None\n self.conn_id = 0\n self.err = ConnectErr.Ok\n self.msg = ''\n if is_sync:\n self.event = threading.Event()\n\n @property\n def is_sync(self):\n return self.event is not None\n\n def set_result(self, err, 
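The delay check above only compares minutes and seconds, so it misfires across an hour boundary (and around midnight). A hedged alternative sketch subtracts full datetimes and uses total_seconds(); the 5s/15s thresholds mirror the error/critical levels used above, and the timestamp format is the one the example parses.

from datetime import datetime

def ticker_delay_level(tick_time_str, now=None, warn_sec=5, critical_sec=15):
    """Classify push latency for a tick timestamped 'YYYY-mm-dd HH:MM:SS'."""
    now = now or datetime.now()
    dt_tick = datetime.strptime(tick_time_str, "%Y-%m-%d %H:%M:%S")
    delay = (now - dt_tick).total_seconds()
    if delay > critical_sec:
        return 'critical', delay
    if delay > warn_sec:
        return 'warning', delay
    return 'ok', delay

if __name__ == '__main__':
    print(ticker_delay_level('2018-06-01 10:59:58',
                             now=datetime(2018, 6, 1, 11, 0, 30)))   # ('critical', 32.0)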
msg):\n self.err = err\n self.msg = msg\n if self.event is not None:\n self.event.set()\n\n def wait(self):\n if self.event is not None:\n self.event.wait()\n\n\nclass SendInfo:\n def __init__(self, is_sync):\n self.send_time = None\n self.proto_id = 0\n self.serial_no = 0\n self.header_dict = None\n self.err = PacketErr.Ok\n self.msg = ''\n self.event = None\n self.rsp = None\n if is_sync:\n self.event = threading.Event()\n\n @property\n def is_sync(self):\n return self.event is not None\n\n def set_result(self, err, msg, rsp):\n self.err = err\n self.msg = msg\n self.rsp = rsp\n if self.event is not None:\n self.event.set()\n\n def wait(self):\n if self.event is not None:\n self.event.wait()\n\n\nclass Connection:\n def __init__(self, conn_id, sock, addr, handler, is_encrypt, is_sync):\n self._conn_id = conn_id\n self.opend_conn_id = 0\n self.sock = sock\n self.is_encrypt = is_encrypt\n self.handler = handler\n self._peer_addr = addr\n self.status = ConnStatus.Start\n self.keep_alive_interval = 10\n self.last_keep_alive_time = datetime.now()\n self.timeout = None\n self.readbuf = bytearray()\n self.writebuf = bytearray()\n self.req_dict = {} # ProtoInfo -> req time\n self.sync_req_dict = {} # ProtoInfo -> SyncReqRspInfo\n self.connect_info = ConnectInfo(is_sync)\n self.connect_info.conn_id = conn_id\n self.send_info_dict = {} # ProtoInfo -> SendInfo\n\n @property\n def conn_id(self):\n return self._conn_id\n\n @property\n def peer_addr(self):\n return self._peer_addr\n\n def fileno(self):\n return self.sock.fileno\n\n\ndef is_socket_exception_wouldblock(e):\n has_errno = False\n if IS_PY2:\n if isinstance(e, IOError):\n has_errno = True\n else:\n if isinstance(e, OSError):\n has_errno = True\n\n if has_errno:\n if e.errno == errno.EWOULDBLOCK or e.errno == errno.EAGAIN or e.errno == errno.EINPROGRESS:\n return True\n return False\n\n\ndef make_ctrl_socks():\n LOCAL_HOST = '127.0.0.1'\n if IS_PY2:\n svr_sock = []\n lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n def svr_sock_func():\n try:\n sock, _ = lsock.accept()\n svr_sock.append(sock)\n except Exception as e:\n logger.warning('Ctrl sock fail: {}'.format(str(e)))\n try:\n lsock.bind((LOCAL_HOST, 0))\n _, port = lsock.getsockname()[:2]\n lsock.listen(1)\n thread = threading.Thread(target=svr_sock_func)\n thread.start()\n client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_sock.settimeout(0.1)\n client_sock.connect((LOCAL_HOST, port))\n thread.join()\n return svr_sock[0], client_sock\n except Exception as e:\n logger.warning('Ctrl sock fail: {}'.format(str(e)))\n return None, None\n finally:\n lsock.close()\n else:\n return socket.socketpair()\n\n\nclass NetManager:\n _default_inst = None\n _default_inst_lock = threading.Lock()\n\n @classmethod\n def default(cls):\n with cls._default_inst_lock:\n if cls._default_inst is None:\n cls._default_inst = NetManager()\n return cls._default_inst\n\n def __init__(self):\n self._use_count = 0\n self._next_conn_id = 1\n self._lock = threading.RLock()\n self._mgr_lock = threading.Lock() # Used to control start and stop\n self._pending_read_conns = set()\n self._create_all()\n\n def _close_all(self):\n for sel_key in list(self._selector.get_map().values()):\n self._selector.unregister(sel_key.fileobj)\n sel_key.fileobj.close()\n self._selector.close()\n self._selector = None\n if self._r_sock:\n self._r_sock.close()\n self._r_sock = None\n if self._w_sock:\n self._w_sock.close()\n self._w_sock = None\n\n def _create_all(self):\n self._selector = 
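make_ctrl_socks above exists so that other threads can wake the selector loop: the read end is registered with the selector, and callers queue a work item and then send a single byte on the write end (the b'1' writes in connect, send and close later in this class). A minimal standalone sketch of that wake-up pattern, independent of the futu classes; the helper above builds its own loopback pair on Python 2 for compatibility, while this sketch assumes Python 3's socket.socketpair.

import queue
import selectors
import socket

sel = selectors.DefaultSelector()
r_sock, w_sock = socket.socketpair()      # the wake-up pair
sel.register(r_sock, selectors.EVENT_READ)
work_q = queue.Queue()

def post(work):                           # may be called from any thread
    work_q.put(work)
    w_sock.send(b'1')                     # one byte is enough to wake the selector

post(lambda: print('first job'))
post(lambda: print('second job'))

done = False
while not done:
    for key, _ in sel.select(timeout=1.0):
        if key.fileobj is r_sock:
            r_sock.recv(1024)             # drain the wake-up bytes
            while True:
                try:
                    work_q.get(block=False)()
                except queue.Empty:
                    done = True
                    break

sel.unregister(r_sock)
r_sock.close(); w_sock.close(); sel.close()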
selectors.DefaultSelector()\n self._req_queue = queue.Queue()\n self._sync_req_timeout = 12\n self._thread = None\n now = datetime.now()\n self._last_activate_time = now\n self._last_check_req_time = now\n self._r_sock, self._w_sock = make_ctrl_socks()\n self._selector.register(self._r_sock, selectors.EVENT_READ)\n\n def connect(self, addr, handler, timeout, is_encrypt, is_sync):\n with self._lock:\n conn_id = self._next_conn_id\n self._next_conn_id += 1\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024 * 1024)\n sock.setblocking(False)\n conn = Connection(conn_id, sock, addr, handler, is_encrypt, is_sync)\n conn.status = ConnStatus.Connecting\n conn.timeout = timeout\n\n def work():\n conn.connect_info.start_time = datetime.now()\n self._selector.register(sock, selectors.EVENT_WRITE, conn)\n try:\n sock.connect(addr)\n except Exception as e:\n if not is_socket_exception_wouldblock(e):\n self._do_close(conn.conn_id, CloseReason.ConnectFail, str(e), False)\n conn.connect_info.set_result(ConnectErr.Fail, str(e))\n if not conn.connect_info.is_sync:\n conn.handler.on_connect(conn_id, ConnectErr.Fail, str(e))\n\n self._req_queue.put(work)\n self._w_sock.send(b'1')\n\n return RET_OK, conn.connect_info\n\n def poll(self):\n now = datetime.now()\n events = self._selector.select(0.05)\n for key, evt_mask in events:\n if key.fileobj == self._r_sock:\n self._r_sock.recv(1024)\n while True:\n try:\n work = self._req_queue.get(block=False)\n work()\n except queue.Empty:\n break\n continue\n conn = key.data\n if evt_mask & selectors.EVENT_WRITE != 0:\n self._on_write(conn)\n\n if evt_mask & selectors.EVENT_READ != 0:\n self._pending_read_conns.discard(conn)\n self._on_read(conn)\n\n pending_read_conns = list(self._pending_read_conns)\n self._pending_read_conns.clear()\n for conn in pending_read_conns:\n self._on_read(conn)\n\n activate_elapsed_time = now - self._last_activate_time\n check_req_elapsed_time = now - self._last_check_req_time\n is_activate = activate_elapsed_time.total_seconds() >= 0.05\n is_check_req = check_req_elapsed_time.total_seconds() >= 0.1\n\n if is_activate or is_check_req:\n for key in list(self._selector.get_map().values()):\n if key.fileobj == self._r_sock:\n continue\n conn = key.data\n if conn.status == ConnStatus.Connecting:\n if is_activate:\n self._check_connect_timeout(conn, now)\n elif conn.status == ConnStatus.Connected:\n if is_activate:\n conn.handler.on_tick(conn.conn_id, now)\n if is_check_req:\n self._check_req(conn, now)\n\n if is_activate:\n self._last_activate_time = now\n if is_check_req:\n self._last_check_req_time = now\n\n def _check_connect_timeout(self, conn, now):\n time_delta = now - conn.connect_info.start_time\n if conn.timeout is not None and conn.timeout > 0 and time_delta.total_seconds() >= conn.timeout:\n self._on_connect_timeout(conn)\n\n def _check_req(self, conn, now):\n \"\"\"\n\n :param conn:\n :type conn: Connection\n :param now:\n :type now: datetime\n :return:\n \"\"\"\n req_dict = dict(conn.send_info_dict.items())\n for proto_info, send_info in req_dict.items(): # type: ProtoInfo, SendInfo\n elapsed_time = now - send_info.send_time\n if elapsed_time.total_seconds() >= self._sync_req_timeout:\n self._on_packet(conn, send_info.header_dict, PacketErr.Timeout, '', None)\n\n def _thread_func(self):\n while True:\n with self._lock:\n if not self.is_alive():\n self._thread = None\n break\n self.poll()\n\n def start(self):\n \"\"\"\n Should be called from 
main thread\n :return:\n \"\"\"\n with self._mgr_lock:\n with self._lock:\n self._use_count += 1\n\n if self._thread is None:\n self._create_all()\n self._thread = threading.Thread(target=self._thread_func)\n self._thread.setDaemon(SysConfig.get_all_thread_daemon())\n self._thread.start()\n\n def stop(self):\n with self._mgr_lock:\n with self._lock:\n self._use_count = max(self._use_count - 1, 0)\n\n def is_alive(self):\n with self._lock:\n return self._use_count > 0\n\n def do_send(self, conn_id, send_info: SendInfo, data):\n logger.debug2(FTLog.ONLY_FILE, 'Send: conn_id={}; proto_id={}; serial_no={}; total_len={};'.format(conn_id,\n send_info.proto_id,\n send_info.serial_no,\n len(data)))\n now = datetime.now()\n ret_code = RET_OK\n msg = ''\n conn = self._get_conn(conn_id) # type: Connection\n if not conn:\n logger.debug(\n FTLog.make_log_msg('Send fail', conn_id=conn_id, proto_id=send_info.proto_id, serial_no=send_info.serial_no,\n msg=Err.ConnectionLost.text))\n ret_code, msg = RET_ERROR, Err.ConnectionLost.text\n elif conn.status != ConnStatus.Connected:\n ret_code, msg = RET_ERROR, Err.NotConnected.text\n\n if ret_code != RET_OK:\n logger.warning(FTLog.make_log_msg('Send fail', proto_id=send_info.proto_id, serial_no=send_info.serial_no,\n conn_id=conn_id, msg=msg))\n send_info.set_result(PacketErr.SendFail, msg, None)\n return\n\n proto_info = ProtoInfo(send_info.proto_id, send_info.serial_no)\n conn.send_info_dict[proto_info] = send_info\n size = 0\n try:\n if len(conn.writebuf) > 0:\n conn.writebuf.extend(data)\n else:\n size = conn.sock.send(data)\n except Exception as e:\n if is_socket_exception_wouldblock(e):\n pass\n else:\n ret_code, msg = RET_ERROR, str(e)\n\n if 0 < size < len(data):\n conn.writebuf.extend(data[size:])\n self._watch_write(conn, True)\n\n if ret_code != RET_OK:\n logger.warning(FTLog.make_log_msg('Send error', conn_id=conn_id, msg=msg))\n send_info.set_result(PacketErr.SendFail, msg, None)\n self._do_close(conn.conn_id, CloseReason.SendFail, msg, True)\n\n return RET_OK, ''\n\n def send(self, conn_id, data, is_sync=False):\n \"\"\"\n\n :param conn_id:\n :param data:\n :return:\n \"\"\"\n header = self._parse_req_head(data)\n send_info = SendInfo(is_sync)\n send_info.proto_id = header['proto_id']\n send_info.serial_no = header['serial_no']\n send_info.header_dict = header\n\n def work():\n send_info.send_time = datetime.now()\n self.do_send(conn_id, send_info, data)\n\n self._req_queue.put(work)\n self._w_sock.send(b'1')\n return RET_OK, send_info\n\n def close(self, conn_id):\n def work():\n self._do_close(conn_id, CloseReason.Close, '', True)\n\n self._req_queue.put(work)\n self._w_sock.send(b'1')\n\n def _do_close(self, conn_id, reason, msg, notify):\n conn = self._get_conn(conn_id) # type: Connection\n if not conn:\n return\n if conn.sock is None:\n return\n self._selector.unregister(conn.sock)\n conn.sock.close()\n conn.sock = None\n conn.status = ConnStatus.Closed\n send_info: SendInfo\n for send_info in conn.send_info_dict.values():\n send_info.set_result(PacketErr.Disconnect, '', None)\n logger.debug(\"Close: conn_id={}\".format(conn_id))\n if notify:\n conn.handler.on_disconnect(conn_id, reason, msg)\n\n def _watch_read(self, conn, is_watch):\n try:\n sel_key = self._selector.get_key(conn.sock)\n except KeyError:\n return\n\n if is_watch:\n new_event = sel_key.events | selectors.EVENT_READ\n else:\n new_event = sel_key.events & (~selectors.EVENT_READ)\n\n if new_event != 0:\n self._selector.modify(conn.sock, new_event, conn)\n else:\n 
self._selector.unregister(conn.sock)\n\n def _watch_write(self, conn, is_watch):\n try:\n sel_key = self._selector.get_key(conn.sock)\n except KeyError:\n return\n\n if is_watch:\n new_event = sel_key.events | selectors.EVENT_WRITE\n else:\n new_event = sel_key.events & (~selectors.EVENT_WRITE)\n\n if new_event != 0:\n self._selector.modify(conn.sock, new_event, conn)\n else:\n self._selector.unregister(conn.sock)\n\n def sync_query(self, conn_id, req_str):\n head_dict = self._parse_req_head(req_str)\n proto_info = ProtoInfo(head_dict['proto_id'], head_dict['serial_no'])\n rsp_info = SyncReqRspInfo()\n\n def work():\n conn = self._get_conn(conn_id) # type: Connection\n ret, msg = RET_OK, ''\n if not conn:\n ret = RET_ERROR\n msg = Err.ConnectionLost.text\n else:\n conn.sync_req_dict[proto_info] = rsp_info\n self.do_send(conn_id, proto_info, req_str)\n\n if ret != RET_OK:\n rsp_info.ret = ret\n rsp_info.msg = msg\n rsp_info.event.set()\n\n self._req_queue.put(work)\n self._w_sock.send(b'1')\n\n rsp_info.event.wait()\n return rsp_info.ret, rsp_info.msg, rsp_info.data\n\n def _parse_req_head(self, req_str):\n head_len = get_message_head_len()\n req_head_dict = parse_head(req_str[:head_len])\n return req_head_dict\n\n def _parse_req_head_proto_info(self, req_str):\n head_len = get_message_head_len()\n proto_info = parse_proto_info(req_str[:head_len])\n return proto_info\n\n def _get_conn(self, conn_id):\n with self._lock:\n for sock, sel_key in self._selector.get_map().items():\n if sel_key.fileobj == self._r_sock:\n continue\n conn = sel_key.data\n if conn.conn_id == conn_id:\n return conn\n return None\n\n def _on_read(self, conn):\n if conn.status == ConnStatus.Closed:\n return\n\n packet_count = 0\n msg = ''\n is_closed = False\n try:\n data = conn.sock.recv(128 * 1024)\n if data == b'':\n is_closed = True\n else:\n conn.readbuf.extend(data)\n except Exception as e:\n if not is_socket_exception_wouldblock(e):\n is_closed = True\n msg = str(e)\n\n while len(conn.readbuf) > 0:\n head_len = get_message_head_len()\n if len(conn.readbuf) < head_len:\n break\n head_dict = parse_head(conn.readbuf[:head_len])\n body_len = head_dict['body_len']\n if len(conn.readbuf) < head_len + body_len:\n break\n\n rsp_body = conn.readbuf[head_len:head_len+body_len]\n del conn.readbuf[:head_len+body_len]\n packet_count += 1\n self._on_packet(conn, head_dict, PacketErr.Ok, '', rsp_body)\n if packet_count >= 10:\n if len(conn.readbuf) > 0:\n self._pending_read_conns.add(conn)\n self._w_sock.send(b'1')\n break # 收10个包强制跳出循环,避免长时间解包导致无法发送心跳\n\n if is_closed:\n if msg == '':\n self._do_close(conn.conn_id, CloseReason.RemoteClose, msg, True)\n else:\n self._do_close(conn.conn_id, CloseReason.ReadFail, msg, True)\n\n # logger.debug2(FTLog.ONLY_FILE, 'conn_id={}; elapsed={}; recv_len={}; buf_len={}; packet={};'.format(conn.conn_id, end_time-start_time, recv_len, buf_len, packet_count))\n\n def _on_write(self, conn: Connection):\n if conn.status == ConnStatus.Closed:\n return\n elif conn.status == ConnStatus.Connecting:\n err_code = conn.sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)\n self._watch_read(conn, True)\n self._watch_write(conn, False)\n if err_code != 0:\n msg = errno.errorcode[err_code]\n self._do_close(conn.conn_id, CloseReason.ConnectFail, msg, False)\n conn.connect_info.set_result(ConnectErr.Fail, msg)\n if not conn.connect_info.is_sync:\n conn.handler.on_connect(conn.conn_id, ConnectErr.Fail, msg)\n else:\n conn.status = ConnStatus.Connected\n conn.connect_info.set_result(ConnectErr.Ok, '')\n if not 
conn.connect_info.is_sync:\n conn.handler.on_connect(conn.conn_id, ConnectErr.Ok, '')\n return\n\n msg = ''\n size = 0\n try:\n if len(conn.writebuf) > 0:\n size = conn.sock.send(conn.writebuf)\n except Exception as e:\n if not is_socket_exception_wouldblock(e):\n msg = str(e)\n\n if size > 0:\n del conn.writebuf[:size]\n\n if len(conn.writebuf) == 0:\n self._watch_write(conn, False)\n\n if msg:\n self._do_close(conn.conn_id, CloseReason.SendFail, msg, True)\n\n def _on_connect_timeout(self, conn: Connection):\n conn.connect_info.set_result(ConnectErr.Timeout, '')\n if not conn.connect_info.is_sync:\n conn.handler.on_connect(conn.conn_id, ConnectErr.Timeout, '')\n self._do_close(conn.conn_id, CloseReason.ConnectFail, '', False)\n\n def _on_packet(self, conn, head_dict, err: PacketErr, msg: str, rsp_body_data: bytes):\n proto_info = ProtoInfo(head_dict['proto_id'], head_dict['serial_no'])\n rsp_pb = None\n if err is PacketErr.Ok:\n ret_decrypt, msg_decrypt, rsp_body = decrypt_rsp_body(rsp_body_data, head_dict, conn.opend_conn_id, conn.is_encrypt)\n if ret_decrypt == RET_OK:\n rsp_pb = binary2pb(rsp_body, head_dict['proto_id'], head_dict['proto_fmt_type'])\n else:\n err = PacketErr.Invalid\n msg = msg_decrypt\n rsp_pb = None\n elif msg == '':\n msg = str(err)\n\n log_msg = 'Recv: conn_id={}; proto_id={}; serial_no={}; data_len={}; msg={};'.format(conn.conn_id,\n proto_info.proto_id,\n proto_info.serial_no,\n len(\n rsp_body_data) if rsp_body_data else 0,\n msg)\n if err is PacketErr.Ok:\n logger.debug2(FTLog.ONLY_FILE, log_msg)\n else:\n logger.warning(log_msg)\n\n send_info: SendInfo\n send_info = conn.send_info_dict.get(proto_info, None)\n conn.send_info_dict.pop(proto_info, None)\n if send_info:\n send_info.set_result(err, msg, rsp_pb)\n if not send_info.is_sync:\n conn.handler.on_packet(conn.conn_id, proto_info, err, msg, rsp_pb)\n elif ProtoId.is_proto_id_push(proto_info.proto_id):\n conn.handler.on_packet(conn.conn_id, proto_info, err, msg, rsp_pb)\n\n @staticmethod\n def extract_rsp_pb(opend_conn_id, head_dict, rsp_body):\n ret, msg, rsp = decrypt_rsp_body(rsp_body, head_dict, opend_conn_id)\n if ret == RET_OK:\n rsp_pb = binary2pb(rsp_body, head_dict['proto_id'], head_dict['proto_fmt_type'])\n else:\n rsp_pb = None\n return ret, msg, rsp_pb\n\n def set_conn_info(self, conn_id, info):\n with self._lock:\n conn = self._get_conn(conn_id)\n if conn is not None:\n conn.opend_conn_id = info.get('conn_id', conn.opend_conn_id)\n conn.keep_alive_interval = info.get('keep_alive_interval', conn.keep_alive_interval)\n else:\n return RET_ERROR, Err.ConnectionLost.text\n return RET_OK, ''\n", "id": "9444027", "language": "Python", "matching_score": 4.602126598358154, "max_stars_count": 858, "path": "futu/common/network_manager.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport select\nfrom abc import abstractmethod\nimport socket as sock\nfrom struct import pack\nfrom time import sleep\nimport traceback\nfrom threading import RLock\nfrom futuquant.common.constant import *\nfrom futuquant.common.utils import *\nfrom futuquant.quote.quote_query import parse_head\n\nclass _SyncNetworkQueryCtx:\n \"\"\"\n Network query context manages connection between python program and FUTU client program.\n\n Short (non-persistent) connection can be created by setting long_conn parameter False, which suggests that\n TCP connection is closed once a query session finished\n\n Long (persistent) connection can be created by setting long_conn parameter True, which suggests that TCP\n connection is persisted after a 
query session finished, waiting for next query.\n\n \"\"\"\n\n def __init__(self, host, port, long_conn=True, connected_handler=None, create_session_handler=None):\n self.s = None\n self.__host = host\n self.__port = port\n self.long_conn = long_conn\n self._socket_lock = RLock()\n self._connected_handler = connected_handler\n self._is_loop_connecting = False\n self._create_session_handler = create_session_handler\n self._conn_id = 0\n\n def set_conn_id(self, conn_id):\n self._conn_id = conn_id\n\n def close_socket(self):\n \"\"\"close socket\"\"\"\n self._socket_lock.acquire()\n self._force_close_session()\n self._socket_lock.release()\n\n def is_sock_ok(self, timeout_select):\n \"\"\"check if socket is OK\"\"\"\n self._socket_lock.acquire()\n try:\n ret = self._is_socket_ok(timeout_select)\n finally:\n self._socket_lock.release()\n return ret\n\n def _is_socket_ok(self, timeout_select):\n if not self.s:\n return False\n _, _, sel_except = select.select([self.s], [], [], timeout_select)\n if self.s in sel_except:\n return False\n return True\n\n def reconnect(self):\n \"\"\"reconnect\"\"\"\n logger.debug(\" ****\")\n return self._socket_create_and_loop_connect()\n\n def network_query(self, req_str, is_create_socket=True):\n \"\"\"\n the function sends req_str to FUTU client and try to get response from the client.\n :param req_str\n :return: rsp_str\n \"\"\"\n req_proto_id = 0\n try:\n is_socket_lock = False\n ret, msg = self._create_session(is_create_socket)\n if ret != RET_OK:\n return ret, msg, None\n\n self._socket_lock.acquire()\n if not self.s:\n self._socket_lock.release()\n return RET_ERROR, \"socket is closed\"\n is_socket_lock = True\n\n head_len = get_message_head_len()\n req_head_dict = parse_head(req_str[:head_len])\n req_proto_id = req_head_dict['proto_id']\n req_serial_no = req_head_dict['serial_no']\n s_cnt = self.s.send(req_str)\n\n is_rsp_body = False\n left_buf = b''\n rsp_body = b''\n head_dict = []\n while not is_rsp_body:\n if len(left_buf) < head_len:\n recv_buf = self.s.recv(5 * 1024 * 1024)\n if recv_buf == b'':\n raise Exception(\"_SyncNetworkQueryCtx : head recv error, remote server close\")\n left_buf += recv_buf\n\n head_dict = parse_head(left_buf[:head_len])\n rsp_body = left_buf[head_len:]\n\n while head_dict['body_len'] > len(rsp_body):\n try:\n recv_buf = self.s.recv(5 * 1024 * 1024)\n rsp_body += recv_buf\n if recv_buf == b'':\n raise Exception(\"_SyncNetworkQueryCtx : body recv error, remote server close\")\n except Exception as e:\n traceback.print_exc()\n err = sys.exc_info()[1]\n error_str = ERROR_STR_PREFIX + str(\n err) + ' when receiving after sending %s bytes.' 
% s_cnt + \"\"\n self._force_close_session()\n return RET_ERROR, error_str, None\n if head_dict[\"proto_id\"] == req_proto_id and head_dict[\"serial_no\"] == req_serial_no:\n is_rsp_body = True\n else:\n left_buf = rsp_body[head_dict['body_len']:]\n logger.debug(\"recv dirty response: req protoID={} serial={}, recv protoID={} serial={} conn_id={}\".format(\n req_proto_id, req_serial_no, head_dict[\"proto_id\"], head_dict[\"serial_no\"], self._conn_id))\n\n ret_decrypt, msg_decrypt, rsp_body = decrypt_rsp_body(rsp_body, head_dict, self._conn_id)\n\n if ret_decrypt != RET_OK:\n return ret_decrypt, msg_decrypt, None\n\n rsp_pb = binary2pb(rsp_body, head_dict['proto_id'], head_dict['proto_fmt_type'])\n if rsp_pb is None:\n return RET_ERROR, \"parse error\", None\n\n self._close_session()\n\n except Exception as e:\n traceback.print_exc()\n err = sys.exc_info()[1]\n str_proto = ' when req proto:{} conn_id:{}, host:{} port:{}'.format(req_proto_id, self._conn_id, self.__host, self.__port)\n error_str = ERROR_STR_PREFIX + str(err) + str_proto\n logger.error(error_str)\n\n return RET_ERROR, error_str, None\n finally:\n if is_socket_lock:\n self._socket_lock.release()\n\n return RET_OK, \"\", rsp_pb\n\n def _socket_create_and_loop_connect(self):\n\n logger.debug(\"***\")\n if self._is_loop_connecting:\n return RET_ERROR, \"is loop connecting, can't create_session\"\n self._is_loop_connecting = True\n\n self._socket_lock.acquire()\n ret_code = RET_OK\n ret_msg = ''\n\n if self.s is not None:\n self._force_close_session()\n\n conn_cnt = 0\n while True:\n try:\n s = sock.socket()\n s.setsockopt(sock.SOL_SOCKET, sock.SO_REUSEADDR, 0)\n s.setsockopt(sock.SOL_SOCKET, sock.SO_LINGER, pack(\"ii\", 1, 0))\n s.settimeout(10)\n self.s = s\n self.s.connect((self.__host, self.__port))\n except Exception as e:\n traceback.print_exc()\n err = sys.exc_info()[1]\n err_msg = ERROR_STR_PREFIX + str(err)\n logger.error(\"socket connect count:{} err:{}\".format(conn_cnt, err_msg))\n conn_cnt += 1\n self.s = None\n if s:\n s.close()\n del s\n sleep(1.5)\n continue\n\n if self._connected_handler is not None:\n sock_ok, is_retry = self._connected_handler.notify_sync_socket_connected(self)\n if not sock_ok:\n self._force_close_session()\n if is_retry:\n logger.error(\"wait to connect FutuOpenD\")\n sleep(1.5)\n continue\n else:\n ret_code = RET_ERROR\n ret_msg = \"obj is closed\"\n break\n else:\n break\n\n self._is_loop_connecting = False\n self._socket_lock.release()\n\n return ret_code, ret_msg\n\n def on_create_sync_session(self):\n self.reconnect()\n return RET_OK, \"\"\n\n def _create_session(self, is_create_socket):\n if self.long_conn is True and self.s is not None:\n return RET_OK, \"\"\n\n if not is_create_socket:\n return RET_ERROR, \"no exist connect session\"\n\n if self._create_session_handler:\n ret, msg = self._create_session_handler.on_create_sync_session()\n else:\n ret, msg = self.on_create_sync_session()\n\n if ret != RET_OK:\n return ret, msg\n return RET_OK, \"\"\n\n def _force_close_session(self):\n if self.s is None:\n return\n self.s.close()\n del self.s\n self.s = None\n\n def _close_session(self):\n if self.s is None or self.long_conn is True:\n return\n self.s.close()\n self.s = None\n\n def __del__(self):\n if self.s is not None:\n self.s.close()\n self.s = None\n\n\n", "id": "887462", "language": "Python", "matching_score": 3.911794900894165, "max_stars_count": 5, "path": "futuquant/common/sync_network_manager.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport time\nfrom abc import ABCMeta, 
abstractmethod\nfrom collections import namedtuple\nfrom time import sleep\n# from typing import Optional\nfrom threading import Timer\nfrom datetime import datetime\nfrom threading import RLock, Thread\nfrom futuquant.common.utils import *\nfrom futuquant.common.handler_context import HandlerContext\nfrom futuquant.quote.quote_query import InitConnect\nfrom futuquant.quote.quote_response_handler import AsyncHandler_InitConnect\nfrom futuquant.quote.quote_query import GlobalStateQuery\nfrom futuquant.quote.quote_query import KeepAlive, parse_head\nfrom futuquant.common.conn_mng import FutuConnMng\nfrom futuquant.common.network_manager import NetManager\nfrom .err import Err\nfrom .ft_logger import make_log_msg\nfrom .callback_executor import callback_executor, CallbackItem\n\n_SyncReqRet = namedtuple('_SyncReqRet', ('ret', 'msg'))\n\nclass ContextStatus:\n Start = 0\n Connecting = 1\n Ready = 2\n Closed = 3\n\nclass OpenContextBase(object):\n \"\"\"Base class for set context\"\"\"\n metaclass__ = ABCMeta\n\n def __init__(self, host, port, async_enable):\n self.__host = host\n self.__port = port\n self.__async_socket_enable = async_enable\n self._net_mgr = NetManager.default()\n self._handler_ctx = HandlerContext(self._is_proc_run)\n self._lock = RLock()\n self._status = ContextStatus.Start\n self._proc_run = True\n self._sync_req_ret = None\n self._sync_conn_id = 0\n self._conn_id = 0\n self._keep_alive_interval = 10\n self._last_keep_alive_time = datetime.now()\n self._reconnect_timer = None\n self._keep_alive_fail_count = 0\n self._net_mgr.start()\n self._socket_reconnect_and_wait_ready()\n while True:\n with self._lock:\n if self._status == ContextStatus.Ready:\n break\n sleep(0.02)\n\n def get_login_user_id(self):\n \"\"\"\n get login user id\n :return: user id(int64)\n \"\"\"\n with self._lock:\n return FutuConnMng.get_conn_user_id(self._sync_conn_id)\n\n def __del__(self):\n self._close()\n\n @abstractmethod\n def close(self):\n \"\"\"\n to call close old obj before loop create new, otherwise socket will encounter error 10053 or more!\n \"\"\"\n self._close()\n\n @abstractmethod\n def on_api_socket_reconnected(self):\n \"\"\"\n callback after reconnect ok\n \"\"\"\n # logger.debug(\"on_api_socket_reconnected obj ID={}\".format(id(self)))\n return RET_OK, ''\n\n def _close(self):\n with self._lock:\n if self._status == ContextStatus.Closed:\n return\n self._status = ContextStatus.Closed\n net_mgr = self._net_mgr\n conn_id = self._conn_id\n self._conn_id = 0\n self._net_mgr = None\n self.stop()\n self._handlers_ctx = None\n if self._reconnect_timer is not None:\n self._reconnect_timer.cancel()\n self._reconnect_timer = None\n if conn_id > 0:\n net_mgr.close(conn_id)\n net_mgr.stop()\n\n def start(self):\n \"\"\"\n 启动异步接收推送数据\n \"\"\"\n with self._lock:\n self._proc_run = True\n\n def stop(self):\n \"\"\"\n 停止异步接收推送数据\n \"\"\"\n with self._lock:\n self._proc_run = False\n\n def set_handler(self, handler):\n \"\"\"\n 设置异步回调处理对象\n\n :param handler: 回调处理对象,必须是以下类的子类实例\n\n =============================== =========================\n 类名 说明\n =============================== =========================\n StockQuoteHandlerBase 报价处理基类\n OrderBookHandlerBase 摆盘处理基类\n CurKlineHandlerBase 实时k线处理基类\n TickerHandlerBase 逐笔处理基类\n RTDataHandlerBase 分时数据处理基类\n BrokerHandlerBase 经济队列处理基类\n =============================== =========================\n\n :return: RET_OK: 设置成功\n\n RET_ERROR: 设置失败\n \"\"\"\n with self._lock:\n if self._handler_ctx is not None:\n return self._handler_ctx.set_handler(handler)\n 
return RET_ERROR\n\n def set_pre_handler(self, handler):\n '''set pre handler'''\n with self._lock:\n if self._handler_ctx is not None:\n return self._handler_ctx.set_pre_handler(handler)\n return RET_ERROR\n\n def _is_proc_run(self):\n with self._lock:\n return self._proc_run\n\n def _get_sync_query_processor(self, pack_func, unpack_func, is_create_socket=True):\n \"\"\"\n synchronize the query processor\n :param pack_func: back\n :param unpack_func: unpack\n :return: sync_query_processor\n \"\"\"\n\n def sync_query_processor(**kargs):\n \"\"\"sync query processor\"\"\"\n while True:\n with self._lock:\n if self._status == ContextStatus.Ready:\n net_mgr = self._net_mgr\n conn_id = self._conn_id\n break\n sleep(0.01)\n\n try:\n ret_code, msg, req_str = pack_func(**kargs)\n if ret_code != RET_OK:\n return ret_code, msg, None\n\n ret_code, msg, rsp_str = net_mgr.sync_query(conn_id, req_str)\n if ret_code != RET_OK:\n return ret_code, msg, None\n\n ret_code, msg, content = unpack_func(rsp_str)\n if ret_code != RET_OK:\n return ret_code, msg, None\n except Exception as e:\n logger.error(traceback.format_exc())\n return RET_ERROR, str(e), None\n\n return RET_OK, msg, content\n\n return sync_query_processor\n\n def _send_async_req(self, req_str):\n conn_id = 0\n net_mgr = None\n with self._lock:\n if self._status != ContextStatus.Ready:\n return RET_ERROR, 'Context closed or not ready'\n conn_id = self._conn_id\n net_mgr = self._net_mgr\n return net_mgr.send(conn_id, req_str)\n\n def _socket_reconnect_and_wait_ready(self):\n \"\"\"\n sync_socket & async_socket recreate\n :return: (ret, msg)\n \"\"\"\n logger.info(\"Start connecting: host={}; port={};\".format(self.__host, self.__port))\n with self._lock:\n self._status = ContextStatus.Connecting\n # logger.info(\"try connecting: host={}; port={};\".format(self.__host, self.__port))\n ret, msg, conn_id = self._net_mgr.connect((self.__host, self.__port), self, 5)\n if ret == RET_OK:\n self._conn_id = conn_id\n else:\n logger.warning(msg)\n\n if ret == RET_OK:\n while True:\n with self._lock:\n if self._sync_req_ret is not None:\n if self._sync_req_ret.ret == RET_OK:\n self._status = ContextStatus.Ready\n else:\n ret, msg = self._sync_req_ret.ret, self._sync_req_ret.msg\n self._sync_req_ret = None\n break\n sleep(0.01)\n\n if ret == RET_OK:\n ret, msg = self.on_api_socket_reconnected()\n else:\n self._wait_reconnect()\n return RET_OK, ''\n\n def get_sync_conn_id(self):\n with self._lock:\n return self._sync_conn_id\n\n def get_async_conn_id(self):\n return self.get_sync_conn_id()\n\n def get_global_state(self):\n \"\"\"\n 获取全局状态\n\n :return: (ret, data)\n\n ret == RET_OK data为包含全局状态的字典,含义如下\n\n ret != RET_OK data为错误描述字符串\n\n ===================== =========== ==============================================================\n key value类型 说明\n ===================== =========== ==============================================================\n market_sz str 深圳市场状态,参见MarketState\n market_us str 美国市场状态,参见MarketState\n market_sh str 上海市场状态,参见MarketState\n market_hk str 香港市场状态,参见MarketState\n market_future str 香港期货市场状态,参见MarketState\n server_ver str FutuOpenD版本号\n trd_logined str '1':已登录交易服务器,'0': 未登录交易服务器\n qot_logined str '1':已登录行情服务器,'0': 未登录行情服务器\n timestamp str Futu后台服务器当前时间戳(秒)\n local_timestamp double FutuOpenD运行机器当前时间戳(\n ===================== =========== ==============================================================\n :example:\n\n .. 
code:: python\n\n from futuquant import *\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n print(quote_ctx.get_global_state())\n quote_ctx.close()\n \"\"\"\n query_processor = self._get_sync_query_processor(\n GlobalStateQuery.pack_req, GlobalStateQuery.unpack_rsp)\n\n kargs = {\n 'user_id': self.get_login_user_id(),\n 'conn_id': self.get_sync_conn_id(),\n }\n ret_code, msg, state_dict = query_processor(**kargs)\n if ret_code != RET_OK:\n return ret_code, msg\n\n return RET_OK, state_dict\n\n def on_connected(self, conn_id):\n logger.info('Connected : conn_id={0}; '.format(conn_id))\n kargs = {\n 'client_ver': int(SysConfig.get_client_ver()),\n 'client_id': str(SysConfig.get_client_id()),\n 'recv_notify': True,\n }\n\n ret, msg, req_str = InitConnect.pack_req(**kargs)\n if ret == RET_OK:\n ret, msg = self._net_mgr.send(conn_id, req_str)\n else:\n logger.warning(make_log_msg('InitConnect.pack_req fail', msg=msg))\n\n if ret != RET_OK:\n with self._lock:\n self._sync_req_ret = _SyncReqRet(ret, msg)\n\n def on_error(self, conn_id, err):\n logger.warning('Connect error: conn_id={0}; err={1};'.format(conn_id, err))\n with self._lock:\n if self._status != ContextStatus.Connecting:\n self._wait_reconnect()\n else:\n self._sync_req_ret = _SyncReqRet(RET_ERROR, str(err))\n\n def on_closed(self, conn_id):\n logger.warning('Connect closed: conn_id={0}'.format(conn_id))\n with self._lock:\n if self._status != ContextStatus.Connecting:\n self._wait_reconnect()\n else:\n self._sync_req_ret = _SyncReqRet(RET_ERROR, 'Connection closed')\n\n def on_connect_timeout(self, conn_id):\n logger.warning('Connect timeout: conn_id={0}'.format(conn_id))\n with self._lock:\n self._sync_req_ret = _SyncReqRet(RET_ERROR, Err.Timeout.text)\n\n def on_packet(self, conn_id, proto_info, ret_code, msg, rsp_pb):\n if proto_info.proto_id == ProtoId.InitConnect:\n self._handle_init_connect(conn_id, proto_info.proto_id, ret_code, msg, rsp_pb)\n elif proto_info.proto_id == ProtoId.KeepAlive:\n self._handle_keep_alive(conn_id, proto_info.proto_id, ret_code, msg, rsp_pb)\n elif ret_code == RET_OK:\n item = CallbackItem(self, proto_info.proto_id, rsp_pb)\n callback_executor.queue.put(item)\n\n def on_activate(self, conn_id, now):\n with self._lock:\n if self._status != ContextStatus.Ready:\n return\n time_elapsed = now - self._last_keep_alive_time\n if time_elapsed.total_seconds() < self._keep_alive_interval:\n return\n\n logger.debug(\"Keepalive: conn_id={};\".format(conn_id))\n ret, msg, req = KeepAlive.pack_req(self.get_sync_conn_id())\n if ret != RET_OK:\n logger.warning(\"KeepAlive.pack_req fail: {0}\".format(msg))\n return\n ret, msg = self._net_mgr.send(conn_id, req)\n if ret != RET_OK:\n return\n\n self._last_keep_alive_time = now\n\n def packet_callback(self, proto_id, rsp_pb):\n with self._lock:\n if self._status != ContextStatus.Ready:\n return\n\n handler_ctx = self._handler_ctx\n if handler_ctx:\n handler_ctx.recv_func(rsp_pb, proto_id)\n\n def _handle_init_connect(self, conn_id, proto_info, ret, msg, rsp_pb):\n data = None\n if ret == RET_OK:\n ret, msg, data = InitConnect.unpack_rsp(rsp_pb)\n\n with self._lock:\n self._sync_req_ret = _SyncReqRet(ret, msg)\n if ret == RET_OK:\n conn_info = copy(data)\n self._sync_conn_id = conn_info['conn_id']\n self._keep_alive_interval = conn_info['keep_alive_interval'] * 4 / 5\n self._net_mgr.set_conn_info(conn_id, conn_info)\n self._last_keep_alive_time = datetime.now()\n FutuConnMng.add_conn(conn_info)\n logger.info(make_log_msg(\"InitConnect ok\", conn_id=conn_id, 
info=conn_info))\n else:\n logger.warning(make_log_msg(\"InitConnect error\", msg=msg))\n self._wait_reconnect()\n\n def _handle_keep_alive(self, conn_id, proto_info, ret_code, msg, rsp_pb):\n should_reconnect = False\n with self._lock:\n if ret_code == RET_OK:\n self._keep_alive_fail_count = 0\n else:\n self._keep_alive_fail_count += 1\n\n if self._keep_alive_fail_count >= 3:\n logger.warning('Fail to recv KeepAlive for 3 times')\n should_reconnect = True\n\n if should_reconnect:\n self._wait_reconnect()\n\n def _wait_reconnect(self):\n wait_reconnect_interval = 8\n net_mgr = None\n conn_id = 0\n with self._lock:\n if self._status == ContextStatus.Closed or self._reconnect_timer is not None:\n return\n logger.info('Wait reconnect in {} seconds: host={}; port={};'.format(wait_reconnect_interval,\n self.__host,\n self.__port))\n net_mgr = self._net_mgr\n conn_id = self._conn_id\n self._status = ContextStatus.Connecting\n self._sync_conn_id = 0\n self._conn_id = 0\n self._keep_alive_fail_count = 0\n self._reconnect_timer = Timer(wait_reconnect_interval, self._reconnect)\n self._reconnect_timer.start()\n\n net_mgr.close(conn_id)\n\n def _reconnect(self):\n with self._lock:\n self._reconnect_timer.cancel()\n self._reconnect_timer = None\n if self._status != ContextStatus.Connecting:\n return\n\n self._socket_reconnect_and_wait_ready()\n\n\n", "id": "3564240", "language": "Python", "matching_score": 5.471700191497803, "max_stars_count": 5, "path": "futuquant/common/open_context_base.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom futuquant.quote.quote_response_handler import *\nfrom futuquant.trade.trade_response_handler import *\n\n\nclass HandlerContext:\n \"\"\"Handle Context\"\"\"\n\n def __init__(self, cb_check_recv):\n self.cb_check_recv = cb_check_recv\n self._default_handler = RspHandlerBase()\n self._handler_table = {\n 1003: {\n \"type\": SysNotifyHandlerBase,\n \"obj\": SysNotifyHandlerBase()\n },\n 1004: {\n \"type\": KeepAliveHandlerBase,\n \"obj\": KeepAliveHandlerBase()\n },\n 2208: {\n \"type\": TradeOrderHandlerBase,\n \"obj\": TradeOrderHandlerBase()\n },\n 2218: {\n \"type\": TradeDealHandlerBase,\n \"obj\": TradeDealHandlerBase()\n },\n\n 3005: {\n \"type\": StockQuoteHandlerBase,\n \"obj\": StockQuoteHandlerBase()\n },\n 3007: {\n \"type\": CurKlineHandlerBase,\n \"obj\": CurKlineHandlerBase()\n },\n 3009: {\n \"type\": RTDataHandlerBase,\n \"obj\": RTDataHandlerBase()\n },\n 3011: {\n \"type\": TickerHandlerBase,\n \"obj\": TickerHandlerBase()\n },\n 3013: {\n \"type\": OrderBookHandlerBase,\n \"obj\": OrderBookHandlerBase()\n },\n 3015: {\n \"type\": BrokerHandlerBase,\n \"obj\": BrokerHandlerBase()\n },\n }\n\n self._pre_handler_table = {\n 1001: {\n \"type\": AsyncHandler_InitConnect,\n \"obj\": AsyncHandler_InitConnect()\n },\n 2008: {\n \"type\": AsyncHandler_TrdSubAccPush,\n \"obj\": AsyncHandler_TrdSubAccPush()\n },\n }\n # self._pre_handler_table = self._handler_table.copy()\n\n def set_pre_handler(self, handler):\n '''pre handler push\n return: ret_error or ret_ok\n '''\n set_flag = False\n for protoc in self._pre_handler_table:\n if isinstance(handler, self._pre_handler_table[protoc][\"type\"]):\n self._pre_handler_table[protoc][\"obj\"] = handler\n return RET_OK\n\n if set_flag is False:\n return RET_ERROR\n\n def set_handler(self, handler):\n \"\"\"\n set the callback processing object to be used by the receiving thread after receiving the data.User should set\n their own callback object setting in order to achieve event driven.\n :param handler:the object 
in callback handler base\n :return: ret_error or ret_ok\n \"\"\"\n set_flag = False\n for protoc in self._handler_table:\n if isinstance(handler, self._handler_table[protoc][\"type\"]):\n self._handler_table[protoc][\"obj\"] = handler\n return RET_OK\n\n if set_flag is False:\n return RET_ERROR\n\n def recv_func(self, rsp_pb, proto_id):\n \"\"\"receive response callback function\"\"\"\n\n if self.cb_check_recv is not None and not self.cb_check_recv() and ProtoId.is_proto_id_push(proto_id):\n return\n\n handler = self._default_handler\n pre_handler = None\n\n if proto_id in self._handler_table:\n handler = self._handler_table[proto_id]['obj']\n\n if proto_id in self._pre_handler_table:\n pre_handler = self._pre_handler_table[proto_id]['obj']\n\n if pre_handler is not None:\n pre_handler.on_recv_rsp(rsp_pb)\n\n handler.on_recv_rsp(rsp_pb)\n\n @staticmethod\n def error_func(err_str):\n \"\"\"error callback function\"\"\"\n print(err_str)\n\n", "id": "11712950", "language": "Python", "matching_score": 2.1132614612579346, "max_stars_count": 5, "path": "futuquant/common/handler_context.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport sys\nIS_PY2 = sys.version_info[0] == 2\n\n\ndef bytes_utf8(data):\n if IS_PY2:\n return bytes(data)\n else:\n return bytes(data, encoding='utf-8')\n\n\ndef str_utf8(data):\n if IS_PY2:\n return str(data)\n else:\n return str(data, encoding='utf-8')\n\n\nclass RspHandlerBase(object):\n \"\"\"callback function base class\"\"\"\n\n def __init__(self):\n pass\n\n def on_recv_rsp(self, rsp_pb):\n \"\"\"receive response callback function\"\"\"\n return 0, None\n\n\n", "id": "4948558", "language": "Python", "matching_score": 0.7724507451057434, "max_stars_count": 5, "path": "futuquant/common/__init__.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom datetime import datetime\nimport os\n\nlogger = logging.getLogger('FT')\nlog_level = logging.INFO\nis_file_log = True\n\n# 设置logger的level为DEBUG\nlogger.setLevel(log_level)\n\n# 创建一个输出日志到控制台的StreamHandler\nhdr = logging.StreamHandler()\nformatter = logging.Formatter(\n '%(asctime)s [%(filename)s] %(funcName)s:%(lineno)d: %(message)s')\nhdr.setFormatter(formatter)\n\n# 给logger添加上handler\nlogger.addHandler(hdr)\n\n# 添加文件handle\nif is_file_log:\n filename = 'ft_' + datetime.now().strftime('%Y%m%d') + '.log'\n tempPath = os.path.join(os.getcwd(), 'log')\n if not os.path.exists(tempPath):\n os.makedirs(tempPath)\n filepath = os.path.join(tempPath, filename)\n fileHandler = logging.FileHandler(filepath)\n fileHandler.setFormatter(formatter)\n logger.addHandler(fileHandler)\n\n\ndef make_log_msg(title, **kwargs):\n msg = ''\n if len(kwargs) > 0:\n msg = ':'\n for k, v in kwargs.items():\n msg += ' {0}={1};'.format(k, v)\n return title + msg\n\n\n\n", "id": "10790352", "language": "Python", "matching_score": 0.2903580069541931, "max_stars_count": 5, "path": "futuquant/common/ft_logger.py" }, { "content": "#!/usr/bin/env python\n#\n# merge_tests.py: testing merge\n#\n# Subversion is a tool for revision control.\n# See http://subversion.apache.org for more information.\n#\n# ====================================================================\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n######################################################################\n\n# General modules\nimport shutil, sys, re, os\nimport time\n\n# Our testing module\nimport svntest\nfrom svntest import main, wc, verify, actions\n\nfrom prop_tests import binary_mime_type_on_text_file_warning\n\n# (abbreviation)\nItem = wc.StateItem\nSkip = svntest.testcase.Skip_deco\nSkipUnless = svntest.testcase.SkipUnless_deco\nXFail = svntest.testcase.XFail_deco\nIssues = svntest.testcase.Issues_deco\nIssue = svntest.testcase.Issue_deco\nWimp = svntest.testcase.Wimp_deco\nexp_noop_up_out = svntest.actions.expected_noop_update_output\n\nfrom svntest.main import SVN_PROP_MERGEINFO\nfrom svntest.main import server_has_mergeinfo\nfrom svntest.actions import fill_file_with_lines\nfrom svntest.actions import make_conflict_marker_text\nfrom svntest.actions import inject_conflict_into_expected_state\nfrom svntest.verify import RegexListOutput\n\nfrom svntest.mergetrees import expected_merge_output, \\\n check_mergeinfo_recursively, \\\n set_up_dir_replace, \\\n set_up_branch, \\\n local_path, \\\n svn_mkfile, \\\n svn_modfile, \\\n svn_copy, \\\n svn_merge, \\\n noninheritable_mergeinfo_test_set_up\n\n######################################################################\n# Tests\n#\n# Each test must return on success or raise on failure.\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef textual_merges_galore(sbox):\n \"performing a merge, with mixed results\"\n\n ## The Plan:\n ##\n ## The goal is to test that \"svn merge\" does the right thing in the\n ## following cases:\n ##\n ## 1 : _ : Received changes already present in unmodified local file\n ## 2 : U : No local mods, received changes folded in without trouble\n ## 3 : G : Received changes already exist as local mods\n ## 4 : G : Received changes do not conflict with local mods\n ## 5 : C : Received changes conflict with local mods\n ##\n ## So first modify these files and commit:\n ##\n ## Revision 2:\n ## -----------\n ## A/mu ............... add ten or so lines\n ## A/D/G/rho .......... add ten or so lines\n ##\n ## Now check out an \"other\" working copy, from revision 2.\n ##\n ## Next further modify and commit some files from the original\n ## working copy:\n ##\n ## Revision 3:\n ## -----------\n ## A/B/lambda ......... add ten or so lines\n ## A/D/G/pi ........... add ten or so lines\n ## A/D/G/tau .......... add ten or so lines\n ## A/D/G/rho .......... add an additional ten or so lines\n ##\n ## In the other working copy (which is at rev 2), update rho back\n ## to revision 1, while giving other files local mods. This sets\n ## things up so that \"svn merge -r 1:3\" will test all of the above\n ## cases except case 4:\n ##\n ## case 1: A/mu .......... do nothing, the only change was in rev 2\n ## case 2: A/B/lambda .... do nothing, so we accept the merge easily\n ## case 3: A/D/G/pi ...... 
add same ten lines as committed in rev 3\n ## case 5: A/D/G/tau ..... add ten or so lines at the end\n ## [none]: A/D/G/rho ..... ignore what happens to this file for now\n ##\n ## Now run\n ##\n ## $ cd wc.other\n ## $ svn merge -r 1:3 url-to-repo\n ##\n ## ...and expect the right output.\n ##\n ## Now revert rho, then update it to revision 2, then *prepend* a\n ## bunch of lines, which will be separated by enough distance from\n ## the changes about to be received that the merge will be clean.\n ##\n ## $ cd wc.other/A/D/G\n ## $ svn merge -r 2:3 url-to-repo/A/D/G\n ##\n ## Which tests case 4. (Ignore the changes to the other files,\n ## we're only interested in rho here.)\n\n sbox.build()\n wc_dir = sbox.wc_dir\n # url = os.path.join(svntest.main.test_area_url, sbox.repo_dir)\n\n # Change mu and rho for revision 2\n mu_path = sbox.ospath('A/mu')\n rho_path = sbox.ospath('A/D/G/rho')\n mu_text = fill_file_with_lines(mu_path, 2)\n rho_text = fill_file_with_lines(rho_path, 2)\n\n # Create expected output tree for initial commit\n expected_output = wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n 'A/D/G/rho' : Item(verb='Sending'),\n })\n\n # Create expected status tree; all local revisions should be at 1,\n # but mu and rho should be at revision 2.\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', 'A/D/G/rho', wc_rev=2)\n\n # Initial commit.\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Make the \"other\" working copy\n other_wc = sbox.add_wc_path('other')\n svntest.actions.duplicate_dir(wc_dir, other_wc)\n\n # Now commit some more mods from the original working copy, to\n # produce revision 3.\n lambda_path = sbox.ospath('A/B/lambda')\n pi_path = sbox.ospath('A/D/G/pi')\n tau_path = sbox.ospath('A/D/G/tau')\n\n lambda_text = fill_file_with_lines(lambda_path, 2)\n pi_text = fill_file_with_lines(pi_path, 2)\n tau_text = fill_file_with_lines(tau_path, 2)\n additional_rho_text = fill_file_with_lines(rho_path, 2)\n\n # Created expected output tree for 'svn ci'\n expected_output = wc.State(wc_dir, {\n 'A/B/lambda' : Item(verb='Sending'),\n 'A/D/G/pi' : Item(verb='Sending'),\n 'A/D/G/tau' : Item(verb='Sending'),\n 'A/D/G/rho' : Item(verb='Sending'),\n })\n\n # Create expected status tree.\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', wc_rev=2)\n expected_status.tweak('A/B/lambda', 'A/D/G/pi', 'A/D/G/tau', 'A/D/G/rho',\n wc_rev=3)\n\n # Commit revision 3.\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Make local mods in wc.other\n other_pi_path = os.path.join(other_wc, 'A', 'D', 'G', 'pi')\n other_rho_path = os.path.join(other_wc, 'A', 'D', 'G', 'rho')\n other_tau_path = os.path.join(other_wc, 'A', 'D', 'G', 'tau')\n\n # For A/mu and A/B/lambda, we do nothing. For A/D/G/pi, we add the\n # same ten lines as were already committed in revision 3.\n # (Remember, wc.other is only at revision 2, so it doesn't have\n # these changes.)\n svntest.main.file_append(other_pi_path, pi_text)\n\n # We skip A/D/G/rho in this merge; it will be tested with a separate\n # merge command. 
Temporarily put it back to revision 1, so this\n # merge succeeds cleanly.\n svntest.actions.run_and_verify_svn(None, [],\n 'up', '-r', '1', other_rho_path)\n\n # For A/D/G/tau, we append few different lines, to conflict with the\n # few lines appended in revision 3.\n other_tau_text = fill_file_with_lines(other_tau_path, 2,\n line_descrip=\"Conflicting line\")\n\n # Do the first merge, revs 1:3. This tests all the cases except\n # case 4, which we'll handle in a second pass.\n expected_output = wc.State(other_wc, {'A/B/lambda' : Item(status='U '),\n 'A/D/G/rho' : Item(status='U '),\n 'A/D/G/tau' : Item(status='C '),\n })\n expected_mergeinfo_output = wc.State(other_wc, {'' : Item(status=' U')})\n expected_elision_output = wc.State(other_wc, {})\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak('A/mu',\n contents=expected_disk.desc['A/mu'].contents\n + mu_text)\n expected_disk.tweak('A/B/lambda',\n contents=expected_disk.desc['A/B/lambda'].contents\n + lambda_text)\n expected_disk.tweak('A/D/G/rho',\n contents=expected_disk.desc['A/D/G/rho'].contents\n + rho_text + additional_rho_text)\n expected_disk.tweak('A/D/G/pi',\n contents=expected_disk.desc['A/D/G/pi'].contents\n + pi_text)\n\n expected_status = svntest.actions.get_virginal_state(other_wc, 1)\n expected_status.tweak('', status=' M')\n expected_status.tweak('A/mu', wc_rev=2)\n expected_status.tweak('A/B/lambda', status='M ')\n expected_status.tweak('A/D/G/pi', status='M ')\n expected_status.tweak('A/D/G/rho', status='M ')\n\n inject_conflict_into_expected_state('A/D/G/tau', expected_disk,\n expected_status, other_tau_text, tau_text,\n 1, 3)\n\n expected_skip = wc.State('', { })\n\n tau_conflict_support_files = [\"tau\\.working\",\n \"tau\\.merge-right\\.r3\",\n \"tau\\.merge-left\\.r1\"]\n\n svntest.actions.run_and_verify_merge(other_wc, '1', '3',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], False, True,\n '--allow-mixed-revisions', other_wc,\n extra_files=list(tau_conflict_support_files))\n\n # Now reverse merge r3 into A/D/G/rho, give it non-conflicting local\n # mods, then merge in the 2:3 change. ### Not bothering to do the\n # whole expected_foo routine for these intermediate operations;\n # they're not what we're here to test, after all, so it's enough to\n # know that they worked. Is this a bad practice? ###\n #\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[-3]],\n ['G ' + other_rho_path + '\\n',\n ' G ' + other_rho_path + '\\n',]),\n [], 'merge', '-c-3',\n sbox.repo_url + '/A/D/G/rho',\n other_rho_path)\n\n # Now *prepend* ten or so lines to A/D/G/rho. Since rho had ten\n # lines appended in revision 2, and then another ten in revision 3,\n # these new local mods will be separated from the rev 3 changes by\n # enough distance that they won't conflict, so the merge should be\n # clean.\n other_rho_text = \"\"\n for x in range(1,10):\n other_rho_text = other_rho_text + 'Unobtrusive line ' + repr(x) + ' in rho\\n'\n current_other_rho_text = open(other_rho_path).read()\n svntest.main.file_write(other_rho_path,\n other_rho_text + current_other_rho_text)\n\n # We expect no merge attempt for pi and tau because they inherit\n # mergeinfo from the WC root. 
There is explicit mergeinfo on rho\n # ('/A/D/G/rho:2') so expect it to be merged (cleanly).\n G_path = os.path.join(other_wc, 'A', 'D', 'G')\n expected_output = wc.State(os.path.join(other_wc, 'A', 'D', 'G'),\n {'rho' : Item(status='G ')})\n expected_mergeinfo_output = wc.State(G_path, {\n '' : Item(status=' G'),\n 'rho' : Item(status=' G')\n })\n expected_elision_output = wc.State(G_path, {\n '' : Item(status=' U'),\n 'rho' : Item(status=' U')\n })\n expected_disk = wc.State(\"\", {\n 'pi' : Item(\"This is the file 'pi'.\\n\"),\n 'rho' : Item(\"This is the file 'rho'.\\n\"),\n 'tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_disk.tweak('rho',\n contents=other_rho_text\n + expected_disk.desc['rho'].contents\n + rho_text\n + additional_rho_text)\n expected_disk.tweak('pi',\n contents=expected_disk.desc['pi'].contents\n + pi_text)\n\n expected_status = wc.State(os.path.join(other_wc, 'A', 'D', 'G'),\n { '' : Item(wc_rev=1, status=' '),\n 'rho' : Item(wc_rev=1, status='M '),\n 'pi' : Item(wc_rev=1, status='M '),\n 'tau' : Item(wc_rev=1, status='C '),\n })\n\n inject_conflict_into_expected_state('tau', expected_disk, expected_status,\n other_tau_text, tau_text, 1, 3)\n\n # Do the merge, but check svn:mergeinfo props separately since\n # run_and_verify_merge would attempt to proplist tau's conflict\n # files if we asked it to check props.\n svntest.actions.run_and_verify_merge(\n os.path.join(other_wc, 'A', 'D', 'G'),\n '2', '3',\n sbox.repo_url + '/A/D/G', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n extra_files=list(tau_conflict_support_files))\n\n\n svntest.actions.run_and_verify_svn([], '.*W200017: Property.*not found',\n 'propget', SVN_PROP_MERGEINFO,\n os.path.join(other_wc,\n \"A\", \"D\", \"G\", \"rho\"))\n\n\n#----------------------------------------------------------------------\n# Merge should copy-with-history when adding files or directories\n@SkipUnless(server_has_mergeinfo)\ndef add_with_history(sbox):\n \"merge and add new files/dirs with history\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n C_path = sbox.ospath('A/C')\n F_path = sbox.ospath('A/B/F')\n F_url = sbox.repo_url + '/A/B/F'\n\n Q_path = os.path.join(F_path, 'Q')\n Q2_path = os.path.join(F_path, 'Q2')\n foo_path = os.path.join(F_path, 'foo')\n foo2_path = os.path.join(F_path, 'foo2')\n bar_path = os.path.join(F_path, 'Q', 'bar')\n bar2_path = os.path.join(F_path, 'Q', 'bar2')\n\n svntest.main.run_svn(None, 'mkdir', Q_path)\n svntest.main.run_svn(None, 'mkdir', Q2_path)\n svntest.main.file_append(foo_path, \"foo\")\n svntest.main.file_append(foo2_path, \"foo2\")\n svntest.main.file_append(bar_path, \"bar\")\n svntest.main.file_append(bar2_path, \"bar2\")\n svntest.main.run_svn(None, 'add', foo_path, foo2_path, bar_path, bar2_path)\n svntest.main.run_svn(None, 'propset', 'x', 'x', Q2_path)\n svntest.main.run_svn(None, 'propset', 'y', 'y', foo2_path)\n svntest.main.run_svn(None, 'propset', 'z', 'z', bar2_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/F/Q' : Item(verb='Adding'),\n 'A/B/F/Q2' : Item(verb='Adding'),\n 'A/B/F/Q/bar' : Item(verb='Adding'),\n 'A/B/F/Q/bar2': Item(verb='Adding'),\n 'A/B/F/foo' : Item(verb='Adding'),\n 'A/B/F/foo2' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/Q' : Item(status=' ', wc_rev=2),\n 'A/B/F/Q2' : Item(status=' ', wc_rev=2),\n 'A/B/F/Q/bar' : Item(status=' ', wc_rev=2),\n 'A/B/F/Q/bar2': 
Item(status=' ', wc_rev=2),\n 'A/B/F/foo' : Item(status=' ', wc_rev=2),\n 'A/B/F/foo2' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n expected_output = wc.State(C_path, {\n 'Q' : Item(status='A '),\n 'Q2' : Item(status='A '),\n 'Q/bar' : Item(status='A '),\n 'Q/bar2' : Item(status='A '),\n 'foo' : Item(status='A '),\n 'foo2' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(C_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(C_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/F:2'}),\n 'Q' : Item(),\n 'Q2' : Item(props={'x' : 'x'}),\n 'Q/bar' : Item(\"bar\"),\n 'Q/bar2' : Item(\"bar2\", props={'z' : 'z'}),\n 'foo' : Item(\"foo\"),\n 'foo2' : Item(\"foo2\", props={'y' : 'y'}),\n })\n expected_status = wc.State(C_path, {\n '' : Item(status=' M', wc_rev=1),\n 'Q' : Item(status='A ', wc_rev='-', copied='+'),\n 'Q2' : Item(status='A ', wc_rev='-', copied='+'),\n 'Q/bar' : Item(status=' ', wc_rev='-', copied='+'),\n 'Q/bar2' : Item(status=' ', wc_rev='-', copied='+'),\n 'foo' : Item(status='A ', wc_rev='-', copied='+'),\n 'foo2' : Item(status='A ', wc_rev='-', copied='+'),\n })\n\n expected_skip = wc.State(C_path, { })\n\n # Add some unversioned directory obstructions to the incoming\n # additions. This should be tolerated and *not* result in any\n # difference between the --dry-run and actual merge.\n # See http://svn.haxx.se/dev/archive-2012-11/0696.shtml\n os.mkdir(sbox.ospath('A/C/Q'))\n os.mkdir(sbox.ospath('A/C/Q2'))\n\n svntest.actions.run_and_verify_merge(C_path, '1', '2', F_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/C' : Item(verb='Sending'),\n 'A/C/Q' : Item(verb='Adding'),\n 'A/C/Q2' : Item(verb='Adding'),\n 'A/C/foo' : Item(verb='Adding'),\n 'A/C/foo2' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/C' : Item(status=' ', wc_rev=3),\n 'A/B/F/Q' : Item(status=' ', wc_rev=2),\n 'A/B/F/Q2' : Item(status=' ', wc_rev=2),\n 'A/B/F/Q/bar' : Item(status=' ', wc_rev=2),\n 'A/B/F/Q/bar2': Item(status=' ', wc_rev=2),\n 'A/B/F/foo' : Item(status=' ', wc_rev=2),\n 'A/B/F/foo2' : Item(status=' ', wc_rev=2),\n 'A/C/Q' : Item(status=' ', wc_rev=3),\n 'A/C/Q2' : Item(status=' ', wc_rev=3),\n 'A/C/Q/bar' : Item(status=' ', wc_rev=3),\n 'A/C/Q/bar2' : Item(status=' ', wc_rev=3),\n 'A/C/foo' : Item(status=' ', wc_rev=3),\n 'A/C/foo2' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n#----------------------------------------------------------------------\n# Issue 953\n@SkipUnless(server_has_mergeinfo)\n@Issue(953)\ndef simple_property_merges(sbox):\n \"some simple property merges\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Add a property to a file and a directory\n alpha_path = sbox.ospath('A/B/E/alpha')\n beta_path = sbox.ospath('A/B/E/beta')\n E_path = sbox.ospath('A/B/E')\n\n svntest.actions.set_prop('foo', 'foo_val', alpha_path)\n # A binary, non-UTF8 property value\n svntest.actions.set_prop('foo', b'foo\\201val', beta_path)\n svntest.actions.set_prop('foo', 'foo_val', E_path)\n\n # Commit change as rev 2\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E' : Item(verb='Sending'),\n 'A/B/E/alpha' : Item(verb='Sending'),\n 
'A/B/E/beta' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/B/E', 'A/B/E/alpha', 'A/B/E/beta',\n wc_rev=2, status=' ')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Copy B to B2 as rev 3\n B_url = sbox.repo_url + '/A/B'\n B2_url = sbox.repo_url + '/A/B2'\n\n svntest.actions.run_and_verify_svn(None, [],\n 'copy', '-m', 'copy B to B2',\n B_url, B2_url)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Modify a property and add a property for the file and directory\n svntest.actions.set_prop('foo', 'mod_foo', alpha_path)\n svntest.actions.set_prop('bar', 'bar_val', alpha_path)\n svntest.actions.set_prop('foo', b'mod\\201foo', beta_path)\n svntest.actions.set_prop('bar', b'bar\\201val', beta_path)\n svntest.actions.set_prop('foo', 'mod_foo', E_path)\n svntest.actions.set_prop('bar', 'bar_val', E_path)\n\n # Commit change as rev 4\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_status.tweak('A/B/E', 'A/B/E/alpha', 'A/B/E/beta',\n wc_rev=4, status=' ')\n expected_status.add({\n 'A/B2' : Item(status=' ', wc_rev=3),\n 'A/B2/E' : Item(status=' ', wc_rev=3),\n 'A/B2/E/alpha' : Item(status=' ', wc_rev=3),\n 'A/B2/E/beta' : Item(status=' ', wc_rev=3),\n 'A/B2/F' : Item(status=' ', wc_rev=3),\n 'A/B2/lambda' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n pristine_status = expected_status\n pristine_status.tweak(wc_rev=4)\n\n # Merge B 3:4 into B2\n B2_path = sbox.ospath('A/B2')\n expected_output = wc.State(B2_path, {\n 'E' : Item(status=' U'),\n 'E/alpha' : Item(status=' U'),\n 'E/beta' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(B2_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(B2_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:4'}),\n 'E' : Item(),\n 'E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F' : Item(),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\"),\n })\n expected_disk.tweak('E', 'E/alpha',\n props={'foo' : 'mod_foo', 'bar' : 'bar_val'})\n expected_disk.tweak('E/beta',\n props={'foo' : b'mod\\201foo', 'bar' : b'bar\\201val'})\n expected_status = wc.State(B2_path, {\n '' : Item(status=' M'),\n 'E' : Item(status=' M'),\n 'E/alpha' : Item(status=' M'),\n 'E/beta' : Item(status=' M'),\n 'F' : Item(status=' '),\n 'lambda' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=4)\n expected_skip = wc.State('', { })\n svntest.actions.run_and_verify_merge(B2_path, '3', '4', B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Revert merge\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', wc_dir)\n svntest.actions.run_and_verify_status(wc_dir, pristine_status)\n\n # Merge B 2:1 into B2 (B2's mergeinfo should get elided away)\n expected_status.tweak('', status=' ')\n expected_disk.remove('')\n expected_disk.tweak('E', 'E/alpha', 'E/beta', props={})\n expected_elision_output = wc.State(B2_path, {\n '' : Item(status=' U'),\n })\n svntest.actions.run_and_verify_merge(B2_path, '2', '1', B_url, None,\n expected_output,\n expected_mergeinfo_output,\n 
expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n def error_message(property, old_value, new_value):\n return \"Trying to change property '%s'\\n\" \\\n \"but the property has been locally deleted.\\n\" \\\n \"<<<<<<< (local property value)\\n\" \\\n \"||||||| (incoming 'changed from' value)\\n\" \\\n \"%s=======\\n\" \\\n \"%s>>>>>>> (incoming 'changed to' value)\\n\" % (property, old_value, new_value)\n\n # Merge B 3:4 into B2 now causes a conflict\n expected_disk.add({\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:4'}),\n 'E/dir_conflicts.prej'\n : Item(error_message('foo', 'foo_val', 'mod_foo')),\n 'E/alpha.prej'\n : Item(error_message('foo', 'foo_val', 'mod_foo')),\n 'E/beta.prej'\n : Item(error_message('foo', 'foo?\\\\81val', 'mod?\\\\81foo')),\n })\n expected_disk.tweak('E', 'E/alpha', props={'bar' : 'bar_val'})\n expected_disk.tweak('E/beta', props={'bar' : b'bar\\201val'})\n expected_status.tweak('', status=' M')\n expected_status.tweak('E', 'E/alpha', 'E/beta', status=' C')\n expected_output.tweak('E', 'E/alpha', 'E/beta', status=' C')\n expected_elision_output = wc.State(B2_path, {\n })\n svntest.actions.run_and_verify_merge(B2_path, '3', '4', B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # issue 1109 : single file property merge. This test performs a merge\n # that should be a no-op (adding properties that are already present).\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', wc_dir)\n svntest.actions.run_and_verify_status(wc_dir, pristine_status)\n\n # Copy A at rev 4 to A2 to make revision 5.\n A_url = sbox.repo_url + '/A'\n A2_url = sbox.repo_url + '/A2'\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 5.\\n'], [],\n 'copy', '-m', 'copy A to A2',\n A_url, A2_url)\n\n # Re-root the WC at A2.\n svntest.main.safe_rmtree(wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'checkout',\n A2_url, wc_dir)\n\n # Attempt to re-merge rev 4 of the original A's alpha. Mergeinfo\n # inherited from A2 (created by its copy from A) allows us to avoid\n # a repeated merge.\n alpha_url = sbox.repo_url + '/A/B/E/alpha'\n alpha_path = sbox.ospath('B/E/alpha')\n\n # Cannot use run_and_verify_merge with a file target\n svntest.actions.run_and_verify_svn([], [], 'merge', '-r', '3:4',\n alpha_url, alpha_path)\n\n exit_code, output, err = svntest.actions.run_and_verify_svn(None, [],\n 'pl', alpha_path)\n\n saw_foo = 0\n saw_bar = 0\n for line in output:\n if re.match(\"\\\\s*foo\\\\s*$\", line):\n saw_foo = 1\n if re.match(\"\\\\s*bar\\\\s*$\", line):\n saw_bar = 1\n\n if not saw_foo or not saw_bar:\n raise svntest.Failure(\"Expected properties not found\")\n\n#----------------------------------------------------------------------\n# This is a regression for issue #1176.\n@Issue(1176)\ndef merge_similar_unrelated_trees(sbox):\n \"merging similar trees ancestrally unrelated\"\n\n ## See https://issues.apache.org/jira/browse/SVN-1249. ##\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Simple test. Make three directories with the same content.\n # Modify some stuff in the second one. 
Now merge\n # (firstdir:seconddir->thirddir).\n\n base1_path = sbox.ospath('base1')\n base2_path = sbox.ospath('base2')\n apply_path = sbox.ospath('apply')\n\n base1_url = os.path.join(sbox.repo_url + '/base1')\n base2_url = os.path.join(sbox.repo_url + '/base2')\n\n # Make a tree of stuff ...\n os.mkdir(base1_path)\n svntest.main.file_append(os.path.join(base1_path, 'iota'),\n \"This is the file iota\\n\")\n os.mkdir(os.path.join(base1_path, 'A'))\n svntest.main.file_append(os.path.join(base1_path, 'A', 'mu'),\n \"This is the file mu\\n\")\n os.mkdir(os.path.join(base1_path, 'A', 'B'))\n svntest.main.file_append(os.path.join(base1_path, 'A', 'B', 'alpha'),\n \"This is the file alpha\\n\")\n svntest.main.file_append(os.path.join(base1_path, 'A', 'B', 'beta'),\n \"This is the file beta\\n\")\n\n # ... Copy it twice ...\n shutil.copytree(base1_path, base2_path)\n shutil.copytree(base1_path, apply_path)\n\n # ... Gonna see if merge is naughty or nice!\n svntest.main.file_append(os.path.join(base2_path, 'A', 'mu'),\n \"A new line in mu.\\n\")\n os.rename(os.path.join(base2_path, 'A', 'B', 'beta'),\n os.path.join(base2_path, 'A', 'B', 'zeta'))\n\n svntest.actions.run_and_verify_svn(None, [],\n 'add', base1_path, base2_path, apply_path)\n\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'rev 2', wc_dir)\n\n expected_output = wc.State(apply_path, {\n 'A/mu' : Item(status='U '),\n 'A/B/zeta' : Item(status='A '),\n 'A/B/beta' : Item(status='D '),\n })\n\n # run_and_verify_merge doesn't support 'svn merge URL URL path'\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(None, [],\n 'merge',\n '--ignore-ancestry',\n base1_url, base2_url,\n apply_path)\n\n expected_status = wc.State(apply_path, {\n '' : Item(status=' '),\n 'A' : Item(status=' '),\n 'A/mu' : Item(status='M '),\n 'A/B' : Item(status=' '),\n 'A/B/zeta' : Item(status='A ', copied='+'),\n 'A/B/alpha' : Item(status=' '),\n 'A/B/beta' : Item(status='D '),\n 'iota' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=2)\n expected_status.tweak('A/B/zeta', wc_rev='-')\n svntest.actions.run_and_verify_status(apply_path, expected_status)\n\n#----------------------------------------------------------------------\ndef merge_one_file_helper(sbox, arg_flav, record_only = 0):\n \"\"\"ARG_FLAV is one of 'r' (revision range) or 'c' (single change) or\n '*' (no revision specified).\"\"\"\n\n if arg_flav not in ('r', 'c', '*'):\n raise svntest.Failure(\"Unrecognized flavor of merge argument\")\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n rho_rel_path = os.path.join('A', 'D', 'G', 'rho')\n rho_path = os.path.join(wc_dir, rho_rel_path)\n G_path = sbox.ospath('A/D/G')\n rho_url = sbox.repo_url + '/A/D/G/rho'\n\n # Change rho for revision 2\n svntest.main.file_append(rho_path, 'A new line in rho.\\n')\n\n expected_output = wc.State(wc_dir, { rho_rel_path : Item(verb='Sending'), })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/D/G/rho', wc_rev=2)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Backdate rho to revision 1, so we can merge in the rev 2 changes.\n svntest.actions.run_and_verify_svn(None, [],\n 'up', '-r', '1', rho_path)\n\n # Try one merge with an explicit target; it should succeed.\n ### Yes, it would be nice to use run_and_verify_merge(), but it\n # appears to be impossible to get the expected_foo trees working\n # right. 
I think something is still assuming a directory target.\n if arg_flav == 'r':\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[2]],\n ['U ' + rho_path + '\\n',\n ' U ' + rho_path + '\\n']),\n [], 'merge', '-r', '1:2', rho_url, rho_path)\n elif arg_flav == 'c':\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[2]],\n ['U ' + rho_path + '\\n',\n ' U ' + rho_path + '\\n']),\n [], 'merge', '-c', '2', rho_url, rho_path)\n elif arg_flav == '*':\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[2]],\n ['U ' + rho_path + '\\n',\n ' U ' + rho_path + '\\n']),\n [], 'merge', rho_url, rho_path)\n\n expected_status.tweak(wc_rev=1)\n expected_status.tweak('A/D/G/rho', status='MM')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n # Inspect rho, make sure it's right.\n rho_text = svntest.tree.get_text(rho_path)\n if rho_text != \"This is the file 'rho'.\\nA new line in rho.\\n\":\n raise svntest.Failure(\"Unexpected text in merged '\" + rho_path + \"'\")\n\n # Restore rho to pristine revision 1, for another merge.\n svntest.actions.run_and_verify_svn(None, [], 'revert', rho_path)\n expected_status.tweak('A/D/G/rho', status=' ')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n # Cd into the directory and run merge with no targets.\n # It should still merge into rho.\n saved_cwd = os.getcwd()\n os.chdir(G_path)\n\n # Cannot use run_and_verify_merge with a file target\n merge_cmd = ['merge']\n if arg_flav == 'r':\n merge_cmd += ['-r', '1:2']\n elif arg_flav == 'c':\n merge_cmd += ['-c', '2']\n\n if record_only:\n expected_output = expected_merge_output([[2]],\n [' U rho\\n'])\n merge_cmd.append('--record-only')\n rho_expected_status = ' M'\n else:\n expected_output = expected_merge_output([[2]],\n ['U rho\\n',\n ' U rho\\n'])\n rho_expected_status = 'MM'\n merge_cmd.append(rho_url)\n\n svntest.actions.run_and_verify_svn(expected_output, [], *merge_cmd)\n\n # Inspect rho, make sure it's right.\n rho_text = svntest.tree.get_text('rho')\n if record_only:\n expected_text = \"This is the file 'rho'.\\n\"\n else:\n expected_text = \"This is the file 'rho'.\\nA new line in rho.\\n\"\n if rho_text != expected_text:\n print(\"\")\n raise svntest.Failure(\"Unexpected text merged to 'rho' in '\" +\n G_path + \"'\")\n os.chdir(saved_cwd)\n\n expected_status.tweak('A/D/G/rho', status=rho_expected_status)\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(1150)\ndef merge_one_file_using_r(sbox):\n \"merge one file using the -r option\"\n merge_one_file_helper(sbox, 'r')\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(1150)\ndef merge_one_file_using_c(sbox):\n \"merge one file using the -c option\"\n merge_one_file_helper(sbox, 'c')\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_one_file_using_implicit_revs(sbox):\n \"merge one file without explicit revisions\"\n merge_one_file_helper(sbox, '*')\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_record_only(sbox):\n \"mark a revision range as merged\"\n merge_one_file_helper(sbox, 'r', 1)\n\n#----------------------------------------------------------------------\n# This is a regression test for the enhancement added in issue #785 \"add\n# friendly 
enhancement to 'svn merge'\", which is about inferring that\n# the default target of \"svn merge [-r...] FILE\" should not be \".\" but\n# rather should be \"FILE\".\ndef merge_with_implicit_target_helper(sbox, arg_flav):\n \"\"\"ARG_FLAV is one of 'r' (revision range) or 'c' (single change) or\n '*' (no revision specified).\"\"\"\n\n if arg_flav not in ('r', 'c', '*'):\n raise svntest.Failure(\"Unrecognized flavor of merge argument\")\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Change mu for revision 2\n mu_path = sbox.ospath('A/mu')\n orig_mu_text = svntest.tree.get_text(mu_path)\n added_mu_text = \"\"\n for x in range(2,11):\n added_mu_text = added_mu_text + 'This is line ' + repr(x) + ' in mu\\n'\n svntest.main.file_append(mu_path, added_mu_text)\n\n # Create expected output tree for initial commit\n expected_output = wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n })\n\n # Create expected status tree; all local revisions should be at 1,\n # but mu should be at revision 2.\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', wc_rev=2)\n\n # Initial commit.\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Make the \"other\" working copy, at r1\n other_wc = sbox.add_wc_path('other')\n svntest.actions.duplicate_dir(wc_dir, other_wc)\n svntest.main.run_svn(None, 'up', '-r', 1, other_wc)\n\n # Try the merge without an explicit target; it should succeed.\n # Can't use run_and_verify_merge cuz it expects a directory argument.\n mu_url = sbox.repo_url + '/A/mu'\n\n os.chdir(os.path.join(other_wc, 'A'))\n\n # merge using filename for sourcepath\n # Cannot use run_and_verify_merge with a file target\n if arg_flav == 'r':\n svntest.actions.run_and_verify_svn(expected_merge_output([[2]],\n ['U mu\\n',\n ' U mu\\n']),\n [],\n 'merge', '-r', '1:2', 'mu')\n elif arg_flav == 'c':\n svntest.actions.run_and_verify_svn(expected_merge_output([[2]],\n ['U mu\\n',\n ' U mu\\n']),\n [],\n 'merge', '-c', '2', 'mu')\n\n elif arg_flav == '*':\n # Without a peg revision, the default merge range of BASE:1 (which\n # is a no-op) will be chosen. Let's do it both ways (no-op first,\n # of course).\n svntest.actions.run_and_verify_svn(None, [], 'merge', 'mu')\n svntest.actions.run_and_verify_svn(expected_merge_output([[2]],\n ['U mu\\n',\n ' U mu\\n']),\n [],\n 'merge', 'mu@2')\n\n # sanity-check resulting file\n if svntest.tree.get_text('mu') != orig_mu_text + added_mu_text:\n raise svntest.Failure(\"Unexpected text in 'mu'\")\n\n # merge using URL for sourcepath\n if arg_flav == 'r':\n svntest.actions.run_and_verify_svn(expected_merge_output([[-2]],\n ['G mu\\n',\n ' U mu\\n',\n ' G mu\\n',],\n elides=True),\n [],\n 'merge', '-r', '2:1', mu_url)\n elif arg_flav == 'c':\n svntest.actions.run_and_verify_svn(expected_merge_output([[-2]],\n ['G mu\\n',\n ' U mu\\n',\n ' G mu\\n'],\n elides=True),\n [],\n 'merge', '-c', '-2', mu_url)\n elif arg_flav == '*':\n # Implicit merge source URL and revision range detection is for\n # forward merges only (e.g. non-reverts). 
Undo application of\n # r2 to enable continuation of the test case.\n svntest.actions.run_and_verify_svn(expected_merge_output([[-2]],\n ['G mu\\n',\n ' U mu\\n',\n ' G mu\\n'],\n elides=True),\n [],\n 'merge', '-c', '-2', mu_url)\n\n # sanity-check resulting file\n if svntest.tree.get_text('mu') != orig_mu_text:\n raise svntest.Failure(\"Unexpected text '%s' in 'mu', expected '%s'\" %\n (svntest.tree.get_text('mu'), orig_mu_text))\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(785)\ndef merge_with_implicit_target_using_r(sbox):\n \"merging a file w/no explicit target path using -r\"\n merge_with_implicit_target_helper(sbox, 'r')\n\n#----------------------------------------------------------------------\n@Issue(785)\ndef merge_with_implicit_target_using_c(sbox):\n \"merging a file w/no explicit target path using -c\"\n merge_with_implicit_target_helper(sbox, 'c')\n\n#----------------------------------------------------------------------\n@Issue(785)\ndef merge_with_implicit_target_and_revs(sbox):\n \"merging a file w/no explicit target path or revs\"\n merge_with_implicit_target_helper(sbox, '*')\n\n#----------------------------------------------------------------------\ndef merge_with_prev(sbox):\n \"merge operations using PREV revision\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Change mu for revision 2\n mu_path = sbox.ospath('A/mu')\n orig_mu_text = svntest.tree.get_text(mu_path)\n added_mu_text = \"\"\n for x in range(2,11):\n added_mu_text = added_mu_text + '\\nThis is line ' + repr(x) + ' in mu'\n added_mu_text += \"\\n\"\n svntest.main.file_append(mu_path, added_mu_text)\n\n zot_path = sbox.ospath('A/zot')\n\n svntest.main.file_append(zot_path, \"bar\")\n svntest.main.run_svn(None, 'add', zot_path)\n\n # Create expected output tree for initial commit\n expected_output = wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n 'A/zot' : Item(verb='Adding'),\n })\n\n # Create expected status tree; all local revisions should be at 1,\n # but mu should be at revision 2.\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', wc_rev=2)\n expected_status.add({'A/zot' : Item(status=' ', wc_rev=2)})\n\n # Initial commit.\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Make some other working copies\n other_wc = sbox.add_wc_path('other')\n svntest.actions.duplicate_dir(wc_dir, other_wc)\n\n another_wc = sbox.add_wc_path('another')\n svntest.actions.duplicate_dir(wc_dir, another_wc)\n\n was_cwd = os.getcwd()\n\n os.chdir(os.path.join(other_wc, 'A'))\n\n # Try to revert the last change to mu via svn merge\n # Cannot use run_and_verify_merge with a file target\n svntest.actions.run_and_verify_svn(expected_merge_output([[-2]],\n ['U mu\\n',\n ' U mu\\n'],\n elides=True),\n [],\n 'merge', '-r', 'HEAD:PREV', 'mu')\n\n # sanity-check resulting file\n if svntest.tree.get_text('mu') != orig_mu_text:\n raise svntest.Failure(\"Unexpected text in 'mu'\")\n\n os.chdir(was_cwd)\n\n other_status = expected_status\n other_status.wc_dir = other_wc\n other_status.tweak('A/mu', status='M ', wc_rev=2)\n other_status.tweak('A/zot', wc_rev=2)\n svntest.actions.run_and_verify_status(other_wc, other_status)\n\n os.chdir(another_wc)\n\n # ensure 'A' will be at revision 2\n svntest.actions.run_and_verify_svn(None, [], 'up')\n\n # now try a revert on a directory, and verify that it removed the zot\n # file we had added previously\n 
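# (Clarifying note added here; not part of the original test script.) COMMITTED and\n  # PREV are Subversion revision keywords: COMMITTED resolves to the last revision in\n  # which the target itself changed, and PREV is COMMITTED - 1, so '-r COMMITTED:PREV'\n  # reverse-merges the target's most recent change. For 'A' that change is r2, which\n  # added A/zot and sent A/mu, hence the 'D ' and 'M ' statuses verified below.\n  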
svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-r', 'COMMITTED:PREV',\n 'A', 'A')\n\n if svntest.tree.get_text('A/zot') != None:\n raise svntest.Failure(\"Unexpected text in 'A/zot'\")\n\n os.chdir(was_cwd)\n\n another_status = expected_status\n another_status.wc_dir = another_wc\n another_status.tweak(wc_rev=2)\n another_status.tweak('A/mu', status='M ')\n another_status.tweak('A/zot', status='D ')\n svntest.actions.run_and_verify_status(another_wc, another_status)\n\n#----------------------------------------------------------------------\n# Regression test for issue #1319: 'svn merge' should *not* 'C' when\n# merging a change into a binary file, unless it has local mods, or has\n# different contents from the left side of the merge.\n@SkipUnless(server_has_mergeinfo)\n@Issue(1319)\ndef merge_binary_file(sbox):\n \"merge change into unchanged binary file\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Add a binary file to the project\n theta_contents = open(os.path.join(sys.path[0], \"theta.bin\"), 'rb').read()\n # Write PNG file data into 'A/theta'.\n theta_path = sbox.ospath('A/theta')\n svntest.main.file_write(theta_path, theta_contents, 'wb')\n\n svntest.main.run_svn(None, 'add', theta_path)\n\n # Commit the new binary file, creating revision 2.\n expected_output = svntest.wc.State(wc_dir, {\n 'A/theta' : Item(verb='Adding (bin)'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/theta' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Make the \"other\" working copy\n other_wc = sbox.add_wc_path('other')\n svntest.actions.duplicate_dir(wc_dir, other_wc)\n\n # Change the binary file in first working copy, commit revision 3.\n svntest.main.file_append(theta_path, \"some extra junk\")\n expected_output = wc.State(wc_dir, {\n 'A/theta' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/theta' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # In second working copy, attempt to 'svn merge -r 2:3'.\n # We should *not* see a conflict during the update, but a 'U'.\n # And after the merge, the status should be 'M'.\n expected_output = wc.State(other_wc, {\n 'A/theta' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(other_wc, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(other_wc, {\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n '' : Item(props={SVN_PROP_MERGEINFO : '/:3'}),\n 'A/theta' : Item(theta_contents + b\"some extra junk\",\n props={'svn:mime-type' : 'application/octet-stream'}),\n })\n expected_status = svntest.actions.get_virginal_state(other_wc, 1)\n expected_status.add({\n '' : Item(status=' M', wc_rev=1),\n 'A/theta' : Item(status='M ', wc_rev=2),\n })\n expected_skip = wc.State('', { })\n\n svntest.actions.run_and_verify_merge(other_wc, '2', '3',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [],\n True, True, '--allow-mixed-revisions',\n other_wc)\n\n#----------------------------------------------------------------------\n# Regression test for Issue #1297:\n# A merge that creates a new file followed by an immediate diff\n# The diff should succeed.\n@SkipUnless(server_has_mergeinfo)\n@Issue(1297)\ndef 
merge_in_new_file_and_diff(sbox):\n \"diff after merge that creates a new file\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n trunk_url = sbox.repo_url + '/A/B/E'\n\n # Create a branch\n svntest.actions.run_and_verify_svn(None, [], 'cp',\n trunk_url,\n sbox.repo_url + '/branch',\n '-m', \"Creating the Branch\")\n\n # Update to revision 2.\n svntest.actions.run_and_verify_svn(None, [],\n 'update', wc_dir)\n\n new_file_path = sbox.ospath('A/B/E/newfile')\n svntest.main.file_write(new_file_path, \"newfile\\n\")\n\n # Add the new file, and commit revision 3.\n svntest.actions.run_and_verify_svn(None, [], \"add\", new_file_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m',\n \"Changing the trunk.\", wc_dir)\n\n branch_path = sbox.ospath('branch')\n url_branch_path = branch_path.replace(os.path.sep, '/')\n\n # Merge our addition into the branch.\n expected_output = svntest.wc.State(branch_path, {\n 'newfile' : Item(status='A '),\n })\n expected_mergeinfo_output = svntest.wc.State(branch_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(branch_path, {\n })\n expected_disk = wc.State('', {\n 'alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'beta' : Item(\"This is the file 'beta'.\\n\"),\n 'newfile' : Item(\"newfile\\n\"),\n })\n expected_status = wc.State(branch_path, {\n '' : Item(status=' M', wc_rev=2),\n 'alpha' : Item(status=' ', wc_rev=2),\n 'beta' : Item(status=' ', wc_rev=2),\n 'newfile' : Item(status='A ', wc_rev='-', copied='+')\n })\n expected_skip = wc.State('', { })\n\n svntest.actions.run_and_verify_merge(branch_path,\n '1', 'HEAD', trunk_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip)\n\n # Finally, run diff.\n expected_output = [\n \"Index: \" + url_branch_path + \"/newfile\\n\",\n \"===================================================================\\n\",\n \"--- \"+ url_branch_path + \"/newfile\t(nonexistent)\\n\",\n \"+++ \"+ url_branch_path + \"/newfile\t(working copy)\\n\",\n \"@@ -0,0 +1 @@\\n\",\n \"+newfile\\n\",\n\n \"Index: \" + url_branch_path + \"\\n\",\n \"===================================================================\\n\",\n \"--- \"+ url_branch_path + \"\\t(revision 2)\\n\",\n \"+++ \"+ url_branch_path + \"\\t(working copy)\\n\",\n \"\\n\",\n \"Property changes on: \" + url_branch_path + \"\\n\",\n \"___________________________________________________________________\\n\",\n \"Added: \" + SVN_PROP_MERGEINFO + \"\\n\",\n \"## -0,0 +0,1 ##\\n\",\n \" Merged /A/B/E:r2-3\\n\",\n ]\n svntest.actions.run_and_verify_svn(expected_output, [], 'diff',\n '--show-copies-as-adds', branch_path)\n\n\n#----------------------------------------------------------------------\n# Issue #1425: 'svn merge' should skip over any unversioned obstructions.\n# This test involves tree conflicts. 
- but attempting to test for\n# pre-tree-conflict behaviour\n@SkipUnless(server_has_mergeinfo)\n@Issues(1425, 2898)\ndef merge_skips_obstructions(sbox):\n \"merge should skip over unversioned obstructions\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n C_path = sbox.ospath('A/C')\n F_path = sbox.ospath('A/B/F')\n F_url = sbox.repo_url + '/A/B/F'\n\n Q_path = os.path.join(F_path, 'Q')\n foo_path = os.path.join(F_path, 'foo')\n bar_path = os.path.join(F_path, 'Q', 'bar')\n\n svntest.main.run_svn(None, 'mkdir', Q_path)\n svntest.main.file_append(foo_path, \"foo\")\n svntest.main.file_append(bar_path, \"bar\")\n svntest.main.run_svn(None, 'add', foo_path, bar_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/F/Q' : Item(verb='Adding'),\n 'A/B/F/Q/bar' : Item(verb='Adding'),\n 'A/B/F/foo' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/Q' : Item(status=' ', wc_rev=2),\n 'A/B/F/Q/bar' : Item(status=' ', wc_rev=2),\n 'A/B/F/foo' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n pre_merge_status = expected_status\n\n # Revision 2 now has A/B/F/foo, A/B/F/Q, A/B/F/Q/bar. Let's merge\n # those 'F' changes into empty dir 'C'. But first, create an\n # unversioned 'foo' within C, and make sure 'svn merge' doesn't\n # error when the addition of foo is obstructed.\n\n expected_output = wc.State(C_path, {\n 'Q' : Item(status='A '),\n 'Q/bar' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(C_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(C_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/F:2'}),\n 'Q' : Item(),\n 'Q/bar' : Item(\"bar\"),\n 'foo' : Item(\"foo\"),\n })\n expected_status = wc.State(C_path, {\n '' : Item(status=' M', wc_rev=1),\n 'Q' : Item(status='A ', wc_rev='-', copied='+'),\n 'Q/bar' : Item(status=' ', wc_rev='-', copied='+'),\n })\n expected_skip = wc.State(C_path, {\n 'foo' : Item(verb='Skipped'),\n })\n # Unversioned:\n svntest.main.file_append(os.path.join(C_path, \"foo\"), \"foo\")\n\n svntest.actions.run_and_verify_merge(C_path, '1', '2', F_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True)\n\n # Revert the local mods, and this time make \"Q\" obstructed. 
An\n # unversioned file called \"Q\" will obstruct the adding of the\n # directory of the same name.\n\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '-R', wc_dir)\n os.unlink(os.path.join(C_path, \"foo\"))\n svntest.main.safe_rmtree(os.path.join(C_path, \"Q\"))\n svntest.main.file_append(os.path.join(C_path, \"Q\"), \"foo\") # unversioned\n svntest.actions.run_and_verify_status(wc_dir, pre_merge_status)\n\n expected_output = wc.State(C_path, {\n 'foo' : Item(status='A '),\n 'Q/bar' : Item(status=' ', treeconflict='A'), # Skipped\n })\n expected_mergeinfo_output = wc.State(C_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(C_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/F:2'}),\n 'Q' : Item(\"foo\"),\n 'foo' : Item(\"foo\"),\n })\n expected_status = wc.State(C_path, {\n '' : Item(status=' M', wc_rev=1),\n 'foo' : Item(status='A ', wc_rev='-', copied='+'),\n })\n expected_skip = wc.State(C_path, {\n 'Q' : Item(verb='Skipped'),\n })\n\n svntest.actions.run_and_verify_merge(C_path, '1', '2', F_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True)\n\n # Revert the local mods, and commit the deletion of iota and A/D/G. (r3)\n os.unlink(os.path.join(C_path, \"foo\"))\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n svntest.actions.run_and_verify_status(wc_dir, pre_merge_status)\n\n iota_path = sbox.ospath('iota')\n G_path = sbox.ospath('A/D/G')\n svntest.actions.run_and_verify_svn(None, [], 'rm', iota_path, G_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/D/G' : Item(verb='Deleting'),\n 'iota' : Item(verb='Deleting'),\n })\n expected_status = pre_merge_status\n expected_status.remove('iota', 'A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Now create unversioned iota and A/D/G, try running a merge -r2:3.\n # The merge process should skip over these targets, since they're\n # unversioned.\n\n svntest.main.file_append(iota_path, \"foo\") # unversioned\n os.mkdir(G_path) # unversioned\n\n expected_output = wc.State(wc_dir, {\n })\n expected_mergeinfo_output = wc.State(wc_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_dir, {\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n expected_disk.add({\n '' : Item(props={SVN_PROP_MERGEINFO : '/:3'}),\n 'A/B/F/Q' : Item(),\n 'A/B/F/Q/bar' : Item(\"bar\"),\n 'A/B/F/foo' : Item(\"foo\"),\n 'A/C/Q' : Item(\"foo\"),\n })\n expected_disk.tweak('iota', contents=\"foo\")\n # No-op merge still sets mergeinfo\n expected_status.tweak('', status=' M')\n expected_skip = wc.State(wc_dir, {\n 'iota' : Item(verb='Skipped'),\n 'A/D/G' : Item(verb='Skipped'),\n })\n svntest.actions.run_and_verify_merge(wc_dir, '2', '3',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status.copy(wc_dir),\n expected_skip,\n [],\n True, False, '--allow-mixed-revisions',\n wc_dir)\n\n # Revert the local mods, and commit a change to A/B/lambda (r4), and then\n # commit the deletion of the same file. 
(r5)\n svntest.main.safe_rmtree(G_path)\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n expected_status.tweak('', status=' ')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n lambda_path = sbox.ospath('A/B/lambda')\n svntest.main.file_append(lambda_path, \"more text\")\n expected_output = wc.State(wc_dir, {\n 'A/B/lambda' : Item(verb='Sending'),\n })\n expected_status.tweak('A/B/lambda', wc_rev=4)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n svntest.actions.run_and_verify_svn(None, [], 'rm', lambda_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/lambda' : Item(verb='Deleting'),\n })\n expected_status.remove('A/B/lambda')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # lambda is gone, so create an unversioned lambda in its place.\n # Then attempt to merge -r3:4, which is a change to lambda. The merge\n # should simply skip the unversioned file.\n\n svntest.main.file_append(lambda_path, \"foo\") # unversioned\n\n expected_output = wc.State(wc_dir, { })\n expected_mergeinfo_output = wc.State(wc_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_dir, {\n })\n expected_disk.add({\n 'A/B/lambda' : Item(\"foo\"),\n })\n expected_disk.remove('A/D/G')\n expected_disk.tweak('', props={SVN_PROP_MERGEINFO : '/:4'})\n expected_skip = wc.State(wc_dir, {\n 'A/B/lambda' : Item(verb='Skipped'),\n })\n # No-op merge still sets mergeinfo.\n expected_status_short = expected_status.copy(wc_dir)\n expected_status_short.tweak('', status=' M')\n\n svntest.actions.run_and_verify_merge(wc_dir, '3', '4',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status_short,\n expected_skip,\n [],\n True, False, '--allow-mixed-revisions',\n wc_dir)\n\n # OK, so let's commit the new lambda (r6), and then delete the\n # working file. Then re-run the -r3:4 merge, and see how svn deals\n # with a file being under version control, but missing.\n\n svntest.actions.run_and_verify_svn(None, [], 'add', lambda_path)\n\n # Mergeinfo prop changed so update to avoid out of date error.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n expected_output = wc.State(wc_dir, {\n '' : Item(verb='Sending'),\n 'A/B/lambda' : Item(verb='Adding'),\n })\n expected_mergeinfo_output = wc.State(wc_dir, {})\n expected_elision_output = wc.State(wc_dir, {})\n expected_status.tweak(wc_rev=5)\n expected_status.add({\n 'A/B/lambda' : Item(wc_rev=6, status=' '),\n })\n expected_status.tweak('', status=' ', wc_rev=6)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n os.unlink(lambda_path)\n\n expected_output = wc.State(wc_dir, { })\n expected_disk.remove('A/B/lambda')\n expected_status.tweak('A/B/lambda', status='! ')\n expected_status.tweak('', status=' ')\n expected_skip = wc.State(wc_dir, {\n 'A/B/lambda' : Item(verb='Skipped missing target'),\n })\n # Why do we need to --ignore-ancestry? Because the previous merge of r4,\n # despite being inoperative, set mergeinfo for r4 on the WC. With the\n # advent of merge tracking this repeat merge attempt would not be attempted.\n # By using --ignore-ancestry we disregard the mergeinfo and *really* try to\n # merge into a missing path. 
This is another facet of issue #2898.\n svntest.actions.run_and_verify_merge(wc_dir, '3', '4',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status.copy(wc_dir),\n expected_skip,\n [],\n 1, 0, '--ignore-ancestry',\n '--allow-mixed-revisions', wc_dir)\n\n#----------------------------------------------------------------------\n# At one time, a merge that added items with the same name as missing\n# items would attempt to add the items and fail, leaving the working\n# copy locked and broken.\n\n# This test involves tree conflicts.\n@SkipUnless(server_has_mergeinfo)\ndef merge_into_missing(sbox):\n \"merge into missing must not break working copy\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n F_path = sbox.ospath('A/B/F')\n F_url = sbox.repo_url + '/A/B/F'\n Q_path = os.path.join(F_path, 'Q')\n foo_path = os.path.join(F_path, 'foo')\n\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', Q_path)\n svntest.main.file_append(foo_path, \"foo\")\n svntest.actions.run_and_verify_svn(None, [], 'add', foo_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/F/Q' : Item(verb='Adding'),\n 'A/B/F/foo' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/Q' : Item(status=' ', wc_rev=2),\n 'A/B/F/foo' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n R_path = os.path.join(Q_path, 'R')\n bar_path = os.path.join(R_path, 'bar')\n baz_path = os.path.join(Q_path, 'baz')\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', R_path)\n svntest.main.file_append(bar_path, \"bar\")\n svntest.actions.run_and_verify_svn(None, [], 'add', bar_path)\n svntest.main.file_append(baz_path, \"baz\")\n svntest.actions.run_and_verify_svn(None, [], 'add', baz_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/F/Q/R' : Item(verb='Adding'),\n 'A/B/F/Q/R/bar' : Item(verb='Adding'),\n 'A/B/F/Q/baz' : Item(verb='Adding'),\n })\n expected_status.add({\n 'A/B/F/Q/R' : Item(status=' ', wc_rev=3),\n 'A/B/F/Q/R/bar' : Item(status=' ', wc_rev=3),\n 'A/B/F/Q/baz' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n os.unlink(foo_path)\n svntest.main.safe_rmtree(Q_path)\n\n expected_output = wc.State(F_path, {\n })\n expected_mergeinfo_output = wc.State(F_path, {\n })\n expected_elision_output = wc.State(F_path, {\n })\n expected_disk = wc.State('', {\n })\n expected_status = wc.State(F_path, {\n '' : Item(status=' ', wc_rev=1),\n 'foo' : Item(status='! ', wc_rev=2),\n 'Q' : Item(status='! ', wc_rev=2),\n # Missing data still available\n 'Q/R' : Item(status='! ', wc_rev=3),\n 'Q/R/bar' : Item(status='! ', wc_rev=3),\n 'Q/baz' : Item(status='! 
', wc_rev=3),\n })\n expected_skip = wc.State(F_path, {\n 'Q' : Item(verb='Skipped missing target'),\n 'foo' : Item(verb='Skipped missing target'),\n })\n # Use --ignore-ancestry because merge tracking aware merges raise an\n # error when the merge target is missing subtrees due to OS-level\n # deletes.\n\n ### Need to real and dry-run separately since real merge notifies Q\n ### twice!\n svntest.actions.run_and_verify_merge(F_path, '1', '2', F_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], False, False,\n '--dry-run',\n '--ignore-ancestry',\n '--allow-mixed-revisions',\n F_path)\n\n expected_status = wc.State(F_path, {\n '' : Item(status=' ', wc_rev=1),\n 'foo' : Item(status='! ', wc_rev=2),\n 'Q' : Item(status='! ', wc_rev='2'),\n # Revision is known and we can record mergeinfo\n 'Q/R' : Item(status='! ', wc_rev='3'),\n 'Q/R/bar' : Item(status='! ', wc_rev='3'),\n 'Q/baz' : Item(status='! ', wc_rev='3'),\n })\n expected_mergeinfo_output = wc.State(F_path, {\n })\n\n svntest.actions.run_and_verify_merge(F_path, '1', '2', F_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], False, False,\n '--ignore-ancestry',\n '--allow-mixed-revisions',\n F_path)\n\n # This merge fails when it attempts to descend into the missing\n # directory. That's OK, there is no real need to support merge into\n # an incomplete working copy, so long as when it fails it doesn't\n # break the working copy.\n svntest.main.run_svn('Working copy not locked',\n 'merge', '-r1:3', '--dry-run', F_url, F_path)\n\n svntest.main.run_svn('Working copy not locked',\n 'merge', '-r1:3', F_url, F_path)\n\n # Check working copy is not locked.\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F' : Item(status=' ', wc_rev=1),\n 'A/B/F/foo' : Item(status='! ', wc_rev=2),\n 'A/B/F/Q' : Item(status='! ', wc_rev=2),\n 'A/B/F/Q/baz' : Item(status='! ', wc_rev='3'),\n 'A/B/F/Q/R' : Item(status='! ', wc_rev='3'),\n 'A/B/F/Q/R/bar' : Item(status='! 
', wc_rev='3'),\n })\n\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n#----------------------------------------------------------------------\n# A test for issue 1738\n@Issue(1738)\n@SkipUnless(server_has_mergeinfo)\ndef dry_run_adds_file_with_prop(sbox):\n \"merge --dry-run adding a new file with props\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Commit a new file which has a property.\n zig_path = sbox.ospath('A/B/E/zig')\n svntest.main.file_append(zig_path, \"zig contents\")\n svntest.actions.run_and_verify_svn(None, [], 'add', zig_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val',\n zig_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/E/zig' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/E/zig' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Do a regular merge of that change into a different dir.\n F_path = sbox.ospath('A/B/F')\n E_url = sbox.repo_url + '/A/B/E'\n\n expected_output = wc.State(F_path, {\n 'zig' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(F_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(F_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:2'}),\n 'zig' : Item(\"zig contents\", {'foo':'foo_val'}),\n })\n expected_skip = wc.State('', { })\n expected_status = None # status is optional\n\n svntest.actions.run_and_verify_merge(F_path, '1', '2', E_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n#----------------------------------------------------------------------\n# Regression test for issue #1673\n# Merge a binary file from two URL with a common ancestry\n@Issue(1673)\ndef merge_binary_with_common_ancestry(sbox):\n \"merge binary files with common ancestry\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Create the common ancestry path\n I_path = sbox.ospath('I')\n svntest.main.run_svn(None, 'mkdir', I_path)\n\n # Add a binary file to the common ancestry path\n theta_contents = open(os.path.join(sys.path[0], \"theta.bin\"), 'rb').read()\n theta_I_path = os.path.join(I_path, 'theta')\n svntest.main.file_write(theta_I_path, theta_contents, mode='wb')\n svntest.main.run_svn(None, 'add', theta_I_path)\n svntest.main.run_svn(None, 'propset', 'svn:mime-type',\n 'application/octet-stream', theta_I_path)\n\n # Commit the ancestry\n expected_output = wc.State(wc_dir, {\n 'I' : Item(verb='Adding'),\n 'I/theta' : Item(verb='Adding (bin)'),\n })\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'I' : Item(status=' ', wc_rev=2),\n 'I/theta' : Item(status=' ', wc_rev=2),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n\n # Create the first branch\n J_path = sbox.ospath('J')\n svntest.main.run_svn(None, 'copy', I_path, J_path)\n\n # Commit the first branch\n expected_output = wc.State(wc_dir, {\n 'J' : Item(verb='Adding'),\n })\n\n expected_status.add({\n 'J' : Item(status=' ', wc_rev=3),\n 'J/theta' : Item(status=' ', wc_rev=3),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n\n # Create the path where the files will be merged\n K_path = sbox.ospath('K')\n svntest.main.run_svn(None, 'mkdir', K_path)\n\n # Commit the new path\n expected_output 
= wc.State(wc_dir, {\n 'K' : Item(verb='Adding'),\n })\n\n expected_status.add({\n 'K' : Item(status=' ', wc_rev=4),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n\n # Copy 'I/theta' to 'K/'. This file will be merged later.\n theta_K_path = os.path.join(K_path, 'theta')\n svntest.main.run_svn(None, 'copy', theta_I_path, theta_K_path)\n\n # Commit the new file\n expected_output = wc.State(wc_dir, {\n 'K/theta' : Item(verb='Adding (bin)'),\n })\n\n expected_status.add({\n 'K/theta' : Item(status=' ', wc_rev=5),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n\n # Modify the original ancestry 'I/theta'\n svntest.main.file_append(theta_I_path, \"some extra junk\")\n\n # Commit the modification\n expected_output = wc.State(wc_dir, {\n 'I/theta' : Item(verb='Sending'),\n })\n\n expected_status.tweak('I/theta', wc_rev=6)\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n\n # Create the second branch from the modified ancestry\n L_path = sbox.ospath('L')\n svntest.main.run_svn(None, 'copy', I_path, L_path)\n\n # Commit the second branch\n expected_output = wc.State(wc_dir, {\n 'L' : Item(verb='Adding'),\n 'L/theta' : Item(verb='Replacing'),\n })\n\n expected_status.add({\n 'L' : Item(status=' ', wc_rev=7),\n 'L/theta' : Item(status=' ', wc_rev=7),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n\n # Now merge first ('J/') and second ('L/') branches into 'K/'\n saved_cwd = os.getcwd()\n\n os.chdir(K_path)\n theta_J_url = sbox.repo_url + '/J/theta'\n theta_L_url = sbox.repo_url + '/L/theta'\n svntest.actions.run_and_verify_svn(expected_merge_output(None,\n ['U theta\\n',\n ' U theta\\n',\n ' G theta\\n',],\n two_url=True),\n [],\n 'merge', theta_J_url, theta_L_url)\n os.chdir(saved_cwd)\n\n expected_status.tweak('K/theta', status='MM')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n#----------------------------------------------------------------------\n# A test for issue 1905\n@Issue(1905)\n@SkipUnless(server_has_mergeinfo)\ndef merge_funny_chars_on_path(sbox):\n \"merge with funny characters\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # In following lists: 'd' stands for directory, 'f' for file\n # targets to be added by recursive add\n add_by_add = [\n ('d', 'dir_10', 'F%lename'),\n ('d', 'dir%20', 'F lename'),\n ('d', 'dir 30', 'Filename'),\n ('d', 'dir 40', None),\n ('f', 'F lename', None),\n ]\n\n # targets to be added by 'svn mkdir' + add\n add_by_mkdir = [\n ('d', 'dir_11', 'F%lename'),\n ('d', 'dir%21', 'Filename'),\n ('d', 'dir 31', 'F lename'),\n ('d', 'dir 41', None),\n ]\n\n for target in add_by_add:\n if target[0] == 'd':\n target_dir = os.path.join(wc_dir, 'A', 'B', 'E', target[1])\n os.mkdir(target_dir)\n if target[2]:\n target_path = os.path.join(wc_dir, 'A', 'B', 'E', '%s' % target[1],\n target[2])\n svntest.main.file_append(target_path, \"%s/%s\" % (target[1], target[2]))\n svntest.actions.run_and_verify_svn(None, [], 'add', target_dir)\n elif target[0] == 'f':\n target_path = os.path.join(wc_dir, 'A', 'B', 'E', '%s' % target[1])\n svntest.main.file_append(target_path, \"%s\" % target[1])\n svntest.actions.run_and_verify_svn(None, [], 'add', target_path)\n else:\n raise svntest.Failure\n\n\n for target in add_by_mkdir:\n if target[0] == 'd':\n target_dir = os.path.join(wc_dir, 'A', 'B', 'E', target[1])\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', target_dir)\n if target[2]:\n 
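# (Descriptive note added for clarity; not in the original test.) When target[2] is\n        # set it names a child file to create and 'svn add' inside the directory that\n        # was just made with 'svn mkdir' above.\n        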
target_path = os.path.join(wc_dir, 'A', 'B', 'E', '%s' % target[1],\n target[2])\n svntest.main.file_append(target_path, \"%s/%s\" % (target[1], target[2]))\n svntest.actions.run_and_verify_svn(None, [], 'add', target_path)\n\n expected_output_dic = {}\n expected_status_dic = {}\n\n for targets in add_by_add,add_by_mkdir:\n for target in targets:\n key = 'A/B/E/%s' % target[1]\n expected_output_dic[key] = Item(verb='Adding')\n expected_status_dic[key] = Item(status=' ', wc_rev=2)\n\n if target[2]:\n key = 'A/B/E/%s/%s' % (target[1], target[2])\n expected_output_dic[key] = Item(verb='Adding')\n expected_status_dic[key] = Item(status=' ', wc_rev=2)\n\n\n expected_output = wc.State(wc_dir, expected_output_dic)\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add(expected_status_dic)\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Do a regular merge of that change into a different dir.\n F_path = sbox.ospath('A/B/F')\n E_url = sbox.repo_url + '/A/B/E'\n\n expected_output_dic = {}\n expected_disk_dic = {}\n\n for targets in add_by_add,add_by_mkdir:\n for target in targets:\n key = '%s' % target[1]\n expected_output_dic[key] = Item(status='A ')\n if target[0] == 'd':\n expected_disk_dic[key] = Item(None, {})\n elif target[0] == 'f':\n expected_disk_dic[key] = Item(\"%s\" % target[1], {})\n else:\n raise svntest.Failure\n if target[2]:\n key = '%s/%s' % (target[1], target[2])\n expected_output_dic[key] = Item(status='A ')\n expected_disk_dic[key] = Item('%s/%s' % (target[1], target[2]), {})\n\n expected_output = wc.State(F_path, expected_output_dic)\n expected_mergeinfo_output = wc.State(F_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(F_path, {\n })\n expected_disk = wc.State('', expected_disk_dic)\n expected_skip = wc.State('', { })\n expected_status = None # status is optional\n\n svntest.actions.run_and_verify_merge(F_path, '1', '2', E_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [],\n False, # don't check props\n True) # but do a dry-run\n\n expected_output_dic = {}\n\n for targets in add_by_add,add_by_mkdir:\n for target in targets:\n key = '%s' % target[1]\n expected_output_dic[key] = Item(verb='Adding')\n\n expected_output = wc.State(F_path, expected_output_dic)\n expected_output.add({\n '' : Item(verb='Sending'),\n })\n\n svntest.actions.run_and_verify_commit(F_path,\n expected_output,\n None)\n\n#-----------------------------------------------------------------------\n# Regression test for issue #2064\n@Issue(2064)\ndef merge_keyword_expansions(sbox):\n \"merge changes to keyword expansion property\"\n\n sbox.build()\n\n wcpath = sbox.wc_dir\n tpath = os.path.join(wcpath, \"t\")\n bpath = os.path.join(wcpath, \"b\")\n t_fpath = os.path.join(tpath, 'f')\n b_fpath = os.path.join(bpath, 'f')\n\n os.mkdir(tpath)\n svntest.main.run_svn(None, \"add\", tpath)\n # Commit r2.\n svntest.actions.run_and_verify_svn(None, [],\n \"ci\", \"-m\", \"r2\", wcpath)\n\n # Copy t to b.\n svntest.main.run_svn(None, \"cp\", tpath, bpath)\n # Commit r3\n svntest.actions.run_and_verify_svn(None, [],\n \"ci\", \"-m\", \"r3\", wcpath)\n\n # Add a file to t.\n svntest.main.file_append(t_fpath, \"$Revision$\")\n svntest.actions.run_and_verify_svn(None, [],\n 'add', t_fpath)\n # Ask for keyword expansion in the file.\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'svn:keywords', 'Revision',\n 
t_fpath)\n # Commit r4\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'r4', wcpath)\n\n # Update the wc before the merge.\n svntest.actions.run_and_verify_svn(None, [],\n 'update', wcpath)\n\n expected_status = svntest.actions.get_virginal_state(wcpath, 4)\n expected_status.add({\n 't' : Item(status=' ', wc_rev=4),\n 't/f' : Item(status=' ', wc_rev=4),\n 'b' : Item(status=' ', wc_rev=4),\n })\n svntest.actions.run_and_verify_status(wcpath, expected_status)\n\n # Do the merge.\n\n expected_output = wc.State(bpath, {\n 'f' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(bpath, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(bpath, {\n })\n expected_disk = wc.State('', {\n 'f' : Item(\"$Revision: 4 $\"),\n })\n expected_status = wc.State(bpath, {\n '' : Item(status=' M', wc_rev=4),\n 'f' : Item(status='A ', wc_rev='-', copied='+'),\n })\n expected_skip = wc.State(bpath, { })\n\n svntest.actions.run_and_verify_merge(bpath, '2', 'HEAD',\n sbox.repo_url + '/t', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip)\n\n#----------------------------------------------------------------------\n@Issue(2132)\ndef merge_prop_change_to_deleted_target(sbox):\n \"merge prop change into deleted target\"\n # For issue #2132.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Add a property to alpha.\n alpha_path = sbox.ospath('A/B/E/alpha')\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val',\n alpha_path)\n\n # Commit the property add as r2.\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E/alpha' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/B/E/alpha', wc_rev=2, status=' ')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n svntest.actions.run_and_verify_svn(None, [],\n 'up', wc_dir)\n\n # Remove alpha entirely.\n svntest.actions.run_and_verify_svn(None, [], 'rm', alpha_path)\n expected_output = wc.State(wc_dir, {\n 'A/B/E/alpha' : Item(verb='Deleting'),\n })\n expected_status.tweak(wc_rev=2)\n expected_status.remove('A/B/E/alpha')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status,\n [], alpha_path)\n\n # Try merging the original propset, which applies to a target that\n # no longer exists. The bug would only reproduce when run from\n # inside the wc, so we cd in there. 
We have to use\n # --ignore-ancestry here because our merge logic will otherwise\n # prevent a merge of changes we already have.\n os.chdir(wc_dir)\n svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], 'merge',\n '-r1:2', '--ignore-ancestry', '.')\n\n#----------------------------------------------------------------------\n# A merge that replaces a directory\n# Tests for Issue #2144 and Issue #2607\n@SkipUnless(server_has_mergeinfo)\n@Issue(2144,2607)\ndef merge_dir_replace(sbox):\n \"merge a replacement of a directory\"\n\n set_up_dir_replace(sbox)\n wc_dir = sbox.wc_dir\n\n C_path = sbox.ospath('A/C')\n F_path = sbox.ospath('A/B/F')\n F_url = sbox.repo_url + '/A/B/F'\n foo_path = os.path.join(F_path, 'foo')\n new_file2 = os.path.join(foo_path, \"new file 2\")\n\n # Recreate foo in F and add a new folder and two files\n bar_path = os.path.join(foo_path, 'bar')\n foo_file = os.path.join(foo_path, \"file foo\")\n new_file3 = os.path.join(bar_path, \"new file 3\")\n\n # Make a couple of directories, and add some files within them.\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', foo_path)\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', bar_path)\n svntest.main.file_append(new_file3, \"Initial text in new file 3.\\n\")\n svntest.main.run_svn(None, \"add\", new_file3)\n svntest.main.file_append(foo_file, \"Initial text in file foo.\\n\")\n svntest.main.run_svn(None, \"add\", foo_file)\n\n # Commit the new content, creating r5.\n expected_output = wc.State(wc_dir, {\n 'A/B/F/foo' : Item(verb='Adding'),\n 'A/B/F/foo/file foo' : Item(verb='Adding'),\n 'A/B/F/foo/bar' : Item(verb='Adding'),\n 'A/B/F/foo/bar/new file 3' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/foo' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/file foo' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar/new file 3' : Item(status=' ', wc_rev=5),\n 'A/C' : Item(status=' ', wc_rev=3),\n 'A/C/foo' : Item(status=' ', wc_rev=3),\n 'A/C/foo/new file' : Item(status=' ', wc_rev=3),\n 'A/C/foo/new file 2' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n # Merge replacement of foo onto C\n expected_output = wc.State(C_path, {\n 'foo' : Item(status='R '),\n 'foo/file foo' : Item(status='A '),\n 'foo/bar' : Item(status='A '),\n 'foo/bar/new file 3' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(C_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(C_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/F:2-5'}),\n 'foo' : Item(),\n 'foo/file foo' : Item(\"Initial text in file foo.\\n\"),\n 'foo/bar' : Item(),\n 'foo/bar/new file 3' : Item(\"Initial text in new file 3.\\n\"),\n })\n expected_status = wc.State(C_path, {\n '' : Item(status=' M', wc_rev=3),\n 'foo' : Item(status='R ', wc_rev='-', copied='+'),\n 'foo/new file 2' : Item(status='D ', wc_rev='3'),\n 'foo/file foo' : Item(status=' ', wc_rev='-', copied='+'),\n 'foo/bar' : Item(status=' ', wc_rev='-', copied='+'),\n 'foo/bar/new file 3' : Item(status=' ', wc_rev='-', copied='+'),\n 'foo/new file' : Item(status='D ', wc_rev='3'),\n })\n expected_skip = wc.State(C_path, { })\n svntest.actions.run_and_verify_merge(C_path, '2', '5', F_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True,\n 
False) # don't do a dry-run\n # the output differs\n\n # Commit merge of foo onto C\n expected_output = svntest.wc.State(wc_dir, {\n 'A/C' : Item(verb='Sending'),\n 'A/C/foo' : Item(verb='Replacing'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/foo' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/file foo' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar/new file 3' : Item(status=' ', wc_rev=5),\n 'A/C' : Item(status=' ', wc_rev=6),\n 'A/C/foo' : Item(status=' ', wc_rev=6),\n 'A/C/foo/file foo' : Item(status=' ', wc_rev=6),\n 'A/C/foo/bar' : Item(status=' ', wc_rev=6),\n 'A/C/foo/bar/new file 3' : Item(status=' ', wc_rev=6),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n#----------------------------------------------------------------------\n# A merge that replaces a directory and one of its children\n# Tests for Issue #2690\n@Issue(2690)\ndef merge_dir_and_file_replace(sbox):\n \"replace both dir and one of its children\"\n\n set_up_dir_replace(sbox)\n wc_dir = sbox.wc_dir\n\n C_path = sbox.ospath('A/C')\n F_path = sbox.ospath('A/B/F')\n F_url = sbox.repo_url + '/A/B/F'\n foo_path = os.path.join(F_path, 'foo')\n new_file2 = os.path.join(foo_path, \"new file 2\")\n\n # Recreate foo and 'new file 2' in F and add a new folder with a file\n bar_path = os.path.join(foo_path, 'bar')\n new_file3 = os.path.join(bar_path, \"new file 3\")\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', foo_path)\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', bar_path)\n svntest.main.file_append(new_file3, \"Initial text in new file 3.\\n\")\n svntest.main.run_svn(None, \"add\", new_file3)\n svntest.main.file_append(new_file2, \"New text in new file 2.\\n\")\n svntest.main.run_svn(None, \"add\", new_file2)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/F/foo' : Item(verb='Adding'),\n 'A/B/F/foo/new file 2' : Item(verb='Adding'),\n 'A/B/F/foo/bar' : Item(verb='Adding'),\n 'A/B/F/foo/bar/new file 3' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/foo' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/new file 2' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar/new file 3' : Item(status=' ', wc_rev=5),\n 'A/C/foo' : Item(status=' ', wc_rev=3),\n 'A/C/foo/new file' : Item(status=' ', wc_rev=3),\n 'A/C/foo/new file 2' : Item(status=' ', wc_rev=3),\n })\n expected_status.tweak('A/C', wc_rev=3) # From mergeinfo\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n # Merge replacement of foo onto C\n expected_output = wc.State(C_path, {\n 'foo' : Item(status='R '),\n 'foo/new file 2' : Item(status='A '),\n 'foo/bar' : Item(status='A '),\n 'foo/bar/new file 3' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(C_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(C_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/F:2-5'}),\n 'foo' : Item(),\n 'foo/new file 2' : Item(\"New text in new file 2.\\n\"),\n 'foo/bar' : Item(),\n 'foo/bar/new file 3' : Item(\"Initial text in new file 3.\\n\"),\n })\n expected_status = wc.State(C_path, {\n '' : Item(status=' M', wc_rev=3),\n 'foo' : Item(status='R ', wc_rev='-', copied='+'),\n 'foo/new file 2' : Item(status=' ', wc_rev='-', copied='+'),\n 'foo/bar' : Item(status=' ', wc_rev='-', 
copied='+'),\n 'foo/bar/new file 3' : Item(status=' ', wc_rev='-', copied='+'),\n 'foo/new file' : Item(status='D ', wc_rev=3),\n })\n expected_skip = wc.State(C_path, { })\n svntest.actions.run_and_verify_merge(C_path, '2', '5', F_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [],\n True,\n False) # don't do a dry-run\n # the output differs\n\n # Commit merge of foo onto C\n expected_output = svntest.wc.State(wc_dir, {\n 'A/C' : Item(verb='Sending'),\n 'A/C/foo' : Item(verb='Replacing'),\n })\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/foo' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/new file 2' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar' : Item(status=' ', wc_rev=5),\n 'A/B/F/foo/bar/new file 3' : Item(status=' ', wc_rev=5),\n 'A/C' : Item(status=' ', wc_rev=6),\n 'A/C/foo' : Item(status=' ', wc_rev=6),\n 'A/C/foo/new file 2' : Item(status=' ', wc_rev=6),\n 'A/C/foo/bar' : Item(status=' ', wc_rev=6),\n 'A/C/foo/bar/new file 3' : Item(status=' ', wc_rev=6),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Confirm the files are present in the repository.\n new_file_2_url = sbox.repo_url + '/A/C/foo/new file 2'\n svntest.actions.run_and_verify_svn([\"New text in new file 2.\\n\"],\n [], 'cat',\n new_file_2_url)\n new_file_3_url = sbox.repo_url + '/A/C/foo/bar/new file 3'\n svntest.actions.run_and_verify_svn([\"Initial text in new file 3.\\n\"],\n [], 'cat',\n new_file_3_url)\n\n#----------------------------------------------------------------------\n@Issue(2144)\ndef merge_file_with_space_in_its_name(sbox):\n \"merge a file whose name contains a space\"\n # For issue #2144\n sbox.build()\n wc_dir = sbox.wc_dir\n new_file = sbox.ospath('new file')\n\n # Make r2.\n svntest.main.file_append(new_file, \"Initial text in the file.\\n\")\n svntest.main.run_svn(None, \"add\", new_file)\n svntest.actions.run_and_verify_svn(None, [],\n \"ci\", \"-m\", \"r2\", wc_dir)\n\n # Make r3.\n svntest.main.file_append(new_file, \"Next line of text in the file.\\n\")\n svntest.actions.run_and_verify_svn(None, [],\n \"ci\", \"-m\", \"r3\", wc_dir)\n\n # Try to reverse merge.\n #\n # The reproduction recipe requires that no explicit merge target be\n # passed, so we run merge from inside the wc dir where the target\n # file (i.e., the URL basename) lives.\n os.chdir(wc_dir)\n target_url = sbox.repo_url + '/new%20file'\n svntest.actions.run_and_verify_svn(None, [],\n \"merge\", \"-r3:2\", target_url)\n\n#----------------------------------------------------------------------\n# A merge between two branches using no revision number with the dir being\n# created already existing as an unversioned directory.\n# Tests for Issue #2222\n@Issue(2222)\ndef merge_dir_branches(sbox):\n \"merge between branches\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_uuid = svntest.actions.get_wc_uuid(wc_dir)\n\n F_path = sbox.ospath('A/B/F')\n F_url = sbox.repo_url + '/A/B/F'\n C_url = sbox.repo_url + '/A/C'\n\n # Create foo in F\n foo_path = os.path.join(F_path, 'foo')\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', foo_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/F/foo' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B/F/foo' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n 
expected_status)\n\n # Create an unversioned foo\n foo_path = sbox.ospath('foo')\n os.mkdir(foo_path)\n\n # Merge from C to F onto the wc_dir\n # We can't use run_and_verify_merge because it doesn't support this\n # syntax of the merge command.\n ### TODO: We can use run_and_verify_merge() here now.\n expected_output = expected_merge_output(None, \"A \" + foo_path + \"\\n\")\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '--allow-mixed-revisions',\n C_url, F_url, wc_dir)\n\n # Run info to check the copied rev to make sure it's right\n expected_info = {\"Path\" : re.escape(foo_path), # escape backslashes\n \"URL\" : sbox.repo_url + \"/foo\",\n \"Repository Root\" : sbox.repo_url,\n \"Repository UUID\" : wc_uuid,\n \"Revision\" : \"2\",\n \"Node Kind\" : \"directory\",\n \"Schedule\" : \"add\",\n \"Copied From URL\" : F_url + \"/foo\",\n \"Copied From Rev\" : \"2\",\n }\n svntest.actions.run_and_verify_info([expected_info], foo_path)\n\n\n#----------------------------------------------------------------------\ndef safe_property_merge(sbox):\n \"property merges don't overwrite existing prop-mods\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Add a property to two files and a directory, commit as r2.\n alpha_path = sbox.ospath('A/B/E/alpha')\n beta_path = sbox.ospath('A/B/E/beta')\n E_path = sbox.ospath('A/B/E')\n\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val',\n alpha_path, beta_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val',\n E_path)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E' : Item(verb='Sending'),\n 'A/B/E/alpha' : Item(verb='Sending'),\n 'A/B/E/beta' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/B/E', 'A/B/E/alpha', 'A/B/E/beta',\n wc_rev=2, status=' ')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Copy B to B2 as rev 3 (making a branch)\n B_url = sbox.repo_url + '/A/B'\n B2_url = sbox.repo_url + '/A/B2'\n\n svntest.actions.run_and_verify_svn(None, [],\n 'copy', '-m', 'copy B to B2',\n B_url, B2_url)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Change the properties underneath B again, and commit as r4\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val2',\n alpha_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'propdel', 'foo',\n beta_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val2',\n E_path)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E' : Item(verb='Sending'),\n 'A/B/E/alpha' : Item(verb='Sending'),\n 'A/B/E/beta' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, None)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Make local propchanges to E, alpha and beta in the branch.\n alpha_path2 = sbox.ospath('A/B2/E/alpha')\n beta_path2 = sbox.ospath('A/B2/E/beta')\n E_path2 = sbox.ospath('A/B2/E')\n\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'branchval',\n alpha_path2, beta_path2)\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'branchval',\n E_path2)\n\n # Now merge the recent B change to the branch. 
Because we already\n # have local propmods, we should get property conflicts.\n B2_path = sbox.ospath('A/B2')\n\n expected_output = wc.State(B2_path, {\n 'E' : Item(status=' C'),\n 'E/alpha' : Item(status=' C'),\n 'E/beta' : Item(status=' C'),\n })\n expected_mergeinfo_output = wc.State(B2_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(B2_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : \"/A/B:4\"}),\n 'E' : Item(),\n 'E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F' : Item(),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\"),\n })\n expected_disk.tweak('E', 'E/alpha', 'E/beta',\n props={'foo' : 'branchval'}) # local mods still present\n\n expected_status = wc.State(B2_path, {\n '' : Item(status=' M'),\n 'E' : Item(status=' C'),\n 'E/alpha' : Item(status=' C'),\n 'E/beta' : Item(status=' C'),\n 'F' : Item(status=' '),\n 'lambda' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=4)\n\n expected_skip = wc.State('', { })\n\n # should have 3 'prej' files left behind, describing prop conflicts:\n extra_files = ['alpha.*\\.prej', 'beta.*\\.prej', 'dir_conflicts.*\\.prej']\n\n svntest.actions.run_and_verify_merge(B2_path, '3', '4', B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n extra_files=extra_files)\n\n#----------------------------------------------------------------------\n# Test for issue 2035, whereby 'svn merge' wouldn't always mark\n# property conflicts when it should.\n@Issue(2035)\n@SkipUnless(server_has_mergeinfo)\ndef property_merge_from_branch(sbox):\n \"property merge conflict even without local mods\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Add a property to a file and a directory, commit as r2.\n alpha_path = sbox.ospath('A/B/E/alpha')\n E_path = sbox.ospath('A/B/E')\n\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val',\n alpha_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val',\n E_path)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E' : Item(verb='Sending'),\n 'A/B/E/alpha' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/B/E', 'A/B/E/alpha', wc_rev=2, status=' ')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Copy B to B2 as rev 3 (making a branch)\n B_url = sbox.repo_url + '/A/B'\n B2_url = sbox.repo_url + '/A/B2'\n\n svntest.actions.run_and_verify_svn(None, [],\n 'copy', '-m', 'copy B to B2',\n B_url, B2_url)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Change the properties underneath B again, and commit as r4\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val2',\n alpha_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val2',\n E_path)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E' : Item(verb='Sending'),\n 'A/B/E/alpha' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, None)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Make different propchanges changes to the B2 branch and commit as r5.\n alpha_path2 = sbox.ospath('A/B2/E/alpha')\n E_path2 = sbox.ospath('A/B2/E')\n\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 
'branchval',\n alpha_path2)\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'branchval',\n E_path2)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B2/E' : Item(verb='Sending'),\n 'A/B2/E/alpha' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, None)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Now merge the recent B change to the branch. There are no local\n # mods anywhere, but we should still get property conflicts anyway!\n B2_path = sbox.ospath('A/B2')\n\n expected_output = wc.State(B2_path, {\n 'E' : Item(status=' C'),\n 'E/alpha' : Item(status=' C'),\n })\n expected_mergeinfo_output = wc.State(B2_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(B2_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:4'}),\n 'E' : Item(),\n 'E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F' : Item(),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\"),\n })\n expected_disk.tweak('E', 'E/alpha',\n props={'foo' : 'branchval'})\n\n expected_status = wc.State(B2_path, {\n '' : Item(status=' M'),\n 'E' : Item(status=' C'),\n 'E/alpha' : Item(status=' C'),\n 'E/beta' : Item(status=' '),\n 'F' : Item(status=' '),\n 'lambda' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=5)\n\n expected_skip = wc.State('', { })\n\n # should have 2 'prej' files left behind, describing prop conflicts:\n extra_files = ['alpha.*\\.prej', 'dir_conflicts.*\\.prej']\n\n svntest.actions.run_and_verify_merge(B2_path, '3', '4', B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n extra_files=extra_files)\n\n#----------------------------------------------------------------------\n# Another test for issue 2035, whereby sometimes 'svn merge' marked\n# property conflicts when it shouldn't!\n@Issue(2035)\ndef property_merge_undo_redo(sbox):\n \"undo, then redo a property merge\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Add a property to a file, commit as r2.\n alpha_path = sbox.ospath('A/B/E/alpha')\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'foo', 'foo_val',\n alpha_path)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E/alpha' : Item(verb='Sending'),\n })\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/B/E/alpha', wc_rev=2, status=' ')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Use 'svn merge' to undo the commit. 
('svn merge -r2:1')\n # Result should be a single local-prop-mod.\n expected_output = wc.State(wc_dir, {'A/B/E/alpha' : Item(status=' U'), })\n expected_mergeinfo_output = wc.State(wc_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_dir, {\n '' : Item(status=' U'),\n })\n expected_disk = svntest.main.greek_state.copy()\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 2)\n expected_status.tweak('A/B/E/alpha', status=' M')\n\n expected_skip = wc.State('', { })\n\n svntest.actions.run_and_verify_merge(wc_dir, '2', '1',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False)\n\n # Change mind, re-apply the change ('svn merge -r1:2').\n # This should merge cleanly into existing prop-mod, status shows nothing.\n expected_output = wc.State(wc_dir, {'A/B/E/alpha' : Item(status=' C'), })\n expected_mergeinfo_output = wc.State(wc_dir, {})\n expected_elision_output = wc.State(wc_dir, {})\n expected_elision_output = wc.State(wc_dir, {})\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({'A/B/E/alpha.prej'\n : Item(\"Trying to add new property 'foo'\\n\"\n + \"but the property has been locally deleted.\\n\"\n + \"Incoming property value:\\nfoo_val\\n\")})\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 2)\n expected_status.tweak('A/B/E/alpha', status=' C')\n\n expected_skip = wc.State('', { })\n\n # Re-merge r1. We have to use --ignore-ancestry here. Otherwise\n # the merge logic will claim we already have this change (because it\n # was unable to record the previous undoing merge).\n svntest.actions.run_and_verify_merge(wc_dir, '1', '2',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--ignore-ancestry', wc_dir)\n\n\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef cherry_pick_text_conflict(sbox):\n \"cherry-pick a dependent change, get conflict\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n A_url = sbox.repo_url + '/A'\n mu_path = os.path.join(A_path, 'mu')\n branch_A_url = sbox.repo_url + '/copy-of-A'\n branch_mu_path = sbox.ospath('copy-of-A/mu')\n\n # Create a branch of A.\n svntest.actions.run_and_verify_svn(None, [], 'cp',\n A_url, branch_A_url,\n '-m', \"Creating copy-of-A\")\n\n # Update to get the branch.\n svntest.actions.run_and_verify_svn(None, [],\n 'update', wc_dir)\n\n # Change mu's text on the branch, producing r3 through r6.\n for rev in range(3, 7):\n svntest.main.file_append(branch_mu_path, (\"r%d\\n\" % rev) * 3)\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m',\n 'Add lines to mu in r%d.' % rev, wc_dir)\n\n # Mark r5 as merged into trunk, to create disparate revision ranges\n # which need to be merged.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]],\n [' U ' + A_path + '\\n']),\n [], 'merge', '-c5', '--record-only',\n branch_A_url, A_path)\n\n\n # Try to merge r4:6 into trunk, without r3. 
It should fail.\n expected_output = wc.State(A_path, {\n 'mu' : Item(status='C '),\n })\n expected_mergeinfo_output = wc.State(A_path, {\n '' : Item(status=' G')\n })\n expected_elision_output = wc.State(A_path, {\n })\n expected_disk = wc.State('', {\n 'mu' : Item(\"This is the file 'mu'.\\n\"\n + make_conflict_marker_text('', \"r3\\n\" * 3 + \"r4\\n\" * 3, 3, 4,\n old_text='r3\\n' * 3)),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_status = wc.State(A_path, {\n '' : Item(status=' M'),\n 'mu' : Item(status='C '),\n 'B' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=2)\n expected_skip = wc.State('', { })\n expected_error = \".*conflicts were produced while merging r3:4.*\"\n svntest.actions.run_and_verify_merge(A_path, '3', '6', branch_A_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n expected_error,\n extra_files=\n [\"mu\\.working\",\n \"mu\\.merge-right\\.r4\",\n \"mu\\.merge-left\\.r3\"])\n\n#----------------------------------------------------------------------\n# Test for issue 2135\n@Issue(2135)\ndef merge_file_replace(sbox):\n \"merge a replacement of a file\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # File scheduled for deletion\n rho_path = sbox.ospath('A/D/G/rho')\n svntest.actions.run_and_verify_svn(None, [], 'rm', rho_path)\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/D/G/rho', status='D ')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(verb='Deleting'),\n })\n\n expected_status.remove('A/D/G/rho')\n\n # Commit rev 2\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n # Create and add a new file.\n svntest.main.file_write(rho_path, \"new rho\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', rho_path)\n\n # Commit revsion 3\n expected_status.add({\n 'A/D/G/rho' : Item(status='A ', wc_rev='0')\n })\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(verb='Adding'),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n None)\n\n # Update working copy\n expected_output = svntest.wc.State(wc_dir, {})\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak('A/D/G/rho', contents='new rho\\n' 
)\n expected_status.tweak(wc_rev='3')\n expected_status.tweak('A/D/G/rho', status=' ')\n\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n expected_disk,\n expected_status)\n\n # merge changes from r3:1\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(status='R ')\n })\n expected_mergeinfo_output = svntest.wc.State(wc_dir, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(wc_dir, {\n '' : Item(status=' U')\n })\n expected_status.tweak('A/D/G/rho', status='R ', copied='+', wc_rev='-')\n expected_skip = wc.State(wc_dir, { })\n expected_disk.tweak('A/D/G/rho', contents=\"This is the file 'rho'.\\n\")\n svntest.actions.run_and_verify_merge(wc_dir, '3', '1',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip)\n\n # Now commit merged wc\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(verb='Replacing'),\n })\n expected_status.tweak('A/D/G/rho', status=' ', copied=None, wc_rev='4')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n#----------------------------------------------------------------------\n# Test for issue 2522\n# Same as merge_file_replace, but without update before merge.\n@Issue(2522)\ndef merge_file_replace_to_mixed_rev_wc(sbox):\n \"merge a replacement of a file to mixed rev wc\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # File scheduled for deletion\n rho_path = sbox.ospath('A/D/G/rho')\n svntest.actions.run_and_verify_svn(None, [], 'rm', rho_path)\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/D/G/rho', status='D ')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(verb='Deleting'),\n })\n\n expected_status.remove('A/D/G/rho')\n\n # Commit rev 2\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Update working copy\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/D/G/rho' )\n expected_output = svntest.wc.State(wc_dir, {})\n expected_status.tweak(wc_rev='2')\n\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n expected_disk,\n expected_status)\n\n # Create and add a new file.\n svntest.main.file_write(rho_path, \"new rho\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', rho_path)\n\n # Commit revsion 3\n expected_status.add({\n 'A/D/G/rho' : Item(status='A ', wc_rev='0')\n })\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(verb='Adding'),\n })\n\n expected_disk.add({'A/D/G/rho' : Item(contents='new rho\\n')} )\n expected_status.tweak(wc_rev='2')\n expected_status.tweak('A/D/G/rho', status=' ', wc_rev='3')\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # merge changes from r3:1...\n #\n # ...but first:\n #\n # Since \".\" is at revision 2, r3 is not part of \".\"'s implicit mergeinfo.\n # Merge tracking permits only reverse merges from explicit or implicit\n # mergeinfo, so only r2 would be reverse merged if we left the WC as is.\n # Normally we'd simply update the whole working copy, but since that would\n # defeat the purpose of this test (see the comment below), instead we'll\n # update only \".\" using --depth empty. 
This preserves the intent of the\n # original mixed-rev test for this issue, but allows the merge tracking\n # logic to consider r3 as valid for reverse merging.\n svntest.actions.run_and_verify_svn(None, [],\n 'up', '--depth', 'empty', wc_dir)\n expected_status.tweak('', wc_rev=3)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(status='R ')\n })\n expected_mergeinfo_output = svntest.wc.State(wc_dir, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(wc_dir, {\n '' : Item(status=' U')\n })\n expected_status.tweak('A/D/G/rho', status='R ', copied='+', wc_rev='-')\n expected_skip = wc.State(wc_dir, { })\n expected_disk.tweak('A/D/G/rho', contents=\"This is the file 'rho'.\\n\")\n svntest.actions.run_and_verify_merge(wc_dir, '3', '1',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [],\n True, False, '--allow-mixed-revisions',\n wc_dir)\n\n # When issue #2522 was filed, svn used to break the WC if we didn't\n # update here. But nowadays, this no longer happens, so the separate\n # update step which was done here originally has been removed.\n\n # Now commit merged wc\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/rho': Item(verb='Replacing'),\n })\n expected_status.tweak('A/D/G/rho', status=' ', copied=None, wc_rev='4')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n#----------------------------------------------------------------------\n# use -x -w option for ignoring whitespace during merge\n@SkipUnless(server_has_mergeinfo)\ndef merge_ignore_whitespace(sbox):\n \"ignore whitespace when merging\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # commit base version of iota\n file_name = \"iota\"\n file_path = os.path.join(wc_dir, file_name)\n file_url = sbox.repo_url + '/iota'\n\n svntest.main.file_write(file_path,\n \"Aa\\n\"\n \"Bb\\n\"\n \"Cc\\n\")\n expected_output = svntest.wc.State(wc_dir, {\n 'iota' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, None)\n\n # change the file, mostly whitespace changes + an extra line\n svntest.main.file_write(file_path, \"A a\\nBb \\n Cc\\nNew line in iota\\n\")\n expected_output = wc.State(wc_dir, { file_name : Item(verb='Sending'), })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak(file_name, wc_rev=3)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Backdate iota to revision 2, so we can merge in the rev 3 changes.\n svntest.actions.run_and_verify_svn(None, [],\n 'up', '-r', '2', file_path)\n # Make some local whitespace changes, these should not conflict\n # with the remote whitespace changes as both will be ignored.\n svntest.main.file_write(file_path, \" Aa\\nB b\\nC c\\n\")\n\n # Lines changed only by whitespace - both in local or remote -\n # should be ignored\n expected_output = wc.State(sbox.wc_dir, { file_name : Item(status='G ') })\n expected_mergeinfo_output = wc.State(sbox.wc_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(sbox.wc_dir, {\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak(file_name,\n contents=\" Aa\\n\"\n \"B b\\n\"\n \"C c\\n\"\n \"New line in iota\\n\")\n expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)\n expected_status.tweak('', status=' M', wc_rev=1)\n expected_status.tweak(file_name, status='M ', wc_rev=2)\n expected_skip = 
wc.State('', { })\n\n svntest.actions.run_and_verify_merge(sbox.wc_dir, '2', '3',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], False, False,\n '--allow-mixed-revisions',\n '-x', '-w', wc_dir)\n\n#----------------------------------------------------------------------\n# use -x --ignore-eol-style option for ignoring eolstyle during merge\n@SkipUnless(server_has_mergeinfo)\ndef merge_ignore_eolstyle(sbox):\n \"ignore eolstyle when merging\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # commit base version of iota\n file_name = \"iota\"\n file_path = os.path.join(wc_dir, file_name)\n file_url = sbox.repo_url + '/iota'\n\n svntest.main.file_write(file_path,\n \"Aa\\r\\n\"\n \"Bb\\r\\n\"\n \"Cc\\r\\n\",\n \"wb\")\n expected_output = svntest.wc.State(wc_dir, {\n 'iota' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, None)\n\n # change the file, mostly eol changes + an extra line\n svntest.main.file_write(file_path,\n \"Aa\\r\"\n \"Bb\\n\"\n \"Cc\\r\"\n \"New line in iota\\n\",\n \"wb\")\n expected_output = wc.State(wc_dir, { file_name : Item(verb='Sending'), })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak(file_name, wc_rev=3)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Backdate iota to revision 2, so we can merge in the rev 3 changes.\n svntest.actions.run_and_verify_svn(None, [],\n 'up', '-r', '2', file_path)\n # Make some local eol changes, these should not conflict\n # with the remote eol changes as both will be ignored.\n svntest.main.file_write(file_path,\n \"Aa\\n\"\n \"Bb\\r\"\n \"Cc\\n\",\n \"wb\")\n\n # Lines changed only by eolstyle - both in local or remote -\n # should be ignored\n expected_output = wc.State(sbox.wc_dir, { file_name : Item(status='G ') })\n expected_mergeinfo_output = wc.State(sbox.wc_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(sbox.wc_dir, {\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak(file_name,\n contents=\"Aa\\n\"\n \"Bb\\r\"\n \"Cc\\n\"\n \"New line in iota\\n\")\n expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)\n expected_status.tweak('', status=' M')\n expected_status.tweak(file_name, status='M ', wc_rev=2)\n expected_skip = wc.State('', { })\n\n svntest.actions.run_and_verify_merge2(sbox.wc_dir, '2', '3',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], False, False, True,\n '--allow-mixed-revisions',\n '-x', '--ignore-eol-style', wc_dir)\n\n#----------------------------------------------------------------------\n# eol-style handling during merge with conflicts, scenario 1:\n# when a merge creates a conflict on a file, make sure the file and files\n# r<left>, r<right> and .mine are in the eol-style defined for that file.\n#\n# This test for 'svn update' can be found in update_tests.py as\n# conflict_markers_matching_eol.\n@SkipUnless(server_has_mergeinfo)\ndef merge_conflict_markers_matching_eol(sbox):\n \"conflict markers should match the file's eol style\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n filecount = 1\n\n mu_path = sbox.ospath('A/mu')\n\n # CRLF is a string that will match a CRLF sequence read from a text file.\n # ### On Windows, we assume CRLF will be read as LF, so it's a poor test.\n if os.name == 'nt':\n crlf 
= '\\n'\n else:\n crlf = '\\r\\n'\n\n # Strict EOL style matching breaks Windows tests at least with Python 2\n keep_eol_style = not svntest.main.is_os_windows()\n\n # Checkout a second working copy\n wc_backup = sbox.add_wc_path('backup')\n svntest.actions.run_and_verify_svn(None, [], 'checkout',\n sbox.repo_url, wc_backup)\n\n # set starting revision\n cur_rev = 1\n\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, cur_rev)\n expected_backup_status = svntest.actions.get_virginal_state(wc_backup,\n cur_rev)\n\n path_backup = os.path.join(wc_backup, 'A', 'mu')\n\n # do the test for each eol-style\n for eol, eolchar in zip(['CRLF', 'CR', 'native', 'LF'],\n [crlf, '\\015', '\\n', '\\012']):\n # rewrite file mu and set the eol-style property.\n svntest.main.file_write(mu_path, \"This is the file 'mu'.\"+ eolchar, 'wb')\n svntest.main.run_svn(None, 'propset', 'svn:eol-style', eol, mu_path)\n\n expected_disk.add({\n 'A/mu' : Item(\"This is the file 'mu'.\" + eolchar)\n })\n expected_output = svntest.wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n })\n expected_status.tweak(wc_rev = cur_rev)\n expected_status.add({\n 'A/mu' : Item(status=' ', wc_rev = cur_rev + 1),\n })\n\n # Commit the original change and note the 'base' revision number\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n cur_rev = cur_rev + 1\n base_rev = cur_rev\n\n svntest.main.run_svn(None, 'update', wc_backup)\n\n # Make a local mod to mu\n svntest.main.file_append(mu_path,\n 'Original appended text for mu' + eolchar)\n\n # Commit the original change and note the 'theirs' revision number\n svntest.main.run_svn(None, 'commit', '-m', 'test log', wc_dir)\n cur_rev = cur_rev + 1\n theirs_rev = cur_rev\n\n # Make a local mod to mu, will conflict with the previous change\n svntest.main.file_append(path_backup,\n 'Conflicting appended text for mu' + eolchar)\n\n # Create expected output tree for an update of the wc_backup.\n expected_backup_output = svntest.wc.State(wc_backup, {\n 'A/mu' : Item(status='C '),\n })\n\n # Create expected disk tree for the update.\n expected_backup_disk = expected_disk.copy()\n\n # verify content of resulting conflicted file\n expected_backup_disk.add({\n 'A/mu' : Item(contents= \"This is the file 'mu'.\" + eolchar +\n \"<<<<<<< .working\" + eolchar +\n \"Conflicting appended text for mu\" + eolchar +\n \"||||||| .merge-left.r\" + str(cur_rev - 1) + eolchar +\n \"=======\" + eolchar +\n \"Original appended text for mu\" + eolchar +\n \">>>>>>> .merge-right.r\" + str(cur_rev) + eolchar),\n })\n # verify content of base(left) file\n expected_backup_disk.add({\n 'A/mu.merge-left.r' + str(base_rev) :\n Item(contents= \"This is the file 'mu'.\" + eolchar)\n })\n # verify content of theirs(right) file\n expected_backup_disk.add({\n 'A/mu.merge-right.r' + str(theirs_rev) :\n Item(contents= \"This is the file 'mu'.\" + eolchar +\n \"Original appended text for mu\" + eolchar)\n })\n # verify content of mine file\n expected_backup_disk.add({\n 'A/mu.working' : Item(contents= \"This is the file 'mu'.\" +\n eolchar +\n \"Conflicting appended text for mu\" + eolchar)\n })\n\n # Create expected status tree for the update.\n expected_backup_status.add({\n 'A/mu' : Item(status=' ', wc_rev=cur_rev),\n })\n expected_backup_status.tweak('A/mu', status='C ')\n expected_backup_status.tweak(wc_rev = cur_rev - 1)\n expected_backup_status.tweak('', status= ' M')\n expected_mergeinfo_output = wc.State(wc_backup, {\n '' : 
Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_backup, {\n })\n expected_backup_skip = wc.State('', { })\n\n svntest.actions.run_and_verify_merge2(wc_backup, cur_rev - 1, cur_rev,\n sbox.repo_url, None,\n expected_backup_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_backup_disk,\n expected_backup_status,\n expected_backup_skip,\n keep_eol_style=keep_eol_style)\n\n # cleanup for next run\n svntest.main.run_svn(None, 'revert', '-R', wc_backup)\n svntest.main.run_svn(None, 'update', wc_dir)\n\n#----------------------------------------------------------------------\n# eol-style handling during merge, scenario 2:\n# if part of that merge is a propchange (add, change, delete) of\n# svn:eol-style, make sure the correct eol-style is applied before\n# calculating the merge (and conflicts if any)\n#\n# This test for 'svn update' can be found in update_tests.py as\n# update_eolstyle_handling.\n@SkipUnless(server_has_mergeinfo)\ndef merge_eolstyle_handling(sbox):\n \"handle eol-style propchange during merge\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n mu_path = sbox.ospath('A/mu')\n\n # CRLF is a string that will match a CRLF sequence read from a text file.\n # ### On Windows, we assume CRLF will be read as LF, so it's a poor test.\n if os.name == 'nt':\n crlf = '\\n'\n else:\n crlf = '\\r\\n'\n\n # Strict EOL style matching breaks Windows tests at least with Python 2\n keep_eol_style = not svntest.main.is_os_windows()\n\n # Checkout a second working copy\n wc_backup = sbox.add_wc_path('backup')\n svntest.actions.run_and_verify_svn(None, [], 'checkout',\n sbox.repo_url, wc_backup)\n path_backup = os.path.join(wc_backup, 'A', 'mu')\n\n # Test 1: add the eol-style property and commit, change mu in the second\n # working copy and merge the last revision; there should be no conflict!\n svntest.main.run_svn(None, 'propset', 'svn:eol-style', \"CRLF\", mu_path)\n svntest.main.run_svn(None,\n 'commit', '-m', 'set eol-style property', wc_dir)\n\n svntest.main.file_append_binary(path_backup, 'Added new line of text.\\012')\n\n expected_backup_disk = svntest.main.greek_state.copy()\n expected_backup_disk.tweak(\n 'A/mu', contents= \"This is the file 'mu'.\" + crlf +\n \"Added new line of text.\" + crlf)\n expected_backup_output = svntest.wc.State(wc_backup, {\n 'A/mu' : Item(status='GU'),\n })\n expected_mergeinfo_output = svntest.wc.State(wc_backup, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_backup, {\n })\n expected_backup_status = svntest.actions.get_virginal_state(wc_backup, 1)\n expected_backup_status.tweak('', status=' M')\n expected_backup_status.tweak('A/mu', status='MM')\n\n expected_backup_skip = wc.State('', { })\n\n svntest.actions.run_and_verify_merge2(wc_backup, '1', '2', sbox.repo_url,\n None,\n expected_backup_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_backup_disk,\n expected_backup_status,\n expected_backup_skip,\n keep_eol_style=keep_eol_style)\n\n # Test 2: now change the eol-style property to another value and commit,\n # merge this revision in the still changed mu in the second working copy;\n # there should be no conflict!\n svntest.main.run_svn(None, 'propset', 'svn:eol-style', \"CR\", mu_path)\n svntest.main.run_svn(None,\n 'commit', '-m', 'set eol-style property', wc_dir)\n\n expected_backup_disk = svntest.main.greek_state.copy()\n expected_backup_disk.add({\n 'A/mu' : Item(contents= \"This is the file 'mu'.\\015\" +\n \"Added new line of text.\\015\")\n })\n 
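  # A minimal, illustrative sketch of how the expected on-disk text above is
  # derived: changing svn:eol-style to "CR" means the merge normalizes every
  # logical line to end in '\015' ('\r') before the diff is applied.  The
  # helper 'join_with_eol' below is hypothetical and lives only in this
  # comment; the test itself simply spells out the expected bytes.
  #
  #   def join_with_eol(lines, eol):
  #       # terminate each logical line with the EOL chosen by svn:eol-style
  #       return ''.join(line + eol for line in lines)
  #
  #   assert join_with_eol(["This is the file 'mu'.",
  #                         'Added new line of text.'], '\015') == \
  #          "This is the file 'mu'.\015Added new line of text.\015"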
expected_backup_output = svntest.wc.State(wc_backup, {\n 'A/mu' : Item(status='GU'),\n })\n expected_mergeinfo_output = svntest.wc.State(wc_backup, {\n '' : Item(status=' G'),\n })\n expected_backup_status = svntest.actions.get_virginal_state(wc_backup, 1)\n expected_backup_status.tweak('', status=' M')\n expected_backup_status.tweak('A/mu', status='MM')\n svntest.actions.run_and_verify_merge2(wc_backup, '2', '3', sbox.repo_url,\n None,\n expected_backup_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_backup_disk,\n expected_backup_status,\n expected_backup_skip,\n keep_eol_style=keep_eol_style)\n\n # Test 3: now delete the eol-style property and commit, merge this revision\n # in the still changed mu in the second working copy; there should be no\n # conflict!\n # EOL of mu should be unchanged (=CRLF).\n svntest.main.run_svn(None, 'propdel', 'svn:eol-style', mu_path)\n svntest.main.run_svn(None,\n 'commit', '-m', 'del eol-style property', wc_dir)\n\n expected_backup_disk = svntest.main.greek_state.copy()\n expected_backup_disk.add({\n 'A/mu' : Item(contents= \"This is the file 'mu'.\\015\" +\n \"Added new line of text.\\015\")\n })\n expected_backup_output = svntest.wc.State(wc_backup, {\n 'A/mu' : Item(status=' G'),\n })\n expected_backup_status = svntest.actions.get_virginal_state(wc_backup, 1)\n expected_backup_status.tweak('', status=' M')\n expected_backup_status.tweak('A/mu', status='M ')\n svntest.actions.run_and_verify_merge2(wc_backup, '3', '4', sbox.repo_url,\n None,\n expected_backup_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_backup_disk,\n expected_backup_status,\n expected_backup_skip,\n keep_eol_style=keep_eol_style)\n\n#----------------------------------------------------------------------\ndef create_deep_trees(wc_dir):\n \"\"\"Create A/B/F/E by moving A/B/E to A/B/F/E.\n Copy A/B/F/E to A/B/F/E1.\n Copy A/B to A/copy-of-B, and return the expected status.\n At the end of this function WC would be at r4\"\"\"\n\n A_path = os.path.join(wc_dir, 'A')\n A_B_path = os.path.join(A_path, 'B')\n A_B_E_path = os.path.join(A_B_path, 'E')\n A_B_F_path = os.path.join(A_B_path, 'F')\n A_B_F_E_path = os.path.join(A_B_F_path, 'E')\n A_B_F_E1_path = os.path.join(A_B_F_path, 'E1')\n\n # Deepen the directory structure we're working with by moving E to\n # underneath F and committing, creating revision 2.\n svntest.main.run_svn(None, 'mv', A_B_E_path, A_B_F_path)\n\n expected_output = wc.State(wc_dir, {\n 'A/B/E' : Item(verb='Deleting'),\n 'A/B/F/E' : Item(verb='Adding')\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.remove('A/B/E', 'A/B/E/alpha', 'A/B/E/beta')\n expected_status.add({\n 'A/B/F/E' : Item(status=' ', wc_rev=2),\n 'A/B/F/E/alpha' : Item(status=' ', wc_rev=2),\n 'A/B/F/E/beta' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n svntest.main.run_svn(None, 'cp', A_B_F_E_path, A_B_F_E1_path)\n\n\n expected_output = wc.State(wc_dir, {\n 'A/B/F/E1' : Item(verb='Adding')\n })\n expected_status.add({\n 'A/B/F/E1' : Item(status=' ', wc_rev=3),\n 'A/B/F/E1/alpha' : Item(status=' ', wc_rev=3),\n 'A/B/F/E1/beta' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Bring the entire WC up to date with rev 3.\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n expected_status.tweak(wc_rev=3)\n\n # Copy B and commit, creating revision 4.\n 
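  # For orientation, a compact summary of the history this helper has built
  # so far (illustrative only; the authoritative expectations are the
  # expected_* trees passed to the verification calls below):
  #
  #   history = {
  #       2: 'svn mv A/B/E A/B/F      -> creates A/B/F/E',
  #       3: 'svn cp A/B/F/E A/B/F/E1',
  #       4: 'svn cp A/B A/copy-of-B  (brings along F/E and F/E1)',
  #   }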
copy_of_B_path = os.path.join(A_path, 'copy-of-B')\n svntest.main.run_svn(None, \"cp\", A_B_path, copy_of_B_path)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/copy-of-B' : Item(verb='Adding'),\n })\n expected_status.add({\n 'A/copy-of-B' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/F' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/F/E' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/F/E/alpha' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/F/E/beta' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/F/E1' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/F/E1/alpha' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/F/E1/beta' : Item(status=' ', wc_rev=4),\n 'A/copy-of-B/lambda' : Item(status=' ', wc_rev=4),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/B/E', 'A/B/E/alpha', 'A/B/E/beta')\n expected_disk.add({\n 'A/B/F/E' : Item(),\n 'A/B/F/E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'A/B/F/E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'A/B/F/E1' : Item(),\n 'A/B/F/E1/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'A/B/F/E1/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'A/copy-of-B' : Item(),\n 'A/copy-of-B/F' : Item(props={}),\n 'A/copy-of-B/F/E' : Item(),\n 'A/copy-of-B/F/E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'A/copy-of-B/F/E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'A/copy-of-B/F/E1' : Item(),\n 'A/copy-of-B/F/E1/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'A/copy-of-B/F/E1/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'A/copy-of-B/lambda' : Item(contents=\"This is the file 'lambda'.\\n\"),\n })\n svntest.actions.verify_disk(wc_dir, expected_disk, True)\n\n # Bring the entire WC up to date with rev 4.\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n\n svntest.actions.verify_disk(wc_dir, expected_disk, True)\n\n expected_status.tweak(wc_rev=4)\n expected_disk.tweak('A/copy-of-B/F/E', 'A/copy-of-B/F/E1', status=' M')\n return expected_status\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef avoid_repeated_merge_using_inherited_merge_info(sbox):\n \"use inherited mergeinfo to avoid repeated merge\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n A_B_path = os.path.join(A_path, 'B')\n A_B_E_path = os.path.join(A_B_path, 'E')\n A_B_F_path = os.path.join(A_B_path, 'F')\n copy_of_B_path = os.path.join(A_path, 'copy-of-B')\n\n # Create a deeper directory structure.\n expected_status = create_deep_trees(wc_dir)\n\n # Edit alpha and commit it, creating revision 5.\n alpha_path = os.path.join(A_B_F_path, 'E', 'alpha')\n new_content_for_alpha = 'new content to alpha\\n'\n svntest.main.file_write(alpha_path, new_content_for_alpha)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/F/E/alpha' : Item(verb='Sending'),\n })\n expected_status.tweak('A/B/F/E/alpha', wc_rev=5)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Bring the entire WC up to date with rev 5.\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n\n # Merge changes from rev 5 of B (to alpha) into copy_of_B.\n expected_output = wc.State(copy_of_B_path, {\n 'F/E/alpha' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(copy_of_B_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(copy_of_B_path, {\n })\n 
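  # Hedged sketch of the idea this test exercises (the names below are
  # hypothetical and exist only in this comment): a repeated merge is
  # avoided when the requested revision is already covered by the mergeinfo
  # the target would inherit from its nearest ancestor.
  #
  #   def already_merged(rangelist, rev):
  #       # rangelist is e.g. [(5, 5)] for the recorded mergeinfo '/A/B:5'
  #       return any(low <= rev <= high for low, high in rangelist)
  #
  #   assert already_merged([(5, 5)], 5)      # r5 recorded -> skipped
  #   assert not already_merged([(5, 5)], 6)  # r6 would still be merged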
expected_status = wc.State(copy_of_B_path, {\n '' : Item(status=' M', wc_rev=5),\n 'F/E' : Item(status=' ', wc_rev=5),\n 'F/E/alpha' : Item(status='M ', wc_rev=5),\n 'F/E/beta' : Item(status=' ', wc_rev=5),\n 'F/E1' : Item(status=' ', wc_rev=5),\n 'F/E1/alpha' : Item(status=' ', wc_rev=5),\n 'F/E1/beta' : Item(status=' ', wc_rev=5),\n 'lambda' : Item(status=' ', wc_rev=5),\n 'F' : Item(status=' ', wc_rev=5),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:5'}),\n 'F/E' : Item(),\n 'F/E/alpha' : Item(new_content_for_alpha),\n 'F/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F/E1' : Item(),\n 'F/E1/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'F/E1/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F' : Item(),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\")\n })\n expected_skip = wc.State(copy_of_B_path, { })\n\n svntest.actions.run_and_verify_merge(copy_of_B_path, '4', '5',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Commit the result of the merge, creating revision 6.\n expected_output = svntest.wc.State(copy_of_B_path, {\n '' : Item(verb='Sending'),\n 'F/E/alpha' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(copy_of_B_path, expected_output,\n None)\n\n # Update the WC to bring /A/copy_of_B/F from rev 4 to rev 6.\n # Without this update, a subsequent merge will not find any merge\n # info for /A/copy_of_B/F -- nor its parent dir in the repos -- at\n # rev 4. Mergeinfo wasn't introduced until rev 6.\n copy_of_B_F_E_path = os.path.join(copy_of_B_path, 'F', 'E')\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n\n # Attempt to re-merge changes to alpha from rev 4. 
Use the merge\n # info inherited from the grandparent (copy-of-B) of our merge\n # target (/A/copy-of-B/F/E) to avoid a repeated merge.\n expected_status = wc.State(copy_of_B_F_E_path, {\n '' : Item(status=' ', wc_rev=6),\n 'alpha' : Item(status=' ', wc_rev=6),\n 'beta' : Item(status=' ', wc_rev=6),\n })\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]],\n [' U ' + copy_of_B_F_E_path + '\\n',\n ' G ' + copy_of_B_F_E_path + '\\n'],\n elides=True),\n [], 'merge', '-r4:5',\n sbox.repo_url + '/A/B/F/E',\n copy_of_B_F_E_path)\n svntest.actions.run_and_verify_status(copy_of_B_F_E_path,\n expected_status)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(2821)\ndef avoid_repeated_merge_on_subtree_with_merge_info(sbox):\n \"use subtree's mergeinfo to avoid repeated merge\"\n # Create deep trees A/B/F/E and A/B/F/E1 and copy A/B to A/copy-of-B\n # with the help of 'create_deep_trees'\n # As /A/copy-of-B/F/E1 is not a child of /A/copy-of-B/F/E,\n # set_path should not be called on /A/copy-of-B/F/E1 while\n # doing a implicit subtree merge on /A/copy-of-B/F/E.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n A_B_path = os.path.join(A_path, 'B')\n A_B_E_path = os.path.join(A_B_path, 'E')\n A_B_F_path = os.path.join(A_B_path, 'F')\n A_B_F_E_path = os.path.join(A_B_F_path, 'E')\n copy_of_B_path = os.path.join(A_path, 'copy-of-B')\n copy_of_B_F_path = os.path.join(A_path, 'copy-of-B', 'F')\n A_copy_of_B_F_E_alpha_path = os.path.join(A_path, 'copy-of-B', 'F',\n 'E', 'alpha')\n\n # Create a deeper directory structure.\n expected_status = create_deep_trees(wc_dir)\n\n # Edit alpha and commit it, creating revision 5.\n alpha_path = os.path.join(A_B_F_E_path, 'alpha')\n new_content_for_alpha1 = 'new content to alpha\\n'\n svntest.main.file_write(alpha_path, new_content_for_alpha1)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/F/E/alpha' : Item(verb='Sending'),\n })\n expected_status.tweak('A/B/F/E/alpha', wc_rev=5)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n for path_and_mergeinfo in (('E', '/A/B/F/E:5'),\n ('E1', '/A/B/F/E:5')):\n path_name = os.path.join(copy_of_B_path, 'F', path_and_mergeinfo[0])\n\n # Merge r5 to path_name.\n expected_output = wc.State(path_name, {\n 'alpha' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(path_name, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(path_name, {})\n expected_status = wc.State(path_name, {\n '' : Item(status=' M', wc_rev=4),\n 'alpha' : Item(status='M ', wc_rev=4),\n 'beta' : Item(status=' ', wc_rev=4),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : path_and_mergeinfo[1]}),\n 'alpha' : Item(new_content_for_alpha1),\n 'beta' : Item(\"This is the file 'beta'.\\n\"),\n })\n expected_skip = wc.State(path_name, { })\n\n svntest.actions.run_and_verify_merge(path_name, '4', '5',\n sbox.repo_url + '/A/B/F/E', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Commit the result of the merge, creating new revision.\n expected_output = svntest.wc.State(path_name, {\n '' : Item(verb='Sending'),\n 'alpha' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(path_name,\n expected_output, None, [], wc_dir)\n\n # Edit A/B/F/E/alpha and commit it, creating revision 8.\n new_content_for_alpha = 'new content to alpha\\none more 
line\\n'\n svntest.main.file_write(alpha_path, new_content_for_alpha)\n\n expected_output = svntest.wc.State(A_B_F_E_path, {\n 'alpha' : Item(verb='Sending'),\n })\n expected_status = wc.State(A_B_F_E_path, {\n '' : Item(status=' ', wc_rev=4),\n 'alpha' : Item(status=' ', wc_rev=8),\n 'beta' : Item(status=' ', wc_rev=4),\n })\n svntest.actions.run_and_verify_commit(A_B_F_E_path, expected_output,\n expected_status, [], wc_dir)\n\n # Update the WC to bring /A/copy_of_B to rev 8.\n # Without this update expected_status tree would be cumbersome to\n # understand.\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n\n # Merge changes from rev 4:8 of A/B into A/copy_of_B. A/copy_of_B/F/E1\n # has explicit mergeinfo and exists at r4 in the merge source, so it\n # should be treated as a subtree with intersecting mergeinfo and its\n # mergeinfo updated.\n expected_output = wc.State(copy_of_B_path, {\n 'F/E/alpha' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(copy_of_B_path, {\n '' : Item(status=' U'),\n 'F/E' : Item(status=' U')\n })\n expected_elision_output = wc.State(copy_of_B_path, {\n 'F/E' : Item(status=' U')\n })\n expected_status = wc.State(copy_of_B_path, {\n # The subtree mergeinfo on F/E1 is not updated because\n # this merge does not affect that subtree.\n '' : Item(status=' M', wc_rev=8),\n 'F/E' : Item(status=' M', wc_rev=8),\n 'F/E/alpha' : Item(status='M ', wc_rev=8),\n 'F/E/beta' : Item(status=' ', wc_rev=8),\n 'F/E1' : Item(status=' ', wc_rev=8),\n 'F/E1/alpha' : Item(status=' ', wc_rev=8),\n 'F/E1/beta' : Item(status=' ', wc_rev=8),\n 'lambda' : Item(status=' ', wc_rev=8),\n 'F' : Item(status=' ', wc_rev=8)\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:5-8'}),\n 'F/E' : Item(props={}), # elision!\n 'F/E/alpha' : Item(new_content_for_alpha),\n 'F/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F' : Item(),\n 'F/E1' : Item(props={SVN_PROP_MERGEINFO :\n '/A/B/F/E:5'}),\n 'F/E1/alpha' : Item(new_content_for_alpha1),\n 'F/E1/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\")\n })\n expected_skip = wc.State(copy_of_B_path, { })\n svntest.actions.run_and_verify_merge(copy_of_B_path, '4', '8',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Test for part of Issue #2821, see\n # https://issues.apache.org/jira/browse/SVN-2821#desc22\n #\n # Revert all local changes.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # Make a text mod to A/copy-of-B/F/E/alpha\n newer_content_for_alpha = \"Conflicting content\"\n svntest.main.file_write(A_copy_of_B_F_E_alpha_path,\n newer_content_for_alpha)\n\n # Re-merge r5 to A/copy-of-B/F, this *should* be a no-op as the mergeinfo\n # on A/copy-of-B/F/E should prevent any attempt to merge r5 into that\n # subtree. The merge will leave a few local changes as mergeinfo is set\n # on A/copy-of-B/F, the mergeinfo on A/copy-of-B/F/E elides to it. 
The\n # mergeinfo on A/copy-of-B/F/E1 remains unchanged as that subtree was\n # untouched by the merge.\n expected_output = wc.State(copy_of_B_F_path, {})\n expected_mergeinfo_output = wc.State(copy_of_B_F_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(copy_of_B_F_path, {\n 'E' : Item(status=' U')\n })\n expected_status = wc.State(copy_of_B_F_path, {\n '' : Item(status=' M', wc_rev=8),\n 'E' : Item(status=' M', wc_rev=8),\n 'E/alpha' : Item(status='M ', wc_rev=8),\n 'E/beta' : Item(status=' ', wc_rev=8),\n 'E1' : Item(status=' ', wc_rev=8),\n 'E1/alpha' : Item(status=' ', wc_rev=8),\n 'E1/beta' : Item(status=' ', wc_rev=8),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/F:5'}),\n 'E' : Item(props={}),\n 'E/alpha' : Item(newer_content_for_alpha),\n 'E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'E1' : Item(props={SVN_PROP_MERGEINFO :\n '/A/B/F/E:5'}),\n 'E1/alpha' : Item(new_content_for_alpha1),\n 'E1/beta' : Item(\"This is the file 'beta'.\\n\")\n })\n expected_skip = wc.State(copy_of_B_F_path, { })\n svntest.actions.run_and_verify_merge(copy_of_B_F_path, '4', '5',\n sbox.repo_url + '/A/B/F', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\ndef tweak_src_then_merge_to_dest(sbox, src_path, dst_path,\n canon_src_path, contents, cur_rev):\n \"\"\"Edit src and commit it. This results in new_rev.\n Merge new_rev to dst_path. Return new_rev.\"\"\"\n\n wc_dir = sbox.wc_dir\n new_rev = cur_rev + 1\n svntest.main.file_write(src_path, contents)\n\n expected_output = svntest.wc.State(src_path, {\n '': Item(verb='Sending'),\n })\n\n expected_status = wc.State(src_path,\n { '': Item(wc_rev=new_rev, status=' ')})\n\n svntest.actions.run_and_verify_commit(src_path, expected_output,\n expected_status)\n\n # Update the WC to new_rev so that it would be easier to expect everyone\n # to be at new_rev.\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n\n # Merge new_rev of src_path to dst_path.\n\n expected_status = wc.State(dst_path,\n { '': Item(wc_rev=new_rev, status='MM')})\n\n merge_url = sbox.repo_url + '/' + canon_src_path\n if sys.platform == 'win32':\n merge_url = merge_url.replace('\\\\', '/')\n\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[new_rev]],\n ['U ' + dst_path + '\\n',\n ' U ' + dst_path + '\\n']),\n [], 'merge', '-c', str(new_rev), merge_url, dst_path)\n\n svntest.actions.run_and_verify_status(dst_path, expected_status)\n\n return new_rev\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef obey_reporter_api_semantics_while_doing_subtree_merges(sbox):\n \"drive reporter api in depth first order\"\n\n # Copy /A/D to /A/copy-of-D it results in rONE.\n # Create children at different hierarchies having some merge-info\n # to test the set_path calls on a reporter in a depth-first order.\n # On all 'file' descendants of /A/copy-of-D/ we run merges.\n # We create /A/D/umlaut directly over URL it results in rev rTWO.\n # When we merge rONE+1:TWO of /A/D on /A/copy-of-D it should merge smoothly.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n A_D_path = sbox.ospath('A/D')\n copy_of_A_D_path = sbox.ospath('A/copy-of-D')\n\n svntest.main.run_svn(None, \"cp\", A_D_path, copy_of_A_D_path)\n\n expected_output = svntest.wc.State(wc_dir, 
{\n 'A/copy-of-D' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/copy-of-D' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/G' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/G/pi' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/G/rho' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/G/tau' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/H' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/H/chi' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/H/omega' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/H/psi' : Item(status=' ', wc_rev=2),\n 'A/copy-of-D/gamma' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n\n cur_rev = 2\n for path in ([\"A\", \"D\", \"G\", \"pi\"],\n [\"A\", \"D\", \"G\", \"rho\"],\n [\"A\", \"D\", \"G\", \"tau\"],\n [\"A\", \"D\", \"H\", \"chi\"],\n [\"A\", \"D\", \"H\", \"omega\"],\n [\"A\", \"D\", \"H\", \"psi\"],\n [\"A\", \"D\", \"gamma\"]):\n path_name = os.path.join(wc_dir, *path)\n canon_path_name = os.path.join(*path)\n path[1] = \"copy-of-D\"\n copy_of_path_name = os.path.join(wc_dir, *path)\n var_name = 'new_content_for_' + path[len(path) - 1]\n file_contents = \"new content to \" + path[len(path) - 1] + \"\\n\"\n globals()[var_name] = file_contents\n cur_rev = tweak_src_then_merge_to_dest(sbox, path_name,\n copy_of_path_name, canon_path_name,\n file_contents, cur_rev)\n\n copy_of_A_D_wc_rev = cur_rev\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision ' + str(cur_rev+1) +\n '.\\n'],\n [],\n 'mkdir', sbox.repo_url + '/A/D/umlaut',\n '-m', \"log msg\")\n rev_to_merge_to_copy_of_D = cur_rev + 1\n\n # All the file descendants of /A/copy-of-D/ have already been merged\n # so the only notification we expect is for the added 'umlaut'.\n expected_output = wc.State(copy_of_A_D_path, {\n 'umlaut' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(copy_of_A_D_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(copy_of_A_D_path, {\n })\n # No subtree with explicit mergeinfo is affected by this merge, so they\n # all remain unchanged from before the merge. 
The only mergeinfo updated\n # is that on the target 'A/copy-of-D.\n expected_status = wc.State(copy_of_A_D_path, {\n '' : Item(status=' M', wc_rev=copy_of_A_D_wc_rev),\n 'G' : Item(status=' ', wc_rev=copy_of_A_D_wc_rev),\n 'G/pi' : Item(status='MM', wc_rev=copy_of_A_D_wc_rev),\n 'G/rho' : Item(status='MM', wc_rev=copy_of_A_D_wc_rev),\n 'G/tau' : Item(status='MM', wc_rev=copy_of_A_D_wc_rev),\n 'H' : Item(status=' ', wc_rev=copy_of_A_D_wc_rev),\n 'H/chi' : Item(status='MM', wc_rev=copy_of_A_D_wc_rev),\n 'H/omega' : Item(status='MM', wc_rev=copy_of_A_D_wc_rev),\n 'H/psi' : Item(status='MM', wc_rev=copy_of_A_D_wc_rev),\n 'gamma' : Item(status='MM', wc_rev=copy_of_A_D_wc_rev),\n 'umlaut' : Item(status='A ', copied='+', wc_rev='-'),\n })\n\n merged_rangelist = \"3-%d\" % rev_to_merge_to_copy_of_D\n\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D:' + merged_rangelist}),\n 'G' : Item(),\n 'G/pi' : Item(new_content_for_pi,\n props={SVN_PROP_MERGEINFO : '/A/D/G/pi:3'}),\n 'G/rho' : Item(new_content_for_rho,\n props={SVN_PROP_MERGEINFO : '/A/D/G/rho:4'}),\n 'G/tau' : Item(new_content_for_tau,\n props={SVN_PROP_MERGEINFO : '/A/D/G/tau:5'}),\n 'H' : Item(),\n 'H/chi' : Item(new_content_for_chi,\n props={SVN_PROP_MERGEINFO : '/A/D/H/chi:6'}),\n 'H/omega' : Item(new_content_for_omega,\n props={SVN_PROP_MERGEINFO : '/A/D/H/omega:7'}),\n 'H/psi' : Item(new_content_for_psi,\n props={SVN_PROP_MERGEINFO : '/A/D/H/psi:8'}),\n 'gamma' : Item(new_content_for_gamma,\n props={SVN_PROP_MERGEINFO : '/A/D/gamma:9'}),\n 'umlaut' : Item(),\n })\n expected_skip = wc.State(copy_of_A_D_path, { })\n svntest.actions.run_and_verify_merge(copy_of_A_D_path,\n 2,\n str(rev_to_merge_to_copy_of_D),\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issues(2733,2734)\ndef mergeinfo_inheritance(sbox):\n \"target inherits mergeinfo from nearest ancestor\"\n\n # Test for Issues #2733 and #2734.\n #\n # When the target of a merge has no explicit mergeinfo and the merge\n # would result in mergeinfo being added to the target which...\n #\n # ...is a subset of the *local* mergeinfo on one of the target's\n # ancestors (it's nearest ancestor takes precedence), then the merge is\n # not repeated and no mergeinfo should be set on the target (Issue #2734).\n #\n # OR\n #\n # ...is not a subset it's nearest ancestor, the target should inherit the\n # non-inersecting mergeinfo (local or committed, the former takes\n # precedence) from it's nearest ancestor (Issue #2733).\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n B_COPY_path = sbox.ospath('A_COPY/B')\n beta_COPY_path = sbox.ospath('A_COPY/B/E/beta')\n E_COPY_path = sbox.ospath('A_COPY/B/E')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n D_COPY_path = sbox.ospath('A_COPY/D')\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n\n # Now start merging...\n\n # Merge r4 into A_COPY/D/\n expected_output = wc.State(D_COPY_path, {\n 'G/rho' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(D_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(D_COPY_path, {\n })\n expected_status = wc.State(D_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'G' : Item(status=' ', wc_rev=2),\n 
'G/pi' : Item(status=' ', wc_rev=2),\n 'G/rho' : Item(status='M ', wc_rev=2),\n 'G/tau' : Item(status=' ', wc_rev=2),\n 'H' : Item(status=' ', wc_rev=2),\n 'H/chi' : Item(status=' ', wc_rev=2),\n 'H/psi' : Item(status=' ', wc_rev=2),\n 'H/omega' : Item(status=' ', wc_rev=2),\n 'gamma' : Item(status=' ', wc_rev=2),\n })\n # We test issue #2733 here (with a directory as the merge target).\n # r1 should be inherited from 'A_COPY'.\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D:4'}),\n 'G' : Item(),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"New content\"),\n 'G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'H' : Item(),\n 'H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\")\n })\n expected_skip = wc.State(D_COPY_path, { })\n svntest.actions.run_and_verify_merge(D_COPY_path, '3', '4',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merge r4 again, this time into A_COPY/D/G. An ancestor directory\n # (A_COPY/D) exists with identical local mergeinfo, so the merge\n # should not be repeated. We test issue #2734 here with (with a\n # directory as the merge target).\n expected_output = wc.State(G_COPY_path, { })\n # A_COPY/D/G gets mergeinfo set, but it immediately elides to A_COPY/D.\n expected_mergeinfo_output = wc.State(G_COPY_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(G_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_status = wc.State(G_COPY_path, {\n '' : Item(status=' ', wc_rev=2),\n 'pi' : Item(status=' ', wc_rev=2),\n 'rho' : Item(status='M ', wc_rev=2),\n 'tau' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n 'pi' : Item(\"This is the file 'pi'.\\n\"),\n 'rho' : Item(\"New content\"),\n 'tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_skip = wc.State(G_COPY_path, { })\n svntest.actions.run_and_verify_merge(G_COPY_path, '3', '4',\n sbox.repo_url + '/A/D/G', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merge r5 into A_COPY/B. 
Again, r1 should be inherited from\n # A_COPY (Issue #2733)\n expected_output = wc.State(B_COPY_path, {\n 'E/beta' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(B_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(B_COPY_path, {\n })\n expected_status = wc.State(B_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'E' : Item(status=' ', wc_rev=2),\n 'E/alpha' : Item(status=' ', wc_rev=2),\n 'E/beta' : Item(status='M ', wc_rev=2),\n 'lambda' : Item(status=' ', wc_rev=2),\n 'F' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:5'}),\n 'E' : Item(),\n 'E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(\"New content\"),\n 'F' : Item(),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\")\n })\n expected_skip = wc.State(B_COPY_path, { })\n\n svntest.actions.run_and_verify_merge(B_COPY_path, '4', '5',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merge r5 again, this time into A_COPY/B/E/beta. An ancestor\n # directory (A_COPY/B) exists with identical local mergeinfo, so\n # the merge should not be repeated (Issue #2734 with a file as the\n # merge target).\n expected_skip = wc.State(beta_COPY_path, { })\n\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn([], [], 'merge', '-c5',\n sbox.repo_url + '/A/B/E/beta',\n beta_COPY_path)\n\n # The merge wasn't repeated so beta shouldn't have any mergeinfo.\n # We are implicitly testing that without looking at the prop value\n # itself, just beta's prop modification status.\n expected_status = wc.State(beta_COPY_path, {\n '' : Item(status='M ', wc_rev=2),\n })\n svntest.actions.run_and_verify_status(beta_COPY_path, expected_status)\n\n # Merge r3 into A_COPY. A_COPY's has two subtrees with mergeinfo,\n # A_COPY/B/E/beta and A_COPY/D. 
Only the latter is effected by this\n # merge so only its mergeinfo is updated to include r3.\n expected_output = wc.State(A_COPY_path, {\n 'D/H/psi' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'B' : Item(status=' M', wc_rev=2),\n 'mu' : Item(status=' ', wc_rev=2),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha' : Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status='M ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' M', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status='M ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' ', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status='M ', wc_rev=2),\n 'D/H/omega' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3'}),\n 'B' : Item(props={SVN_PROP_MERGEINFO : '/A/B:5'}),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(props={SVN_PROP_MERGEINFO : '/A/D:3-4'}),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merge r6 into A_COPY/D/H/omega, it should inherit it's nearest\n # ancestor's (A_COPY/D) mergeinfo (Issue #2733 with a file as the\n # merge target).\n expected_skip = wc.State(omega_COPY_path, { })\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[6]],\n ['U ' + omega_COPY_path + '\\n',\n ' G ' + omega_COPY_path + '\\n']),\n [], 'merge', '-c6',\n sbox.repo_url + '/A/D/H/omega',\n omega_COPY_path)\n\n # Check that mergeinfo was properly set on A_COPY/D/H/omega\n svntest.actions.run_and_verify_svn([\"/A/D/H/omega:3-4,6\\n\"],\n [],\n 'propget', SVN_PROP_MERGEINFO,\n omega_COPY_path)\n\n # Given a merge target *without* any of the following:\n #\n # 1) Explicit mergeinfo set on itself in the WC\n # 2) Any WC ancestor to inherit mergeinfo from\n # 3) Any mergeinfo for the target in the repository\n #\n # Check that the target still inherits mergeinfo from it's nearest\n # repository ancestor.\n #\n # Commit all the merges thus far\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/B' : Item(verb='Sending'),\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D' : 
Item(verb='Sending'),\n 'A_COPY/D/G/rho' : Item(verb='Sending'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY', 'A_COPY/B', 'A_COPY/B/E/beta', 'A_COPY/D',\n 'A_COPY/D/G/rho', 'A_COPY/D/H/omega', 'A_COPY/D/H/psi',\n wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n # In single-db mode you can't create a disconnected working copy by just\n # copying a subdir\n\n ## Copy the subtree A_COPY/B/E from the working copy, making the\n ## disconnected WC E_only.\n #other_wc = sbox.add_wc_path('E_only')\n #svntest.actions.duplicate_dir(E_COPY_path, other_wc)\n #\n ## Update the disconnected WC it so it will get the most recent mergeinfo\n ## from the repos when merging.\n #svntest.actions.run_and_verify_svn(exp_noop_up_out(7), [], 'up',\n # other_wc)\n #\n ## Merge r5:4 into the root of the disconnected WC.\n ## E_only has no explicit mergeinfo and since it's the root of the WC\n ## cannot inherit any mergeinfo from a working copy ancestor path. Nor\n ## does it have any mergeinfo explicitly set on it in the repository.\n ## An ancestor path on the repository side, A_COPY/B does have the merge\n ## info '/A/B:5' however and E_only should inherit this, resulting in\n ## empty mergeinfo after the removal of r5 (A_COPY has mergeinfo of\n ## '/A:3' so this empty mergeinfo is needed to override that.\n #expected_output = wc.State(other_wc,\n # {'beta' : Item(status='U ')})\n #expected_mergeinfo_output = wc.State(other_wc, {\n # '' : Item(status=' G')\n # })\n #expected_elision_output = wc.State(other_wc, {\n # })\n #expected_status = wc.State(other_wc, {\n # '' : Item(status=' M', wc_rev=7),\n # 'alpha' : Item(status=' ', wc_rev=7),\n # 'beta' : Item(status='M ', wc_rev=7),\n # })\n #expected_disk = wc.State('', {\n # '' : Item(props={SVN_PROP_MERGEINFO : ''}),\n # 'alpha' : Item(\"This is the file 'alpha'.\\n\"),\n # 'beta' : Item(\"This is the file 'beta'.\\n\"),\n # })\n #expected_skip = wc.State(other_wc, { })\n #\n #svntest.actions.run_and_verify_merge(other_wc, '5', '4',\n # sbox.repo_url + '/A/B/E', None,\n # expected_output,\n # expected_mergeinfo_output,\n # expected_elision_output,\n # expected_disk,\n # expected_status,\n # expected_skip,\n # check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef mergeinfo_elision(sbox):\n \"mergeinfo elides to ancestor with identical info\"\n\n # When a merge would result in mergeinfo on a target which is identical\n # to mergeinfo (local or committed) on one of the node's ancestors (the\n # nearest ancestor takes precedence), then the mergeinfo elides from the\n # target to the nearest ancestor (e.g. 
no mergeinfo is set on the target\n # or committed mergeinfo is removed).\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n beta_COPY_path = sbox.ospath('A_COPY/B/E/beta')\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n\n # Now start merging...\n\n # Merge r5 into A_COPY/B/E/beta.\n expected_skip = wc.State(beta_COPY_path, { })\n\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]],\n ['U ' + beta_COPY_path + '\\n',\n ' U ' + beta_COPY_path + '\\n']),\n [], 'merge', '-c5',\n sbox.repo_url + '/A/B/E/beta',\n beta_COPY_path)\n\n # Check beta's status and props.\n expected_status = wc.State(beta_COPY_path, {\n '' : Item(status='MM', wc_rev=2),\n })\n svntest.actions.run_and_verify_status(beta_COPY_path, expected_status)\n\n svntest.actions.run_and_verify_svn([\"/A/B/E/beta:5\\n\"], [],\n 'propget', SVN_PROP_MERGEINFO,\n beta_COPY_path)\n\n # Commit the merge\n expected_output = wc.State(wc_dir, {\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY/B/E/beta', wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n # Update A_COPY to get all paths to the same working revision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(7), [],\n 'up', wc_dir)\n wc_status.tweak(wc_rev=7)\n\n # Merge r4 into A_COPY/D/G.\n expected_output = wc.State(G_COPY_path, {\n 'rho' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(G_COPY_path, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(G_COPY_path, {\n })\n expected_status = wc.State(G_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n 'pi' : Item(status=' ', wc_rev=7),\n 'rho' : Item(status='M ', wc_rev=7),\n 'tau' : Item(status=' ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G:4'}),\n 'pi' : Item(\"This is the file 'pi'.\\n\"),\n 'rho' : Item(\"New content\"),\n 'tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_skip = wc.State(G_COPY_path, { })\n\n svntest.actions.run_and_verify_merge(G_COPY_path, '3', '4',\n sbox.repo_url + '/A/D/G', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merge r3:6 into A_COPY. 
The merge doesn't touch either of A_COPY's\n # subtrees with explicit mergeinfo, so those are left alone.\n expected_output = wc.State(A_COPY_path, {\n 'D/H/omega' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n 'B' : Item(status=' ', wc_rev=7),\n 'mu' : Item(status=' ', wc_rev=7),\n 'B/E' : Item(status=' ', wc_rev=7),\n 'B/E/alpha' : Item(status=' ', wc_rev=7),\n 'B/E/beta' : Item(status=' ', wc_rev=7),\n 'B/lambda' : Item(status=' ', wc_rev=7),\n 'B/F' : Item(status=' ', wc_rev=7),\n 'C' : Item(status=' ', wc_rev=7),\n 'D' : Item(status=' ', wc_rev=7),\n 'D/G' : Item(status=' M', wc_rev=7),\n 'D/G/pi' : Item(status=' ', wc_rev=7),\n 'D/G/rho' : Item(status='M ', wc_rev=7),\n 'D/G/tau' : Item(status=' ', wc_rev=7),\n 'D/gamma' : Item(status=' ', wc_rev=7),\n 'D/H' : Item(status=' ', wc_rev=7),\n 'D/H/chi' : Item(status=' ', wc_rev=7),\n 'D/H/psi' : Item(status=' ', wc_rev=7),\n 'D/H/omega' : Item(status='M ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:4-6'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/B/E/beta:5'}),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G:4'}),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '3', '6',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n # New repeat the above merge but with the --record-only option.\n # This would result in identical mergeinfo\n # (r4-6) on A_COPY and two of its descendants, A_COPY/D/G and\n # A_COPY/B/E/beta, so the mergeinfo on the latter two should elide\n # to A_COPY. In the case of A_COPY/D/G this means its wholly uncommitted\n # mergeinfo is removed leaving no prop mods. 
In the case of\n # A_COPY/B/E/beta its committed mergeinfo prop is removed leaving a prop\n # change.\n\n expected_output = wc.State(A_COPY_path, {})\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' G'),\n 'D/G' : Item(status=' G'),\n 'B/E/beta' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status=' U'),\n 'D/G' : Item(status=' U'),\n })\n expected_status.tweak('B/E/beta', status=' M')\n expected_status.tweak('D/G', status=' ')\n expected_disk.tweak('B/E/beta', 'D/G', props={})\n svntest.actions.run_and_verify_merge(A_COPY_path, '3', '6',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--record-only',\n A_COPY_path)\n\n # Reverse merge r5 out of A_COPY/B/E/beta. The mergeinfo on\n # A_COPY/B/E/beta which previously elided will now return,\n # minus r5 of course.\n expected_skip = wc.State(beta_COPY_path, { })\n\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[-5]],\n ['U ' + beta_COPY_path + '\\n',\n ' G ' + beta_COPY_path + '\\n'],\n elides=True),\n [], 'merge', '-c-5',\n sbox.repo_url + '/A/B/E/beta',\n beta_COPY_path)\n\n # Check beta's status and props.\n expected_status = wc.State(beta_COPY_path, {\n '' : Item(status='MM', wc_rev=7),\n })\n svntest.actions.run_and_verify_status(beta_COPY_path, expected_status)\n\n svntest.actions.run_and_verify_svn([\"/A/B/E/beta:4,6\\n\"], [],\n 'propget', SVN_PROP_MERGEINFO,\n beta_COPY_path)\n\n # Merge r5 back into A_COPY/B/E/beta. 
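# (Aside, not part of the original test: beta's working mergeinfo is\n # currently '/A/B/E/beta:4,6'; adding r5 back gives the contiguous range\n # '4-6', which is exactly what beta would inherit from A_COPY's '/A:4-6',\n # so the property should elide.)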
Now the mergeinfo on the merge\n # target (A_COPY/B/E/beta) is identical to it's nearest ancestor with\n # mergeinfo (A_COPY) and so the former should elide.\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]],\n ['G ' + beta_COPY_path + '\\n',\n ' G ' + beta_COPY_path + '\\n', # Update mergeinfo\n ' U ' + beta_COPY_path + '\\n',], # Elide mereginfo,\n elides=True),\n [], 'merge', '-c5',\n sbox.repo_url + '/A/B/E/beta',\n beta_COPY_path)\n\n # Check beta's status and props.\n expected_status = wc.State(beta_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n })\n svntest.actions.run_and_verify_status(beta_COPY_path, expected_status)\n\n # Once again A_COPY/B/E/beta has no mergeinfo.\n svntest.actions.run_and_verify_svn([], '.*W200017: Property.*not found',\n 'propget', SVN_PROP_MERGEINFO,\n beta_COPY_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef mergeinfo_inheritance_and_discontinuous_ranges(sbox):\n \"discontinuous merges produce correct mergeinfo\"\n\n # When a merge target has no explicit mergeinfo and is subject\n # to multiple merges, the resulting mergeinfo on the target\n # should reflect the combination of the inherited mergeinfo\n # with each merge performed.\n #\n # Also tests implied merge source and target when only a revision\n # range is specified.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n A_url = sbox.repo_url + '/A'\n A_COPY_path = sbox.ospath('A_COPY')\n D_COPY_path = sbox.ospath('A_COPY/D')\n A_COPY_rho_path = sbox.ospath('A_COPY/D/G/rho')\n\n expected_disk, expected_status = set_up_branch(sbox)\n\n # Merge r4 into A_COPY\n saved_cwd = os.getcwd()\n\n os.chdir(A_COPY_path)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[4]],\n ['U ' + os.path.join(\"D\", \"G\", \"rho\") + '\\n',\n ' U .\\n']),\n [], 'merge', '-c4', A_url)\n os.chdir(saved_cwd)\n\n # Check the results of the merge.\n expected_status.tweak(\"A_COPY\", status=' M')\n expected_status.tweak(\"A_COPY/D/G/rho\", status='M ')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n svntest.actions.run_and_verify_svn([\"/A:4\\n\"], [],\n 'propget', SVN_PROP_MERGEINFO,\n A_COPY_path)\n\n # Merge r2:6 into A_COPY/D\n #\n # A_COPY/D should inherit the mergeinfo '/A:4' from A_COPY\n # combine it with the discontinous merges performed directly on\n # it (A/D/ 2:3 and A/D 4:6) resulting in '/A/D:3-6'.\n expected_output = wc.State(D_COPY_path, {\n 'H/psi' : Item(status='U '),\n 'H/omega' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(D_COPY_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(D_COPY_path, {\n })\n expected_status = wc.State(D_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'G' : Item(status=' ', wc_rev=2),\n 'G/pi' : Item(status=' ', wc_rev=2),\n 'G/rho' : Item(status='M ', wc_rev=2),\n 'G/tau' : Item(status=' ', wc_rev=2),\n 'H' : Item(status=' ', wc_rev=2),\n 'H/chi' : Item(status=' ', wc_rev=2),\n 'H/psi' : Item(status='M ', wc_rev=2),\n 'H/omega' : Item(status='M ', wc_rev=2),\n 'gamma' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D:3-6'}),\n 'G' : Item(),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"New content\"),\n 'G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'H' : Item(),\n 'H/chi' : Item(\"This is the file 
'chi'.\\n\"),\n 'H/psi' : Item(\"New content\"),\n 'H/omega' : Item(\"New content\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\")\n })\n expected_skip = wc.State(D_COPY_path, { })\n\n svntest.actions.run_and_verify_merge(D_COPY_path, '2', '6',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Wipe the memory of a portion of the previous merge...\n ### It'd be nice to use 'merge --record-only' here, but we can't (yet)\n ### wipe all ranges for a file due to the bug pointed out in r864719.\n mu_copy_path = os.path.join(A_COPY_path, 'mu')\n svntest.actions.run_and_verify_svn([\"property '\" + SVN_PROP_MERGEINFO\n + \"' set on '\" +\n mu_copy_path + \"'\\n\"], [], 'propset',\n SVN_PROP_MERGEINFO, '', mu_copy_path)\n # ...and confirm that we can commit the wiped mergeinfo...\n expected_output = wc.State(wc_dir, {\n 'A_COPY/mu' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n None,\n [],\n mu_copy_path)\n # ...and that the presence of the property is retained, even when\n # the value has been wiped.\n svntest.actions.run_and_verify_svn(['\\n'], [], 'propget',\n SVN_PROP_MERGEINFO, mu_copy_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(2754)\ndef merge_to_target_with_copied_children(sbox):\n \"merge works when target has copied children\"\n\n # Test for Issue #2754 Can't merge to target with copied/moved children\n\n sbox.build()\n wc_dir = sbox.wc_dir\n expected_disk, expected_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n D_COPY_path = sbox.ospath('A_COPY/D')\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n rho_COPY_COPY_path = sbox.ospath('A_COPY/D/G/rho_copy')\n\n # URL to URL copy A_COPY/D/G/rho to A_COPY/D/G/rho_copy\n svntest.actions.run_and_verify_svn(None, [], 'copy',\n sbox.repo_url + '/A_COPY/D/G/rho',\n sbox.repo_url + '/A_COPY/D/G/rho_copy',\n '-m', 'copy')\n\n # Update WC.\n expected_output = wc.State(wc_dir,\n {'A_COPY/D/G/rho_copy' : Item(status='A ')})\n expected_disk.add({\n 'A_COPY/D/G/rho_copy' : Item(\"This is the file 'rho'.\\n\", props={})\n })\n expected_status.tweak(wc_rev=7)\n expected_status.add({'A_COPY/D/G/rho_copy' : Item(status=' ', wc_rev=7)})\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n expected_disk,\n expected_status,\n check_props=True)\n\n # Merge r4 into A_COPY/D/G/rho_copy.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[4]],\n ['U ' + rho_COPY_COPY_path + '\\n',\n ' U ' + rho_COPY_COPY_path + '\\n']),\n [], 'merge', '-c4',\n sbox.repo_url + '/A/D/G/rho',\n rho_COPY_COPY_path)\n\n # Merge r3:5 into A_COPY/D/G.\n expected_output = wc.State(G_COPY_path, {\n 'rho' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(G_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(G_COPY_path, {\n })\n expected_status = wc.State(G_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n 'pi' : Item(status=' ', wc_rev=7),\n 'rho' : Item(status='M ', wc_rev=7),\n 'rho_copy' : Item(status='MM', wc_rev=7),\n 'tau' : Item(status=' ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G:4-5'}),\n 'pi' : Item(\"This is the file 'pi'.\\n\"),\n 'rho' : Item(\"New content\"),\n 'rho_copy' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/G/rho:4'}),\n 'tau' : Item(\"This is the 
file 'tau'.\\n\"),\n })\n expected_skip = wc.State(G_COPY_path, { })\n svntest.actions.run_and_verify_merge(G_COPY_path, '3', '5',\n sbox.repo_url + '/A/D/G', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(3188)\ndef merge_to_switched_path(sbox):\n \"merge to switched path does not inherit or elide\"\n\n # When the target of a merge is a switched path we don't inherit WC\n # mergeinfo from above the target or attempt to elide the mergeinfo\n # set on the target as a result of the merge.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n A_COPY_D_path = sbox.ospath('A_COPY/D')\n G_COPY_path = sbox.ospath('A/D/G_COPY')\n A_COPY_D_G_path = sbox.ospath('A_COPY/D/G')\n A_COPY_D_G_rho_path = sbox.ospath('A_COPY/D/G/rho')\n\n expected = svntest.verify.UnorderedOutput(\n [\"A \" + os.path.join(G_COPY_path, \"pi\") + \"\\n\",\n \"A \" + os.path.join(G_COPY_path, \"rho\") + \"\\n\",\n \"A \" + os.path.join(G_COPY_path, \"tau\") + \"\\n\",\n \"Checked out revision 6.\\n\",\n \"A \" + G_COPY_path + \"\\n\"])\n\n # r7 - Copy A/D/G to A/D/G_COPY and commit.\n svntest.actions.run_and_verify_svn(expected, [], 'copy',\n sbox.repo_url + \"/A/D/G\",\n G_COPY_path)\n\n expected_output = wc.State(wc_dir, {'A/D/G_COPY' : Item(verb='Adding')})\n wc_status.add({\n \"A/D/G_COPY\" : Item(status=' ', wc_rev=7),\n \"A/D/G_COPY/pi\" : Item(status=' ', wc_rev=7),\n \"A/D/G_COPY/rho\" : Item(status=' ', wc_rev=7),\n \"A/D/G_COPY/tau\" : Item(status=' ', wc_rev=7),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # r8 - modify and commit A/D/G_COPY/rho\n svntest.main.file_write(sbox.ospath('A/D/G_COPY/rho'),\n \"New *and* improved rho content\")\n expected_output = wc.State(wc_dir, {'A/D/G_COPY/rho' : Item(verb='Sending')})\n wc_status.tweak('A/D/G_COPY/rho', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Switch A_COPY/D/G to A/D/G.\n wc_disk.add({\n \"A\" : Item(),\n \"A/D/G_COPY\" : Item(),\n \"A/D/G_COPY/pi\" : Item(\"This is the file 'pi'.\\n\"),\n \"A/D/G_COPY/rho\" : Item(\"New *and* improved rho content\"),\n \"A/D/G_COPY/tau\" : Item(\"This is the file 'tau'.\\n\"),\n })\n wc_disk.tweak('A_COPY/D/G/rho',contents=\"New content\")\n wc_status.tweak(\"A_COPY/D/G\", wc_rev=8, switched='S')\n wc_status.tweak(\"A_COPY/D/G/pi\", wc_rev=8)\n wc_status.tweak(\"A_COPY/D/G/rho\", wc_rev=8)\n wc_status.tweak(\"A_COPY/D/G/tau\", wc_rev=8)\n expected_output = svntest.wc.State(sbox.wc_dir, {\n \"A_COPY/D/G/rho\" : Item(status='U '),\n })\n svntest.actions.run_and_verify_switch(sbox.wc_dir, A_COPY_D_G_path,\n sbox.repo_url + \"/A/D/G\",\n expected_output, wc_disk, wc_status,\n [], 1)\n\n # Update working copy to allow elision (if any).\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [],\n 'up', wc_dir)\n\n # Set some mergeinfo on a working copy parent of our switched subtree\n # A_COPY/D/G. 
Because the subtree is switched it should *not* inherit\n # this mergeinfo.\n svntest.actions.run_and_verify_svn([\"property '\" + SVN_PROP_MERGEINFO +\n \"' set on '\" + A_COPY_path + \"'\" +\n \"\\n\"], [], 'ps', SVN_PROP_MERGEINFO,\n '/A:4', A_COPY_path)\n\n # Merge r8 from A/D/G_COPY into our switched target A_COPY/D/G.\n # A_COPY/D/G should get mergeinfo for r8 as a result of the merge,\n # but because it's switched should not inherit the mergeinfo from\n # its nearest WC ancestor with mergeinfo (A_COPY: svn:mergeinfo : /A:4)\n expected_output = wc.State(A_COPY_D_G_path, {\n 'rho' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_D_G_path, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_COPY_D_G_path, {\n })\n # Note: A_COPY/D/G won't show as switched.\n expected_status = wc.State(A_COPY_D_G_path, {\n '' : Item(status=' M', wc_rev=8),\n 'pi' : Item(status=' ', wc_rev=8),\n 'rho' : Item(status='M ', wc_rev=8),\n 'tau' : Item(status=' ', wc_rev=8),\n })\n expected_status.tweak('', switched='S')\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G_COPY:8'}),\n 'pi' : Item(\"This is the file 'pi'.\\n\"),\n 'rho' : Item(\"New *and* improved rho content\"),\n 'tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_D_G_path, { })\n\n svntest.actions.run_and_verify_merge(A_COPY_D_G_path, '7', '8',\n sbox.repo_url + '/A/D/G_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n # Check that the mergeinfo set on a switched target can elide to the\n # repository.\n #\n # Specifically this is testing the \"switched target\" portions of\n # issue #3188 'Mergeinfo on switched targets/subtrees should\n # elide to repos'.\n #\n # Revert the previous merge and manually set 'svn:mergeinfo : /A/D:4'\n # on 'merge_tests-1\\A_COPY\\D'. Now merge -c-4 from /A/D/G into A_COPY/D/G.\n # This should produce no mergeinfo on A_COPY/D/G'. 
If the A_COPY/D/G was\n # unswitched this merge would normally set empty mergeinfo on A_COPY/D/G,\n # but as it is switched this empty mergeinfo just elides to the\n # repository (empty mergeinfo on a path can elide if that path doesn't\n # inherit *any* mergeinfo).\n svntest.actions.run_and_verify_svn([\"Reverted '\" + A_COPY_path+ \"'\\n\",\n \"Reverted '\" + A_COPY_D_G_path+ \"'\\n\",\n \"Reverted '\" + A_COPY_D_G_rho_path +\n \"'\\n\"],\n [], 'revert', '-R', wc_dir)\n svntest.actions.run_and_verify_svn([\"property '\" + SVN_PROP_MERGEINFO +\n \"' set on '\" + A_COPY_D_path+ \"'\" +\n \"\\n\"], [], 'ps', SVN_PROP_MERGEINFO,\n '/A/D:4', A_COPY_D_path)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[-4]],\n ['U ' + A_COPY_D_G_rho_path + '\\n',\n ' U ' + A_COPY_D_G_path + '\\n'],\n elides=True),\n [], 'merge', '-c-4',\n sbox.repo_url + '/A/D/G_COPY',\n A_COPY_D_G_path)\n wc_status.tweak(\"A_COPY/D\", status=' M')\n wc_status.tweak(\"A_COPY/D/G/rho\", status='M ')\n wc_status.tweak(wc_rev=8)\n svntest.actions.run_and_verify_status(wc_dir, wc_status)\n check_mergeinfo_recursively(A_COPY_D_path,\n { A_COPY_D_path : '/A/D:4' })\n\n#----------------------------------------------------------------------\n# Test for issues\n#\n# 2823: Account for mergeinfo differences for switched\n# directories when gathering mergeinfo\n#\n# 2839: Support non-inheritable mergeinfo revision ranges\n#\n# 3187: Reverse merges don't work properly with\n# non-inheritable ranges.\n#\n# 3188: Mergeinfo on switched targets/subtrees should\n# elide to repos\n@SkipUnless(server_has_mergeinfo)\n@Issue(2823,2839,3187,3188,4056)\ndef merge_to_path_with_switched_children(sbox):\n \"merge to path with switched children\"\n\n # Merging to a target with switched children requires special handling\n # to keep mergeinfo correct:\n #\n # 1) If the target of a merge has switched children without explicit\n # mergeinfo, the switched children should get mergeinfo set on\n # them as a result of the merge. This mergeinfo includes the\n # mergeinfo resulting from the merge *and* any mergeinfo inherited\n # from the repos for the switched path.\n #\n # 2) Mergeinfo on switched children should never elide.\n #\n # 3) The path the switched child overrides cannot be modified by the\n # merge (it isn't present in the WC) so should not inherit any\n # mergeinfo added as a result of the merge. 
To prevent this, the\n # immediate parent of any switched child should have non-inheritable\n # mergeinfo added/modified for the merge performed.\n #\n # 4) Because of 3, siblings of switched children will not inherit the\n # mergeinfo resulting from the merge, so must get their own, full set\n # of mergeinfo.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, False, 3)\n\n # Some paths we'll care about\n D_path = sbox.ospath('A/D')\n A_COPY_path = sbox.ospath('A_COPY')\n A_COPY_beta_path = sbox.ospath('A_COPY/B/E/beta')\n A_COPY_chi_path = sbox.ospath('A_COPY/D/H/chi')\n A_COPY_omega_path = sbox.ospath('A_COPY/D/H/omega')\n A_COPY_psi_path = sbox.ospath('A_COPY/D/H/psi')\n A_COPY_G_path = sbox.ospath('A_COPY/D/G')\n A_COPY_rho_path = sbox.ospath('A_COPY/D/G/rho')\n A_COPY_H_path = sbox.ospath('A_COPY/D/H')\n A_COPY_D_path = sbox.ospath('A_COPY/D')\n A_COPY_gamma_path = sbox.ospath('A_COPY/D/gamma')\n H_COPY_2_path = sbox.ospath('A_COPY_2/D/H')\n\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [], 'up',\n wc_dir)\n wc_status.tweak(wc_rev=8)\n\n # Switch a file and dir path in the branch:\n\n # Switch A_COPY/D/G to A_COPY_2/D/G.\n wc_status.tweak(\"A_COPY/D/G\", switched='S')\n expected_output = svntest.wc.State(sbox.wc_dir, {})\n svntest.actions.run_and_verify_switch(sbox.wc_dir, A_COPY_G_path,\n sbox.repo_url + \"/A_COPY_2/D/G\",\n expected_output, wc_disk, wc_status,\n [], 1)\n\n # Switch A_COPY/D/G/rho to A_COPY_3/D/G/rho.\n wc_status.tweak(\"A_COPY/D/G/rho\", switched='S')\n expected_output = svntest.wc.State(sbox.wc_dir, {})\n svntest.actions.run_and_verify_switch(sbox.wc_dir, A_COPY_rho_path,\n sbox.repo_url + \"/A_COPY_3/D/G/rho\",\n expected_output, wc_disk, wc_status,\n [], 1)\n\n # Switch A_COPY/D/H/psi to A_COPY_2/D/H/psi.\n wc_status.tweak(\"A_COPY/D/H/psi\", switched='S')\n expected_output = svntest.wc.State(sbox.wc_dir, {})\n svntest.actions.run_and_verify_switch(sbox.wc_dir, A_COPY_psi_path,\n sbox.repo_url + \"/A_COPY_2/D/H/psi\",\n expected_output, wc_disk, wc_status,\n [], 1)\n\n # Target with switched file child:\n #\n # Merge r8 from A/D/H into A_COPY/D/H. 
The switched child of\n # A_COPY/D/H, file A_COPY/D/H/psi (which has no mergeinfo prior\n # to the merge), is unaffected by the merge so does not get it's\n # own explicit mergeinfo.\n #\n # A_COPY/D/H/psi's parent A_COPY/D/H has no pre-exiting explicit\n # mergeinfo so should get its own mergeinfo, the non-inheritable\n # r8 resulting from the merge.\n #\n # A_COPY/D/H/psi's unswitched sibling, A_COPY/D/H/omega is affected\n # by the merge but won't inherit r8 from A_COPY/D/H, so it needs its\n # own mergeinfo.\n expected_output = wc.State(A_COPY_H_path, {\n 'omega' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_H_path, {\n '' : Item(status=' U'),\n 'omega' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_COPY_H_path, {\n 'omega' : Item(status=' U')\n })\n expected_status = wc.State(A_COPY_H_path, {\n '' : Item(status=' M', wc_rev=8),\n 'psi' : Item(status=' ', wc_rev=8, switched='S'),\n 'omega' : Item(status='M ', wc_rev=8),\n 'chi' : Item(status=' ', wc_rev=8),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:8'}),\n 'psi' : Item(\"This is the file 'psi'.\\n\"),\n 'omega' : Item(\"New content\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_H_path, { })\n\n svntest.actions.run_and_verify_merge(A_COPY_H_path, '7', '8',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n # Target with switched dir child:\n #\n # Merge r6 from A/D into A_COPY/D. The only subtrees with explicit\n # mergeinfo (or switched) that are affected by the merge are A_COPY/D/G\n # and A_COPY/D/G/rho. Only these two subtrees, and the target itself,\n # should receive mergeinfo updates.\n expected_output = wc.State(A_COPY_D_path, {\n 'G/rho' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_D_path, {\n '' : Item(status=' U'),\n 'G' : Item(status=' U'),\n 'G/rho' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_COPY_D_path, {\n })\n expected_status_D = wc.State(A_COPY_D_path, {\n '' : Item(status=' M', wc_rev=8),\n 'H' : Item(status=' M', wc_rev=8),\n 'H/chi' : Item(status=' ', wc_rev=8),\n 'H/omega' : Item(status='M ', wc_rev=8),\n 'H/psi' : Item(status=' ', wc_rev=8, switched='S'),\n 'G' : Item(status=' M', wc_rev=8, switched='S'),\n 'G/pi' : Item(status=' ', wc_rev=8),\n 'G/rho' : Item(status='MM', wc_rev=8, switched='S'),\n 'G/tau' : Item(status=' ', wc_rev=8),\n 'gamma' : Item(status=' ', wc_rev=8),\n })\n expected_disk_D = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D:6*'}),\n 'H' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:8'}),\n 'H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'H/omega' : Item(\"New content\"),\n 'H/psi' : Item(\"This is the file 'psi'.\\n\",),\n 'G' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G:6*'}),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/G/rho:6'}),\n 'G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\"),\n })\n expected_skip_D = wc.State(A_COPY_D_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_D_path, '5', '6',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk_D,\n expected_status_D, expected_skip_D,\n check_props=True)\n\n\n # Merge r5 from A/D into A_COPY/D. 
This updates the mergeinfo on the\n # target A_COPY\\D because the target is always updated. It also updates\n # the mergeinfo on A_COPY\\D\\H because that path has explicit mergeinfo\n # and has a subtree affected by the merge. Lastly, mergeinfo on\n # A_COPY/D/H/psi is added because that path is switched.\n expected_output = wc.State(A_COPY_D_path, {\n 'H/psi' : Item(status='U ')})\n expected_mergeinfo_output = wc.State(A_COPY_D_path, {\n '' : Item(status=' G'),\n 'H' : Item(status=' G'),\n 'H/psi' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_COPY_D_path, {\n })\n expected_disk_D.tweak('', props={SVN_PROP_MERGEINFO : '/A/D:5,6*'})\n expected_disk_D.tweak('H', props={SVN_PROP_MERGEINFO : '/A/D/H:5*,8'})\n expected_disk_D.tweak('H/psi', contents=\"New content\",\n props={SVN_PROP_MERGEINFO :'/A/D/H/psi:5'})\n expected_status_D.tweak('H/psi', status='MM')\n svntest.actions.run_and_verify_merge(A_COPY_D_path, '4', '5',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk_D,\n expected_status_D, expected_skip_D,\n check_props=True)\n\n # Finally, merge r4:8 into A_COPY. A_COPY gets mergeinfo for r5-8 added but\n # since none of A_COPY's subtrees with mergeinfo are affected, none of them\n # get any mergeinfo changes.\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=8),\n 'B' : Item(status=' ', wc_rev=8),\n 'mu' : Item(status=' ', wc_rev=8),\n 'B/E' : Item(status=' ', wc_rev=8),\n 'B/E/alpha' : Item(status=' ', wc_rev=8),\n 'B/E/beta' : Item(status='M ', wc_rev=8),\n 'B/lambda' : Item(status=' ', wc_rev=8),\n 'B/F' : Item(status=' ', wc_rev=8),\n 'C' : Item(status=' ', wc_rev=8),\n 'D' : Item(status=' M', wc_rev=8),\n 'D/G' : Item(status=' M', wc_rev=8, switched='S'),\n 'D/G/pi' : Item(status=' ', wc_rev=8),\n 'D/G/rho' : Item(status='MM', wc_rev=8, switched='S'),\n 'D/G/tau' : Item(status=' ', wc_rev=8),\n 'D/gamma' : Item(status=' ', wc_rev=8),\n 'D/H' : Item(status=' M', wc_rev=8),\n 'D/H/chi' : Item(status=' ', wc_rev=8),\n 'D/H/psi' : Item(status='MM', wc_rev=8, switched='S'),\n 'D/H/omega' : Item(status='M ', wc_rev=8),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5-8'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(props={SVN_PROP_MERGEINFO : '/A/D:5,6*'}),\n 'D/G' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G:6*'}),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/G/rho:6'}),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:5*,8'}),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/psi:5'}),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '4', '8',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n 
expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n # Commit changes thus far.\n expected_output = svntest.wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D' : Item(verb='Sending'),\n 'A_COPY/D/G' : Item(verb='Sending'),\n 'A_COPY/D/G/rho' : Item(verb='Sending'),\n 'A_COPY/D/H' : Item(verb='Sending'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY', 'A_COPY/B/E/beta', 'A_COPY/D', 'A_COPY/D/G',\n 'A_COPY/D/G/rho', 'A_COPY/D/H', 'A_COPY/D/H/omega',\n 'A_COPY/D/H/psi', wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Unswitch A_COPY/D/H/psi.\n expected_output = svntest.wc.State(wc_dir, {\n 'A_COPY/D/H/psi' : Item(status='UU')})\n wc_status.tweak(\"A_COPY/D/H/psi\", switched=None, wc_rev=9)\n wc_disk.tweak(\"A_COPY\",\n props={SVN_PROP_MERGEINFO : '/A:5-8'})\n wc_disk.tweak(\"A_COPY/B/E/beta\",\n contents=\"New content\")\n wc_disk.tweak(\"A_COPY/D\",\n props={SVN_PROP_MERGEINFO : '/A/D:5,6*'})\n wc_disk.tweak(\"A_COPY/D/G\",\n props={SVN_PROP_MERGEINFO : '/A/D/G:6*'})\n wc_disk.tweak(\"A_COPY/D/G/rho\",\n contents=\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/G/rho:6'})\n wc_disk.tweak(\"A_COPY/D/H\",\n props={SVN_PROP_MERGEINFO : '/A/D/H:5*,8'})\n wc_disk.tweak(\"A_COPY/D/H/omega\",\n contents=\"New content\")\n wc_disk.tweak(\"A_COPY_2\", props={})\n svntest.actions.run_and_verify_switch(sbox.wc_dir, A_COPY_psi_path,\n sbox.repo_url + \"/A_COPY/D/H/psi\",\n expected_output, wc_disk, wc_status,\n [], 1)\n\n # Non-inheritable mergeinfo ranges on a target don't prevent repeat\n # merges of that range on the target's children.\n #\n # Non-inheritable mergeinfo ranges on a target are removed if the target\n # no longer has any switched children and a repeat merge is performed.\n #\n # Merge r4:8 from A/D/H into A_COPY/D/H. A_COPY/D/H already has mergeinfo\n # for r5 and r8 but it is marked as uninheritable so the repeat merge is\n # allowed on its children, notably the now unswitched A_COPY/D/H/psi.\n # Since A_COPY/D/H no longer has any switched children and the merge of\n # r4:8 has been repeated the previously uninheritable ranges 5* and 8* on\n # A_COPY/D/H are made inheritable and combined with r6-7. 
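# (Aside, not part of the original test: a trailing '*' in mergeinfo marks\n # a non-inheritable range, e.g. '/A/D/H:5*,8' means r5 applies to\n # A_COPY/D/H itself but is not inherited by its children, while r8 is\n # inheritable; once the ranges become inheritable and r6-7 are added, the\n # expected value below is the single range '/A/D/H:5-8'.)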
A_COPY/D/H/omega\n # has explicit mergeinfo, but is not touched by the merge, so is left as-is.\n expected_output = wc.State(A_COPY_H_path, {\n 'psi' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_H_path, {\n '' : Item(status=' U'),\n 'psi' : Item(status=' G')\n })\n expected_elision_output = wc.State(A_COPY_H_path, {\n 'psi' : Item(status=' U')\n })\n expected_status = wc.State(A_COPY_H_path, {\n '' : Item(status=' M', wc_rev=9),\n 'psi' : Item(status='M ', wc_rev=9),\n 'omega' : Item(status=' ', wc_rev=9),\n 'chi' : Item(status=' ', wc_rev=8),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:5-8'}),\n 'psi' : Item(\"New content\"),\n 'omega' : Item(\"New content\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_H_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_H_path, '4', '8',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [],\n True, False, '--allow-mixed-revisions',\n A_COPY_H_path)\n\n # Non-inheritable mergeinfo ranges on a target do prevent repeat\n # merges on the target itself.\n #\n # Add a prop A/D and commit it as r10. Merge r10 into A_COPY/D. Since\n # A_COPY/D has a switched child it gets r10 added as a non-inheritable\n # range. Repeat the same merge checking that no repeat merge is\n # attempted on A_COPY/D.\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n D_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', D_path)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D' : Item(verb='Sending'),\n 'A_COPY/D/H' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY/D', wc_rev=9)\n wc_status.tweak('A/D', 'A_COPY/D/H', 'A_COPY/D/H/psi', wc_rev=10)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n expected_output = wc.State(A_COPY_D_path, {\n '' : Item(status=' U')\n })\n expected_mergeinfo_output = wc.State(A_COPY_D_path, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_COPY_D_path, {\n })\n # Reuse expected status and disk from last merge to A_COPY/D\n expected_status_D.tweak(status=' ')\n expected_status_D.tweak('', status=' M', wc_rev=9)\n expected_status_D.tweak('H', wc_rev=10)\n expected_status_D.tweak('H/psi', wc_rev=10, switched=None)\n expected_status_D.tweak('H/omega', wc_rev=9)\n expected_status_D.tweak('G', 'G/rho', switched='S', wc_rev=9)\n expected_disk_D.tweak('', props={SVN_PROP_MERGEINFO : '/A/D:5,6*,10',\n \"prop:name\" : \"propval\"})\n expected_disk_D.tweak('G/rho',\n props={SVN_PROP_MERGEINFO : '/A/D/G/rho:6'})\n expected_disk_D.tweak('H', props={SVN_PROP_MERGEINFO : '/A/D/H:5-8'})\n expected_disk_D.tweak('H/psi', contents=\"New content\", props={})\n svntest.actions.run_and_verify_merge(A_COPY_D_path, '9', '10',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk_D,\n expected_status_D, expected_skip_D,\n [],\n True, False, '--allow-mixed-revisions',\n A_COPY_D_path)\n # Repeated merge is a no-op, though we still see the notification reporting\n # the mergeinfo describing the merge has been recorded, though this time it\n # is a ' G' notification because there is a local mergeinfo change.\n expected_output = wc.State(A_COPY_D_path, {})\n expected_mergeinfo_output = wc.State(A_COPY_D_path, {\n '' : Item(status=' G')\n })\n 
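# (Aside, not part of the original test: in these expected states the\n # two-column codes read <text><props>, e.g. 'U ' = text changed,\n # ' U' = property changed, ' G' = property change merged with a local\n # modification, and 'MM' = both text and props modified.)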
svntest.actions.run_and_verify_merge(A_COPY_D_path, '9', '10',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk_D,\n expected_status_D, expected_skip_D,\n [],\n True, False, '--allow-mixed-revisions',\n A_COPY_D_path)\n\n # Test issue #3187 'Reverse merges don't work properly with\n # non-inheritable ranges'.\n #\n # Test the \"switched subtrees\" portion of issue #3188 'Mergeinfo on\n # switched targets/subtrees should elide to repos'.\n #\n # Reverse merge r5-8, this should revert all the subtree merges done to\n # A_COPY thus far and remove all mergeinfo.\n\n # Revert all local changes. This leaves just the mergeinfo for r5-8\n # on A_COPY and its various subtrees.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # Update merge target so working revisions are uniform and all\n # possible elision occurs.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(10), [],\n 'up', A_COPY_path)\n\n # Do the reverse merge.\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U '),\n 'D/G/rho' : Item(status='U '),\n 'D/H/omega' : Item(status='U '),\n 'D/H/psi' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D' : Item(status=' U'),\n 'D/G' : Item(status=' U'),\n 'D/G/rho' : Item(status=' U'),\n 'D/H' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D' : Item(status=' U'),\n 'D/G' : Item(status=' U'),\n 'D/G/rho' : Item(status=' U'),\n 'D/H' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=10),\n 'B' : Item(status=' ', wc_rev=10),\n 'mu' : Item(status=' ', wc_rev=10),\n 'B/E' : Item(status=' ', wc_rev=10),\n 'B/E/alpha' : Item(status=' ', wc_rev=10),\n 'B/E/beta' : Item(status='M ', wc_rev=10),\n 'B/lambda' : Item(status=' ', wc_rev=10),\n 'B/F' : Item(status=' ', wc_rev=10),\n 'C' : Item(status=' ', wc_rev=10),\n 'D' : Item(status=' M', wc_rev=10),\n 'D/G' : Item(status=' M', wc_rev=10, switched='S'),\n 'D/G/pi' : Item(status=' ', wc_rev=10),\n 'D/G/rho' : Item(status='MM', wc_rev=10, switched='S'),\n 'D/G/tau' : Item(status=' ', wc_rev=10),\n 'D/gamma' : Item(status=' ', wc_rev=10),\n 'D/H' : Item(status=' M', wc_rev=10),\n 'D/H/chi' : Item(status=' ', wc_rev=10),\n 'D/H/psi' : Item(status='M ', wc_rev=10),\n 'D/H/omega' : Item(status='M ', wc_rev=10),\n })\n expected_disk = wc.State('', {\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '8', '4',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n 
check_props=True)\n\n#----------------------------------------------------------------------\n# Test for issue 2047: Merge from parent dir fails while it succeeds from\n# the direct dir\n@Issue(2047)\ndef merge_with_implicit_target_file(sbox):\n \"merge a change to a file, using relative path\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make a change to A/mu, then revert it using 'svn merge -r 2:1 A/mu'\n\n # change A/mu and commit\n A_path = sbox.ospath('A')\n mu_path = os.path.join(A_path, 'mu')\n\n svntest.main.file_append(mu_path, \"A whole new line.\\n\")\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', wc_rev=2)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Update to revision 2.\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n\n # Revert the change committed in r2\n os.chdir(wc_dir)\n\n # run_and_verify_merge doesn't accept file paths.\n svntest.actions.run_and_verify_svn(None, [], 'merge', '-r', '2:1',\n 'A/mu')\n\n#----------------------------------------------------------------------\n# Test practical application of issue #2769 fix, empty rev range elision,\n# and elision to the repos.\n@Issue(2769)\n@SkipUnless(server_has_mergeinfo)\ndef empty_mergeinfo(sbox):\n \"mergeinfo can explicitly be empty\"\n\n # A bit o' history: The fix for issue #2769 originally permitted mergeinfo\n # with empty range lists and as a result we permitted partial elision and\n # had a whole slew of tests here for that. But the fix of issue #3029 now\n # prevents svn ps or svn merge from creating mergeinfo with paths mapped to\n # empty ranges, only empty mergeinfo is allowed. As a result this test now\n # covers the following areas:\n #\n # A) Merging a set of revisions into a path, then reverse merging the\n # same set out of a subtree of path results in empty mergeinfo\n # (i.e. \"\") on the subtree.\n #\n # B) Empty mergeinfo elides to empty mergeinfo.\n #\n # C) If a merge sets empty mergeinfo on its target and that target has\n # no ancestor in either the WC or the repository with explicit\n # mergeinfo, then the target's mergeinfo is removed (a.k.a. elides\n # to nothing).\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n\n # Test area A -- Merge r2:4 into A_COPY then reverse merge 4:2 to\n # A_COPY/D/G. 
A_COPY/D/G should end up with empty mergeinfo to\n # override that of A_COPY.\n expected_output = wc.State(A_COPY_path, {\n 'D/H/psi' : Item(status='U '),\n 'D/G/rho' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'B' : Item(status=' ', wc_rev=2),\n 'mu' : Item(status=' ', wc_rev=2),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha' : Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status=' ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' ', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status='M ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' ', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status='M ', wc_rev=2),\n 'D/H/omega' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3-4'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '4',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n # Now do the reverse merge into the subtree.\n expected_output = wc.State(H_COPY_path, {\n 'psi' : Item(status='G '),\n })\n expected_mergeinfo_output = wc.State(H_COPY_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(H_COPY_path, {\n })\n expected_status = wc.State(H_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'chi' : Item(status=' ', wc_rev=2),\n 'psi' : Item(status=' ', wc_rev=2),\n 'omega' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : ''}),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n 'psi' : Item(\"This is the file 'psi'.\\n\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(H_COPY_path, { })\n svntest.actions.run_and_verify_merge(H_COPY_path, '4', '2',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Test areas B and C -- Reverse merge r3 into A_COPY, this would result in\n # empty mergeinfo on A_COPY and A_COPY/D/H, but the empty mergeinfo on the\n # latter elides to the former. And then the empty mergeinfo on A_COPY,\n # which has no parent with explicit mergeinfo to override (in either the WC\n # or the repos) itself elides. 
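# (Aside, not part of the original test: empty mergeinfo is the literal\n # empty property value and means 'inherit nothing', e.g. with A_COPY\n # carrying '/A:3-4', an explicit '' on A_COPY/D/H overrides inheritance\n # for that subtree; once the parent's mergeinfo is reverse-merged away\n # there is nothing left to override, so the empty value itself elides\n # completely.)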
This leaves the WC in the same unmodified\n # state as after the call to set_up_branch().\n expected_output = expected_merge_output(\n [[4,3]], ['G ' + rho_COPY_path + '\\n',\n ' G ' + A_COPY_path + '\\n',\n ' U ' + H_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',],\n elides=True)\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'merge', '-r4:2',\n sbox.repo_url + '/A',\n A_COPY_path)\n svntest.actions.run_and_verify_status(wc_dir, wc_status)\n # Check that A_COPY's mergeinfo is gone.\n svntest.actions.run_and_verify_svn([], '.*W200017: Property.*not found',\n 'pg', 'svn:mergeinfo',\n A_COPY_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(2781)\ndef prop_add_to_child_with_mergeinfo(sbox):\n \"merge adding prop to child of merge target works\"\n\n # Test for Issue #2781 Prop add to child of merge target corrupts WC if\n # child has mergeinfo.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n expected_disk, expected_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n beta_path = sbox.ospath('A/B/E/beta')\n beta_COPY_path = sbox.ospath('A_COPY/B/E/beta')\n B_COPY_path = sbox.ospath('A_COPY/B')\n\n # Set a non-mergeinfo prop on a file.\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n beta_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', beta_path)\n expected_disk.tweak('A/B/E/beta', props={'prop:name' : 'propval'})\n expected_status.tweak('A/B/E/beta', wc_rev=7)\n expected_output = wc.State(wc_dir,\n {'A/B/E/beta' : Item(verb='Sending')})\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Merge r4:5 from A/B/E/beta into A_COPY/B/E/beta.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]],\n ['U ' + beta_COPY_path +'\\n',\n ' U ' + beta_COPY_path +'\\n',]),\n [], 'merge', '-c5',\n sbox.repo_url + '/A/B/E/beta',\n beta_COPY_path)\n\n # Merge r6:7 into A_COPY/B. 
In issue #2781 this adds a bogus\n # and incomplete entry in A_COPY/B/.svn/entries for 'beta'.\n expected_output = wc.State(B_COPY_path, {\n 'E/beta' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(B_COPY_path, {\n '' : Item(status=' U'),\n 'E/beta' : Item(status=' G'),\n })\n expected_elision_output = wc.State(B_COPY_path, {\n })\n expected_status = wc.State(B_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'E' : Item(status=' ', wc_rev=2),\n 'E/alpha' : Item(status=' ', wc_rev=2),\n 'E/beta' : Item(status='MM', wc_rev=2),\n 'lambda' : Item(status=' ', wc_rev=2),\n 'F' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:7'}),\n 'E' : Item(),\n 'E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(contents=\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/B/E/beta:5,7',\n 'prop:name' : 'propval'}),\n 'F' : Item(),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\")\n })\n expected_skip = wc.State(B_COPY_path, { })\n svntest.actions.run_and_verify_merge(B_COPY_path, '6', '7',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@Issue(2788,3383)\ndef foreign_repos_does_not_update_mergeinfo(sbox):\n \"set no mergeinfo when merging from foreign repos\"\n\n # Test for issue #2788 and issue #3383.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n expected_disk, expected_status = set_up_branch(sbox)\n\n # Set up for test of issue #2788.\n\n # Create a second repository with the same greek tree\n repo_dir = sbox.repo_dir\n other_repo_dir, other_repo_url = sbox.add_repo_path(\"other\")\n other_wc_dir = sbox.add_wc_path(\"other\")\n svntest.main.copy_repos(repo_dir, other_repo_dir, 6, 1)\n\n # Merge r3:4 (using implied peg revisions) from 'other' repos into\n # A_COPY/D/G. Merge should succeed, but no mergeinfo should be set.\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n svntest.actions.run_and_verify_svn(expected_merge_output([[4]],\n 'U ' +\n os.path.join(G_COPY_path,\n \"rho\") + '\\n', True),\n [], 'merge', '-c4',\n other_repo_url + '/A/D/G',\n G_COPY_path)\n\n # Merge r4:5 (using explicit peg revisions) from 'other' repos into\n # A_COPY/B/E. Merge should succeed, but no mergeinfo should be set.\n E_COPY_path = sbox.ospath('A_COPY/B/E')\n svntest.actions.run_and_verify_svn(expected_merge_output([[5]],\n 'U ' +\n os.path.join(E_COPY_path,\n \"beta\") +'\\n', True),\n [], 'merge',\n other_repo_url + '/A/B/E@4',\n other_repo_url + '/A/B/E@5',\n E_COPY_path)\n\n expected_status.tweak('A_COPY/D/G/rho', 'A_COPY/B/E/beta', status='M ')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n # Set up for test of issue #3383.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # Get a working copy for the foreign repos.\n svntest.actions.run_and_verify_svn(None, [], 'co', other_repo_url,\n other_wc_dir)\n\n # Create mergeinfo on the foreign repos on an existing directory and\n # file and an added directory and file. Commit as r7. 
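# (Aside, not part of the original test: svn:mergeinfo records paths and\n # revisions relative to a single repository root, e.g. '/A/D/G:4', so\n # revision numbers from a different repository would be meaningless here;\n # that is why a foreign-repos merge applies the diff but records no\n # mergeinfo, and why mergeinfo carried in the foreign diff is filtered out.)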
And no, we aren't\n # checking these intermediate steps very thoroughly, but we test these\n # simple merges to *death* elsewhere.\n\n # Create mergeinfo on an existing directory.\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n other_repo_url + '/A',\n os.path.join(other_wc_dir, 'A_COPY'),\n '-c5')\n\n # Create mergeinfo on an existing file.\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n other_repo_url + '/A/D/H/psi',\n os.path.join(other_wc_dir, 'A_COPY',\n 'D', 'H', 'psi'),\n '-c3')\n\n # Add a new directory with mergeinfo in the foreign repos.\n new_dir = os.path.join(other_wc_dir, 'A_COPY', 'N')\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', new_dir)\n svntest.actions.run_and_verify_svn(None, [], 'ps',\n SVN_PROP_MERGEINFO, '', new_dir)\n\n # Add a new file with mergeinfo in the foreign repos.\n new_file = os.path.join(other_wc_dir, 'A_COPY', 'nu')\n svntest.main.file_write(new_file, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', new_file)\n svntest.actions.run_and_verify_svn(None, [], 'ps',\n SVN_PROP_MERGEINFO, '', new_file)\n\n expected_output = wc.State(other_wc_dir,{\n 'A_COPY' : Item(verb='Sending'), # Mergeinfo created\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'), # Mergeinfo created\n 'A_COPY/N' : Item(verb='Adding'), # Has empty mergeinfo\n 'A_COPY/nu' : Item(verb='Adding'), # Has empty mergeinfo\n })\n svntest.actions.run_and_verify_commit(other_wc_dir, expected_output,\n None, [], other_wc_dir,\n '-m',\n 'create mergeinfo on foreign repos')\n # Now merge a diff from the foreign repos that contains the mergeinfo\n # addition in r7 to A_COPY. The mergeinfo diff should *not* be applied\n # to A_COPY since it refers to a foreign repository...\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n other_repo_url + '/A@1',\n other_repo_url + '/A_COPY@7',\n sbox.ospath('A_COPY'))\n #...which means there should be no mergeinfo anywhere in WC_DIR, since\n # this test never created any.\n svntest.actions.run_and_verify_svn([], [], 'pg',\n SVN_PROP_MERGEINFO, '-vR',\n wc_dir)\n\n#----------------------------------------------------------------------\n# This test involves tree conflicts.\n@XFail()\n@Issue(2897)\ndef avoid_reflected_revs(sbox):\n \"avoid repeated merges for cyclic merging\"\n\n # See <https://issues.apache.org/jira/browse/SVN-2897>.\n #\n # This test cherry-picks some changes (all of them, in fact) from the\n # parent branch 'A' to the child branch 'A_COPY', and then tries to\n # reintegrate 'A_COPY' to 'A' (explicitly specifying a revision range\n # on the source branch). 
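# (Aside, not part of the original test: the 'reflected' revisions here are\n # r6 and r7 -- the commits on A_COPY that merely record cherry-picks from\n # A -- while r4 and r8 (the bfile1/bfile2 additions) are the changes\n # genuinely unique to the branch.)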
It expects the changes that are unique to the\n # branch 'A_COPY' to be merged to 'A'.\n #\n # A --1----[3]---[5]----------?\n # \\ \\_____\\___ /\n # \\ \\ \\ /\n # A_COPY 2-[---4-----6--7--8]-\n\n # Create a WC with a single branch\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, True, 1)\n\n # Some paths we'll care about\n A_path = sbox.ospath('A')\n A_COPY_path = sbox.ospath('A_COPY')\n tfile1_path = sbox.ospath('A/tfile1')\n tfile2_path = sbox.ospath('A/tfile2')\n bfile1_path = os.path.join(A_COPY_path, 'bfile1')\n bfile2_path = os.path.join(A_COPY_path, 'bfile2')\n\n # Contents to be added to files\n tfile1_content = \"This is tfile1\\n\"\n tfile2_content = \"This is tfile2\\n\"\n bfile1_content = \"This is bfile1\\n\"\n bfile2_content = \"This is bfile2\\n\"\n\n # We'll consider A as the trunk and A_COPY as the feature branch\n # r3 - Create a tfile1 in A\n svntest.main.file_write(tfile1_path, tfile1_content)\n svntest.actions.run_and_verify_svn(None, [], 'add', tfile1_path)\n expected_output = wc.State(wc_dir, {'A/tfile1' : Item(verb='Adding')})\n wc_status.add({'A/tfile1' : Item(status=' ', wc_rev=3)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r4 - Create a bfile1 in A_COPY\n svntest.main.file_write(bfile1_path, bfile1_content)\n svntest.actions.run_and_verify_svn(None, [], 'add', bfile1_path)\n expected_output = wc.State(wc_dir, {'A_COPY/bfile1' : Item(verb='Adding')})\n wc_status.add({'A_COPY/bfile1' : Item(status=' ', wc_rev=4)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r5 - Create one more file in A\n svntest.main.file_write(tfile2_path, tfile2_content)\n svntest.actions.run_and_verify_svn(None, [], 'add', tfile2_path)\n expected_output = wc.State(wc_dir, {'A/tfile2' : Item(verb='Adding')})\n wc_status.add({'A/tfile2' : Item(status=' ', wc_rev=5)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Merge r5 from /A to /A_COPY, creating r6\n expected_output = wc.State(A_COPY_path, {\n 'tfile2' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'tfile2' : Item(status='A ', wc_rev='-', copied='+'),\n 'bfile1' : Item(status=' ', wc_rev=4),\n 'mu' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' ', wc_rev=2),\n 'B' : Item(status=' ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha': Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status=' ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' ', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/omega': Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5'}),\n 'tfile2' : Item(tfile2_content),\n 'bfile1' : Item(bfile1_content),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'C' : Item(),\n 'D' : Item(),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha': Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : 
Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/omega': Item(\"This is the file 'omega'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, {})\n\n svntest.actions.run_and_verify_merge(A_COPY_path, '4', '5',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n A_COPY_path,\n '--allow-mixed-revisions')\n\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/tfile2' : Item(verb='Adding'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n None)\n\n # Merge r3 from /A to /A_COPY, creating r7\n expected_output = wc.State(A_COPY_path, {\n 'tfile1' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status.tweak(wc_rev=5)\n expected_status.tweak('', wc_rev=6)\n expected_status.tweak('tfile2', status=' ', copied=None, wc_rev=6)\n expected_status.add({\n 'tfile1' : Item(status='A ', wc_rev='-', copied='+'),\n })\n expected_disk.tweak('', props={SVN_PROP_MERGEINFO : '/A:3,5'})\n expected_disk.add({\n 'tfile1' : Item(tfile1_content),\n })\n\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n A_COPY_path,\n '--allow-mixed-revisions')\n\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/tfile1' : Item(verb='Adding'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n None)\n\n # r8 - Add bfile2 to A_COPY\n svntest.main.file_write(bfile2_path, bfile2_content)\n svntest.actions.run_and_verify_svn(None, [], 'add', bfile2_path)\n expected_output = wc.State(wc_dir, {'A_COPY/bfile2' : Item(verb='Adding')})\n wc_status.tweak(wc_rev=6)\n wc_status.add({\n 'A_COPY/bfile2' : Item(status=' ', wc_rev=8),\n 'A_COPY' : Item(status=' ', wc_rev=7),\n 'A_COPY/tfile2' : Item(status=' ', wc_rev=6),\n 'A_COPY/tfile1' : Item(status=' ', wc_rev=7),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Merge 2:8 from A_COPY(feature branch) to A(trunk).\n expected_output = wc.State(A_path, {\n 'bfile2' : Item(status='A '),\n 'bfile1' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_path, {\n })\n expected_status = wc.State(A_path, {\n '' : Item(status=' M', wc_rev=6),\n 'bfile2' : Item(status='A ', wc_rev='-', copied='+'),\n 'bfile1' : Item(status='A ', wc_rev='-', copied='+'),\n 'tfile2' : Item(status=' ', wc_rev=6),\n 'tfile1' : Item(status=' ', wc_rev=6),\n 'mu' : Item(status=' ', wc_rev=6),\n 'C' : Item(status=' ', wc_rev=6),\n 'D' : Item(status=' ', wc_rev=6),\n 'B' : Item(status=' ', wc_rev=6),\n 'B/lambda' : Item(status=' ', wc_rev=6),\n 'B/E' : Item(status=' ', wc_rev=6),\n 
'B/E/alpha' : Item(status=' ', wc_rev=6),\n 'B/E/beta' : Item(status=' ', wc_rev=6),\n 'B/F' : Item(status=' ', wc_rev=6),\n 'D/gamma' : Item(status=' ', wc_rev=6),\n 'D/G' : Item(status=' ', wc_rev=6),\n 'D/G/pi' : Item(status=' ', wc_rev=6),\n 'D/G/rho' : Item(status=' ', wc_rev=6),\n 'D/G/tau' : Item(status=' ', wc_rev=6),\n 'D/H' : Item(status=' ', wc_rev=6),\n 'D/H/chi' : Item(status=' ', wc_rev=6),\n 'D/H/omega' : Item(status=' ', wc_rev=6),\n 'D/H/psi' : Item(status=' ', wc_rev=6),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A_COPY:3-8'}),\n 'bfile2' : Item(bfile2_content),\n 'bfile1' : Item(bfile1_content),\n 'tfile2' : Item(tfile2_content),\n 'tfile1' : Item(tfile1_content),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'C' : Item(),\n 'D' : Item(),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n })\n\n expected_skip = wc.State(A_path, {})\n\n svntest.actions.run_and_verify_merge(A_path, '2', '8',\n sbox.repo_url + '/A_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef update_loses_mergeinfo(sbox):\n \"update does not merge mergeinfo\"\n\n \"\"\"\n When a working copy path receives a fresh svn:mergeinfo property due to\n an update, and the path has local mergeinfo changes, then the local\n mergeinfo should be merged with the incoming mergeinfo.\n \"\"\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n A_C_wc_dir = sbox.ospath('A/C')\n A_B_url = sbox.repo_url + '/A/B'\n A_B_J_url = sbox.repo_url + '/A/B/J'\n A_B_K_url = sbox.repo_url + '/A/B/K'\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 2.\\n'],\n [],\n 'mkdir', '-m', 'rev 2', A_B_J_url)\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 3.\\n'],\n [],\n 'mkdir', '-m', 'rev 3', A_B_K_url)\n\n other_wc = sbox.add_wc_path('other')\n svntest.actions.duplicate_dir(wc_dir, other_wc)\n\n expected_output = wc.State(A_C_wc_dir, {'J' : Item(status='A ')})\n expected_mergeinfo_output = wc.State(A_C_wc_dir, {\n '' : Item(status=' U')\n })\n expected_elision_output = wc.State(A_C_wc_dir, {\n })\n expected_disk = wc.State('', {\n 'J' : Item(),\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:2'}),\n })\n expected_status = wc.State(A_C_wc_dir,\n { '' : Item(wc_rev=1, status=' M'),\n 'J' : Item(status='A ',\n wc_rev='-', copied='+')\n }\n )\n expected_skip = wc.State('', { })\n svntest.actions.run_and_verify_merge(A_C_wc_dir, '1', '2',\n A_B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=1)\n expected_output = wc.State(A_C_wc_dir, {\n '' : Item(verb='Sending'),\n 'J' : Item(verb='Adding')\n })\n expected_status = wc.State(A_C_wc_dir,\n { '' 
                               : Item(status=' ', wc_rev=4),
                               'J' : Item(status=' ', wc_rev=4)
                             }
                            )
  svntest.actions.run_and_verify_commit(A_C_wc_dir,
                                        expected_output,
                                        expected_status)

  other_A_C_wc_dir = os.path.join(other_wc, 'A', 'C')
  expected_output = wc.State(other_A_C_wc_dir, {'K' : Item(status='A ')})
  expected_mergeinfo_output = wc.State(other_A_C_wc_dir, {
    '' : Item(status=' U')
    })
  expected_elision_output = wc.State(other_A_C_wc_dir, {
    })
  expected_disk = wc.State('', {
    'K' : Item(),
    ''  : Item(props={SVN_PROP_MERGEINFO : '/A/B:3'}),
    })
  expected_status = wc.State(other_A_C_wc_dir,
                             { ''  : Item(wc_rev=1, status=' M'),
                               'K' : Item(status='A ',
                                          wc_rev='-', copied='+')
                             }
                            )
  expected_skip = wc.State('', { })
  svntest.actions.run_and_verify_merge(other_A_C_wc_dir, '2', '3',
                                       A_B_url, None,
                                       expected_output,
                                       expected_mergeinfo_output,
                                       expected_elision_output,
                                       expected_disk,
                                       expected_status,
                                       expected_skip,
                                       check_props=1)
  expected_output = wc.State(other_A_C_wc_dir,
                             {'J' : Item(status='A '),
                              ''  : Item(status=' G')
                             }
                            )
  expected_disk = wc.State('', {
    ''  : Item(props={SVN_PROP_MERGEINFO : '/A/B:2-3'}),
    'J' : Item(),
    'K' : Item(),
    })
  expected_status = wc.State(other_A_C_wc_dir,
                             { ''  : Item(wc_rev=4, status=' M'),
                               'J' : Item(status=' ', wc_rev='4'),
                               'K' : Item(status='A ',
                                          wc_rev='-', copied='+')
                             }
                            )
  svntest.actions.run_and_verify_update(other_A_C_wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status,
                                        check_props=True)

#----------------------------------------------------------------------
# Tests part of issue #2829.
@Issue(2829)
@SkipUnless(server_has_mergeinfo)
def merge_loses_mergeinfo(sbox):
  "merge should merge mergeinfo"

  """
  When a working copy has no mergeinfo (due to a local full revert of all
  merges), and a merge is attempted for some other revision rX, the new
  mergeinfo should be /merge/src:rX, not all the reverted ranges reappearing
  along with rX.
  """

  sbox.build()
  wc_dir = sbox.wc_dir
  A_C_wc_dir = sbox.ospath('A/C')
  A_B_url = sbox.repo_url + '/A/B'
  A_B_J_url = sbox.repo_url + '/A/B/J'
  A_B_K_url = sbox.repo_url + '/A/B/K'
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 2.\n'],
                                     [],
                                     'mkdir', '-m', 'rev 2', A_B_J_url)
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 3.\n'],
                                     [],
                                     'mkdir', '-m', 'rev 3', A_B_K_url)

  expected_output = wc.State(A_C_wc_dir, {'J' : Item(status='A ')})
  expected_mergeinfo_output = wc.State(A_C_wc_dir, {
    '' : Item(status=' U'),
    })
  expected_elision_output = wc.State(A_C_wc_dir, {
    })
  expected_disk = wc.State('', {
    'J' : Item(),
    ''  : Item(props={SVN_PROP_MERGEINFO : '/A/B:2'}),
    })
  expected_status = wc.State(A_C_wc_dir,
                             { ''  : Item(wc_rev=1, status=' M'),
                               'J' : Item(status='A ',
                                          wc_rev='-', copied='+')
                             }
                            )
  expected_skip = wc.State('', { })
  svntest.actions.run_and_verify_merge(A_C_wc_dir, '1', '2',
                                       A_B_url, None,
                                       expected_output,
                                       expected_mergeinfo_output,
                                       expected_elision_output,
                                       expected_disk,
                                       expected_status,
                                       expected_skip,
                                       check_props=1)
  expected_output = wc.State(A_C_wc_dir, {
    ''  : Item(verb='Sending'),
    'J' : Item(verb='Adding')
    })
  expected_status = wc.State(A_C_wc_dir,
                             { ''  : Item(status=' ', wc_rev=4),
                               'J' : Item(status=' ', wc_rev=4)
                             }
                            )
  svntest.actions.run_and_verify_commit(A_C_wc_dir,
                                        expected_output,
                                        expected_status)
  expected_output = wc.State(A_C_wc_dir, {'J' : Item(status='D 
')})\n expected_elision_output = wc.State(A_C_wc_dir, {\n '' : Item(status=' U'),\n })\n expected_disk = wc.State('', {})\n expected_status = wc.State(A_C_wc_dir,\n { '' : Item(wc_rev=4, status=' M'),\n 'J' : Item(wc_rev=4, status='D ')\n }\n )\n svntest.actions.run_and_verify_merge(A_C_wc_dir, '2', '1',\n A_B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=1)\n\n expected_output = wc.State(A_C_wc_dir, {'K' : Item(status='A ')})\n expected_disk = wc.State('', {\n 'K' : Item(),\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:3'}),\n })\n expected_status = wc.State(A_C_wc_dir,\n { '' : Item(wc_rev=4, status=' M'),\n 'K' : Item(status='A ',\n wc_rev='-', copied='+'),\n 'J' : Item(wc_rev=4, status='D ')\n }\n )\n expected_mergeinfo_output = wc.State(A_C_wc_dir, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_C_wc_dir, {\n })\n svntest.actions.run_and_verify_merge(A_C_wc_dir, '2', '3',\n A_B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=1)\n\n#----------------------------------------------------------------------\n@Issue(2853)\ndef single_file_replace_style_merge_capability(sbox):\n \"replace-style merge capability for a single file\"\n\n # Test for issue #2853, do_single_file_merge() lacks \"Replace-style\n # merge\" capability\n\n sbox.build()\n wc_dir = sbox.wc_dir\n iota_path = sbox.ospath('iota')\n mu_path = sbox.ospath('A/mu')\n\n # delete mu and replace it with a copy of iota\n svntest.main.run_svn(None, 'rm', mu_path)\n svntest.main.run_svn(None, 'mv', iota_path, mu_path)\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', status=' ', wc_rev=2)\n expected_status.remove('iota')\n expected_output = svntest.wc.State(wc_dir, {\n 'iota': Item(verb='Deleting'),\n 'A/mu': Item(verb='Replacing'),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Merge the file mu alone to rev1\n svntest.actions.run_and_verify_svn(expected_merge_output(None,\n ['R ' + mu_path + '\\n']),\n [],\n 'merge',\n mu_path + '@2',\n mu_path + '@1',\n mu_path)\n\n#----------------------------------------------------------------------\n# Test for issue 2786 fix.\n@Issue(2786)\n@SkipUnless(server_has_mergeinfo)\ndef merge_to_out_of_date_target(sbox):\n \"merge to ood path can lead to inaccurate mergeinfo\"\n\n # Create a WC with a branch.\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n\n # Make second working copy\n other_wc = sbox.add_wc_path('other')\n svntest.actions.duplicate_dir(wc_dir, other_wc)\n\n # Some paths we'll care about\n A_COPY_H_path = sbox.ospath('A_COPY/D/H')\n other_A_COPY_H_path = os.path.join(other_wc, \"A_COPY\", \"D\", \"H\")\n\n # Merge -c3 into A_COPY/D/H of first WC.\n expected_output = wc.State(A_COPY_H_path, {\n 'psi' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_H_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_H_path, {\n })\n expected_status = wc.State(A_COPY_H_path, {\n '' : Item(status=' M', wc_rev=2),\n 'psi' : Item(status='M ', wc_rev=2),\n 'omega' : Item(status=' ', wc_rev=2),\n 'chi' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:3'}),\n 'psi' : Item(\"New content\"),\n 'omega' : Item(\"This is 
the file 'omega'.\\n\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_H_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_H_path, '2', '3',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n # Commit merge to first WC.\n wc_status.tweak('A_COPY/D/H/psi', 'A_COPY/D/H', wc_rev=7)\n expected_output = svntest.wc.State(wc_dir, {\n 'A_COPY/D/H' : Item(verb='Sending'),\n 'A_COPY/D/H/psi': Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n # Merge -c6 into A_COPY/D/H of other WC.\n expected_output = wc.State(other_A_COPY_H_path, {\n 'omega' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(other_A_COPY_H_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(other_A_COPY_H_path, {\n })\n expected_status = wc.State(other_A_COPY_H_path, {\n '' : Item(status=' M', wc_rev=2),\n 'psi' : Item(status=' ', wc_rev=2),\n 'omega' : Item(status='M ', wc_rev=2),\n 'chi' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:6'}),\n 'psi' : Item(\"This is the file 'psi'.\\n\"),\n 'omega' : Item(\"New content\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(other_A_COPY_H_path, { })\n svntest.actions.run_and_verify_merge(other_A_COPY_H_path, '5', '6',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=1)\n\n # Update A_COPY/D/H in other WC. Local mergeinfo for r6 on A_COPY/D/H\n # should be *merged* with r3 from first WC.\n expected_output = svntest.wc.State(other_A_COPY_H_path, {\n '' : Item(status=' G'),\n 'psi' : Item(status='U ')\n })\n other_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:3,6'}),\n 'psi' : Item(contents=\"New content\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n 'omega' : Item(contents=\"New content\"),\n })\n other_status = wc.State(other_A_COPY_H_path,{\n '' : Item(wc_rev=7, status=' M'),\n 'chi' : Item(wc_rev=7, status=' '),\n 'psi' : Item(wc_rev=7, status=' '),\n 'omega' : Item(wc_rev=7, status='M ')\n })\n svntest.actions.run_and_verify_update(other_A_COPY_H_path,\n expected_output,\n other_disk,\n other_status,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_with_depth_files(sbox):\n \"merge test for --depth files\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n mu_path = sbox.ospath('A/mu')\n gamma_path = sbox.ospath('A/D/gamma')\n Acopy_path = sbox.ospath('A_copy')\n Acopy_mu_path = sbox.ospath('A_copy/mu')\n A_url = sbox.repo_url + '/A'\n Acopy_url = sbox.repo_url + '/A_copy'\n\n # Copy A_url to A_copy_url\n svntest.actions.run_and_verify_svn(None, [], 'cp',\n A_url, Acopy_url,\n '-m', 'create a new copy of A')\n\n svntest.main.file_write(mu_path, \"this is file 'mu' modified.\\n\")\n svntest.main.file_write(gamma_path, \"this is file 'gamma' modified.\\n\")\n\n # Create expected output tree for commit\n expected_output = wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n 'A/D/gamma' : Item(verb='Sending'),\n })\n\n # Create expected status tree for commit\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n 
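  # A quick sketch of the expected-tree pattern used here (the 'demo_*'
  # names are purely illustrative and are not used by this test):
  #
  #   demo_status = svntest.actions.get_virginal_state(wc_dir, 1)
  #   demo_status.tweak('A/mu', wc_rev=3)                    # adjust one path
  #   demo_status.add({'A/new' : Item(status='  ', wc_rev=3)})
  #
  # run_and_verify_commit() then compares such a tree against the actual
  # working copy status after the commit.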
expected_status.add({\n 'A/mu' : Item(status=' ', wc_rev=3),\n 'A/D/gamma' : Item(status=' ', wc_rev=3),\n })\n\n # Commit the modified contents\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Update working copy\n svntest.actions.run_and_verify_svn(None, [],\n 'up', Acopy_path)\n\n # Merge r1:3 into A_copy with --depth files. The merge only affects\n # 'A_copy' and its one file child 'mu', so 'A_copy' gets non-inheritable\n # mergeinfo for -r1:3 and 'mu' gets its own complete set of mergeinfo:\n # r1 from its parent, and r1:3 from the merge itself.\n expected_output = wc.State(Acopy_path, {\n 'mu' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(Acopy_path, {\n '' : Item(status=' U'),\n 'mu' : Item(status=' U'),\n })\n expected_elision_output = wc.State(Acopy_path, {\n })\n expected_status = wc.State(Acopy_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status='MM'),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=3)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-3*'}),\n 'B' : Item(),\n 'mu' : Item(\"this is file 'mu' modified.\\n\",\n props={SVN_PROP_MERGEINFO : '/A/mu:2-3'}),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(Acopy_path, { })\n svntest.actions.run_and_verify_merge(Acopy_path, '1', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, True,\n '--depth', 'files', Acopy_path)\n\n#----------------------------------------------------------------------\n# Test for issue #2976 Subtrees can lose non-inheritable ranges.\n#\n# Also test for a bug with paths added as the immediate child of the\n# merge target when the merge target has non-inheritable mergeinfo\n# and is also the current working directory, see\n# http://svn.haxx.se/dev/archive-2008-12/0133.shtml.\n#\n# Test for issue #3392 'Parsing error with reverse merges and\n# non-inheritable mergeinfo.\n#\n# Test issue #3407 'Shallow merges incorrectly set mergeinfo on children'.\n@SkipUnless(server_has_mergeinfo)\n@Issues(2976,3392,3407,4057)\ndef merge_away_subtrees_noninheritable_ranges(sbox):\n \"subtrees can lose non-inheritable ranges\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, nbr_of_branches=2)\n\n # Some paths we'll care about\n H_path = sbox.ospath('A/D/H')\n 
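  # This test leans on the difference between inheritable and non-inheritable
  # mergeinfo.  As a rough sketch (property values taken from the
  # expectations used later in this test, shown here only for orientation):
  #
  #   svn:mergeinfo on A_COPY/D    ->  '/A/D:7-8'     (inheritable)
  #   svn:mergeinfo on A_COPY/D/H  ->  '/A/D/H:7-8*'  (trailing '*' marks the
  #                                                    range non-inheritable)
  #
  # A non-inheritable range applies to the directory itself but is not
  # inherited by children that were excluded from the (shallow) merge.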
D_COPY_path = sbox.ospath('A_COPY/D')\n A_COPY_path = sbox.ospath('A_COPY')\n nu_path = sbox.ospath('A/nu')\n mu_path = sbox.ospath('A/mu')\n mu_2_path = sbox.ospath('A_COPY_2/mu')\n D_COPY_2_path = sbox.ospath('A_COPY_2/D')\n H_COPY_2_path = sbox.ospath('A_COPY_2/D/H')\n mu_COPY_path = sbox.ospath('A_COPY/mu')\n nu_COPY_path = sbox.ospath('A_COPY/nu')\n\n # Make a change to directory A/D/H and commit as r8.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(7), [],\n 'update', wc_dir)\n\n svntest.actions.run_and_verify_svn(\n [\"property 'prop:name' set on '\" + H_path + \"'\\n\"], [],\n 'ps', 'prop:name', 'propval', H_path)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/H' : Item(verb='Sending'),})\n wc_status.tweak(wc_rev=7)\n wc_status.tweak('A/D/H', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Merge r6:8 --depth immediates to A_COPY/D. This should merge the\n # prop change from r8 to A_COPY/H but not the change to A_COPY/D/H/omega\n # from r7 since that is below the depth we are merging to. Instead,\n # non-inheritable mergeinfo should be set on the immediate directory\n # child of A_COPY/D that is affected by the merge: A_COPY/D/H.\n expected_output = wc.State(D_COPY_path, {\n 'H' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(D_COPY_path, {\n '' : Item(status=' U'),\n 'H' : Item(status=' U'),\n })\n expected_elision_output = wc.State(D_COPY_path, {\n })\n expected_status = wc.State(D_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n 'H' : Item(status=' M', wc_rev=7),\n 'H/chi' : Item(status=' ', wc_rev=7),\n 'H/omega' : Item(status=' ', wc_rev=7),\n 'H/psi' : Item(status=' ', wc_rev=7),\n 'G' : Item(status=' ', wc_rev=7),\n 'G/pi' : Item(status=' ', wc_rev=7),\n 'G/rho' : Item(status=' ', wc_rev=7),\n 'G/tau' : Item(status=' ', wc_rev=7),\n 'gamma' : Item(status=' ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D:7-8'}),\n 'H' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:7-8*',\n 'prop:name' : 'propval'}),\n 'H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'G' : Item(),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\"),\n })\n expected_skip = wc.State(D_COPY_path, { })\n svntest.actions.run_and_verify_merge(D_COPY_path, '6', '8',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, True,\n '--depth', 'immediates', D_COPY_path)\n\n # Repeat the previous merge but at default depth of infinity. 
The change\n # to A_COPY/D/H/omega should now happen and the non-inheritable ranges on\n # A_COPY/D/G and A_COPY/D/H be changed to inheritable and then elide to\n # A_COPY/D.\n expected_output = wc.State(D_COPY_path, {\n 'H/omega' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(D_COPY_path, {\n '' : Item(status=' G'),\n 'H' : Item(status=' G'),\n 'H/omega' : Item(status=' G'),\n })\n expected_elision_output = wc.State(D_COPY_path, {\n 'H' : Item(status=' U'),\n 'H/omega' : Item(status=' U'),\n })\n expected_disk.tweak('', props={SVN_PROP_MERGEINFO : '/A/D:7-8'})\n expected_disk.tweak('H', props={'prop:name' : 'propval'})\n expected_disk.tweak('G', props={})\n expected_disk.tweak('H/omega', contents=\"New content\")\n expected_status.tweak('G', status=' ')\n expected_status.tweak('H/omega', status='M ')\n svntest.actions.run_and_verify_merge(D_COPY_path, '6', '8',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, True)\n\n # Now test the problem described in\n # http://svn.haxx.se/dev/archive-2008-12/0133.shtml.\n #\n # First revert all local mods.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # r9: Merge all available revisions from A to A_COPY at a depth of empty\n # this will create non-inheritable mergeinfo on A_COPY.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n wc_status.tweak(wc_rev=8)\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '--depth', 'empty',\n sbox.repo_url + '/A', A_COPY_path)\n wc_status.tweak('A_COPY', wc_rev=9)\n expected_output = wc.State(wc_dir, {'A_COPY' : Item(verb='Sending')})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # r10: Add the file A/nu.\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n expected_output = wc.State(wc_dir, {'A/nu' : Item(verb='Adding')})\n wc_status.add({'A/nu' : Item(status=' ', wc_rev=10)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Now merge -c10 from A to A_COPY.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State('', {\n 'nu': Item(status='A '),\n })\n expected_mergeinfo_output = wc.State('', {\n '' : Item(status=' U'),\n 'nu' : Item(status=' U'),\n })\n expected_elision_output = wc.State('', {\n })\n expected_status = wc.State('', {\n '' : Item(status=' M'),\n 'nu' : Item(status='A ', copied='+'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=10)\n expected_status.tweak('nu', wc_rev='-')\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-8*,10'}),\n 'nu' : Item(\"This is the file 'nu'.\\n\",\n props={SVN_PROP_MERGEINFO : '/A/nu:10'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the 
file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State('.', { })\n saved_cwd = os.getcwd()\n os.chdir(A_COPY_path)\n svntest.actions.run_and_verify_merge('', '9', '10',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n os.chdir(saved_cwd)\n\n # If a merge target has inheritable and non-inheritable ranges and has a\n # child with no explicit mergeinfo, test that a merge which brings\n # mergeinfo changes to that child (i.e. as part of the diff) properly\n # records mergeinfo on the child that includes both the incoming mergeinfo\n # *and* the mergeinfo inherited from it's parent.\n #\n # First revert all local changes and remove A_COPY/C/nu from disk.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # Make a text change to A_COPY_2/mu in r11 and then merge that\n # change to A/mu in r12. This will create mergeinfo of '/A_COPY_2/mu:11'\n # on A/mu.\n svntest.main.file_write(mu_2_path, 'new content')\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m', 'log msg',\n wc_dir)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[11]],\n ['U ' + mu_path + '\\n',\n ' U ' + mu_path + '\\n']),\n [], 'merge', '-c11', sbox.repo_url + '/A_COPY_2/mu', mu_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m', 'log msg',\n wc_dir)\n\n # Now merge r12 from A to A_COPY. 
A_COPY/mu should get the mergeinfo from\n # r12, '/A_COPY_2/mu:11' as well as mergeinfo describing the merge itself,\n # '/A/mu:12'.\n expected_output = wc.State('.', {\n 'mu': Item(status='UG'),\n })\n expected_mergeinfo_output = wc.State('.', {\n '' : Item(status=' U'),\n 'mu' : Item(status=' G'),\n })\n expected_elision_output = wc.State('.', {\n })\n expected_status = wc.State('', {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status='MM'),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=10)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-8*,12'}),\n 'B' : Item(),\n 'mu' : Item(\"new content\",\n props={SVN_PROP_MERGEINFO : '/A/mu:12\\n/A_COPY_2/mu:11'}),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State('.', { })\n saved_cwd = os.getcwd()\n os.chdir(A_COPY_path)\n # Don't do a dry-run, because it will differ due to the way merge\n # sets override mergeinfo on the children of paths with non-inheritable\n # ranges.\n svntest.actions.run_and_verify_merge('.', '11', '12',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False)\n os.chdir(saved_cwd)\n\n # Test for issue #3392\n #\n # Revert local changes and update.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Merge r8 from A/D/H to A_COPY_D/H at depth empty. Since r8 affects only\n # A_COPY/D/H itself, the resulting mergeinfo is inheritable. 
Commit this\n # merge as r13.\n expected_output = wc.State(H_COPY_2_path, {\n '' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(H_COPY_2_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(H_COPY_2_path, {\n })\n expected_status = wc.State(H_COPY_2_path, {\n '' : Item(status=' M', wc_rev=12),\n 'psi' : Item(status=' ', wc_rev=12),\n 'omega' : Item(status=' ', wc_rev=12),\n 'chi' : Item(status=' ', wc_rev=12),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:8',\n \"prop:name\" : \"propval\"}),\n 'psi' : Item(\"This is the file 'psi'.\\n\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(H_COPY_2_path, {})\n svntest.actions.run_and_verify_merge(H_COPY_2_path, '7', '8',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, True,\n '--depth', 'empty', H_COPY_2_path)\n svntest.actions.run_and_verify_svn(None, [], 'commit', '-m',\n 'log msg', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n # Now reverse the prior merge. Issue #3392 manifests itself here with\n # a mergeinfo parsing error:\n # >svn merge %url%/A/D/H merge_tests-62\\A_COPY_2\\D\\H -c-8\n # --- Reverse-merging r8 into 'merge_tests-62\\A_COPY_2\\D\\H':\n # U merge_tests-62\\A_COPY_2\\D\\H\n # ..\\..\\..\\subversion\\libsvn_subr\\mergeinfo.c:590: (apr_err=200020)\n # svn: Could not parse mergeinfo string '-8'\n # ..\\..\\..\\subversion\\libsvn_subr\\kitchensink.c:52: (apr_err=200022)\n # svn: Negative revision number found parsing '-8'\n #\n # Status is identical but for the working revision.\n expected_status.tweak(wc_rev=13)\n # The mergeinfo and prop:name props should disappear.\n expected_disk.remove('')\n expected_elision_output = wc.State(H_COPY_2_path, {\n '' : Item(status=' U'),\n })\n svntest.actions.run_and_verify_merge(H_COPY_2_path, '8', '7',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n # Test issue #3407 'Shallow merges incorrectly set mergeinfo on children'.\n #\n # Revert all local mods.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # Merge all available changes from A to A_COPY at --depth empty. Only the\n # mergeinfo on A_COPY should be affected.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[9,13]],\n [' U ' + A_COPY_path + '\\n']),\n [], 'merge', '--depth', 'empty',\n sbox.repo_url + '/A', A_COPY_path)\n svntest.actions.run_and_verify_svn([A_COPY_path + ' - /A:2-13*\\n'],\n [], 'pg', SVN_PROP_MERGEINFO,\n '-R', A_COPY_path)\n\n # Merge all available changes from A to A_COPY at --depth files. Only the\n # mergeinfo on A_COPY and its file children should be affected.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n # Revisions 2-13 are already merged to A_COPY and now they will be merged\n # to A_COPY's file children. 
Due to the way we drive the merge editor\n # r2-3, which are inoperative on A_COPY's file children, do not show up\n # in the merge notifications, although those revs are included in the\n # recorded mergeinfo.\n expected_output = expected_merge_output([[4,13], # Merge notification\n [9,13], # Merge notification\n [2,13]], # Mergeinfo notification\n ['UU %s\\n' % (mu_COPY_path),\n 'A %s\\n' % (nu_COPY_path),\n ' U %s\\n' % (A_COPY_path),\n ' G %s\\n' % (mu_COPY_path),\n ' U %s\\n' % (nu_COPY_path),])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '--depth', 'files',\n sbox.repo_url + '/A', A_COPY_path)\n expected_output = svntest.verify.UnorderedOutput(\n [A_COPY_path + ' - /A:2-13*\\n',\n mu_COPY_path + ' - /A/mu:2-13\\n',\n nu_COPY_path + ' - /A/nu:10-13\\n',])\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'pg', SVN_PROP_MERGEINFO,\n '-R', A_COPY_path)\n\n#----------------------------------------------------------------------\n# Test for issue #2827\n# Handle merge info for sparsely-populated directories\n@Issue(2827)\n@SkipUnless(server_has_mergeinfo)\ndef merge_to_sparse_directories(sbox):\n \"merge to sparse directories\"\n\n # Merges into sparse working copies should set non-inheritable mergeinfo\n # on the deepest directories present in the WC.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n\n # Some paths we'll care about\n A_path = sbox.ospath('A')\n D_path = sbox.ospath('A/D')\n I_path = sbox.ospath('A/C/I')\n G_path = sbox.ospath('A/D/G')\n A_COPY_path = sbox.ospath('A_COPY')\n\n # Make a few more changes to the merge source...\n\n # r7 - modify and commit A/mu\n svntest.main.file_write(sbox.ospath('A/mu'),\n \"New content\")\n expected_output = wc.State(wc_dir, {'A/mu' : Item(verb='Sending')})\n wc_status.tweak('A/mu', wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n wc_disk.tweak('A/mu', contents=\"New content\")\n\n # r8 - Add a prop to A/D and commit.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(7), [],\n 'up', wc_dir)\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n D_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', D_path)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D' : Item(verb='Sending'),\n })\n wc_status.tweak(wc_rev=7)\n wc_status.tweak('A/D', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # r9 - Add a prop to A and commit.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [],\n 'up', wc_dir)\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n A_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', A_path)\n expected_output = svntest.wc.State(wc_dir, {\n 'A' : Item(verb='Sending'),\n })\n wc_status.tweak(wc_rev=8)\n wc_status.tweak('A', wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Do an --immediates checkout of A_COPY\n immediates_dir = sbox.add_wc_path('immediates')\n expected_output = wc.State(immediates_dir, {\n 'B' : Item(status='A '),\n 'mu' : Item(status='A '),\n 'C' : Item(status='A '),\n 'D' : Item(status='A '),\n })\n expected_disk = wc.State('', {\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'C' : Item(),\n 'D' : Item(),\n })\n svntest.actions.run_and_verify_checkout(sbox.repo_url + \"/A_COPY\",\n immediates_dir,\n expected_output, expected_disk,\n [],\n \"--depth\", \"immediates\")\n\n # Merge r4:9 into the immediates WC.\n # The root of the 
immediates WC should get inheritable r4:9 as should\n # the one file present 'mu'. The three directory children present, 'B',\n # 'C', and 'D' are checked out at depth empty; the two of these affected\n # by the merge, 'B' and 'D', get non-inheritable mergeinfo for r4:9.\n # The root and 'D' do should also get the changes\n # that affect them directly (the prop adds from r8 and r9).\n #\n # Currently this fails due to r1424469. For a full explanation see\n # http://svn.haxx.se/dev/archive-2012-12/0472.shtml\n # and http://svn.haxx.se/dev/archive-2012-12/0475.shtml\n expected_output = wc.State(immediates_dir, {\n 'D' : Item(status=' U'),\n 'mu' : Item(status='U '),\n '' : Item(status=' U'),\n # Shadowed below skips\n 'D/H/omega' : Item(status=' ', treeconflict='U'),\n 'B/E/beta' : Item(status=' ', treeconflict='U'),\n })\n expected_mergeinfo_output = wc.State(immediates_dir, {\n '' : Item(status=' U'),\n 'B' : Item(status=' U'),\n 'D' : Item(status=' U'),\n })\n expected_elision_output = wc.State(immediates_dir, {\n })\n expected_status = wc.State(immediates_dir, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' M', wc_rev=9),\n 'mu' : Item(status='M ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' M', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5-9',\n \"prop:name\" : \"propval\"}),\n 'B' : Item(props={SVN_PROP_MERGEINFO : '/A/B:5-9*'}),\n 'mu' : Item(\"New content\"),\n 'C' : Item(),\n 'D' : Item(props={SVN_PROP_MERGEINFO : '/A/D:5-9*',\n \"prop:name\" : \"propval\"}),\n })\n expected_skip = svntest.wc.State(immediates_dir, {\n 'D/H' : Item(verb='Skipped missing target'),\n 'B/E' : Item(verb='Skipped missing target'),\n })\n svntest.actions.run_and_verify_merge(immediates_dir, '4', '9',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Do a --files checkout of A_COPY\n files_dir = sbox.add_wc_path('files')\n expected_output = wc.State(files_dir, {\n 'mu' : Item(status='A '),\n })\n expected_disk = wc.State('', {\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n })\n svntest.actions.run_and_verify_checkout(sbox.repo_url + \"/A_COPY\",\n files_dir,\n expected_output, expected_disk,\n [],\n \"--depth\", \"files\")\n\n # Merge r4:9 into the files WC.\n # The root of the files WC should get non-inheritable r4:9 and its one\n # present child 'mu' should get the same but inheritable. 
The root\n # should also get the change that affects it directly (the prop add\n # from r9).\n expected_output = wc.State(files_dir, {\n 'mu' : Item(status='U '),\n '' : Item(status=' U'),\n # Below the skips\n 'D/H/omega' : Item(status=' ', treeconflict='U'),\n 'B/E/beta' : Item(status=' ', treeconflict='U'),\n })\n expected_mergeinfo_output = wc.State(files_dir, {\n '' : Item(status=' U'),\n 'mu' : Item(status=' U'),\n })\n expected_elision_output = wc.State(files_dir, {\n })\n expected_status = wc.State(files_dir, {\n '' : Item(status=' M', wc_rev=9),\n 'mu' : Item(status='MM', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5-9*',\n \"prop:name\" : \"propval\"}),\n 'mu' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/mu:5-9'}),\n })\n expected_skip = svntest.wc.State(files_dir, {\n 'D' : Item(verb='Skipped missing target'),\n 'B' : Item(verb='Skipped missing target'),\n })\n svntest.actions.run_and_verify_merge(files_dir, '4', '9',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Do an --empty checkout of A_COPY\n empty_dir = sbox.add_wc_path('empty')\n expected_output = wc.State(empty_dir, {})\n expected_disk = wc.State('', {})\n svntest.actions.run_and_verify_checkout(sbox.repo_url + \"/A_COPY\",\n empty_dir,\n expected_output, expected_disk,\n [],\n \"--depth\", \"empty\")\n\n # Merge r4:9 into the empty WC.\n # The root of the files WC should get non-inheritable r4:9 and also get\n # the one change that affects it directly (the prop add from r9).\n expected_output = wc.State(empty_dir, {\n '' : Item(status=' U'),\n # Below the skips\n 'B/E/beta' : Item(status=' ', treeconflict='U'),\n 'D/H/omega' : Item(status=' ', treeconflict='U'),\n })\n expected_mergeinfo_output = wc.State(empty_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(empty_dir, {\n })\n expected_status = wc.State(empty_dir, {\n '' : Item(status=' M', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5-9*',\n \"prop:name\" : \"propval\"}),\n })\n expected_skip = svntest.wc.State(empty_dir, {\n 'mu' : Item(verb='Skipped missing target'),\n 'D' : Item(verb='Skipped missing target'),\n 'B' : Item(verb='Skipped missing target'),\n })\n svntest.actions.run_and_verify_merge(empty_dir, '4', '9',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Check that default depth for merge is infinity.\n #\n # Revert the previous changes to the immediates WC and update one\n # child in that WC to depth infinity.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R',\n immediates_dir)\n svntest.actions.run_and_verify_svn(None, [], 'up', '--set-depth',\n 'infinity',\n os.path.join(immediates_dir, 'D'))\n # Now merge r6 into the immediates WC, even though the root of the\n # is at depth immediates, the subtree rooted at child 'D' is fully\n # present, so a merge of r6 should affect 'D/H/omega'.\n expected_output = wc.State(immediates_dir, {\n 'D/H/omega' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(immediates_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(immediates_dir, {\n })\n expected_status = wc.State(immediates_dir, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 
'mu' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status=' ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' ', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/omega' : Item(status='M ', wc_rev=9),\n 'D/H/psi' : Item(status=' ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:6'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(immediates_dir, {})\n svntest.actions.run_and_verify_merge(immediates_dir, '5', '6',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_old_and_new_revs_from_renamed_dir(sbox):\n \"merge -rold(before rename):head renamed dir\"\n\n # See the email on dev@ from <NAME>, 2007-09-27, \"RE: svn commit:\n # r26803 - [...]\", <http://svn.haxx.se/dev/archive-2007-09/0706.shtml> or\n # <http://subversion.tigris.org/ds/viewMessage.do?dsForumId=462&dsMessageId=927127>.\n\n # Create a WC with a single branch\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, True, 1)\n\n # Some paths we'll care about\n A_url = sbox.repo_url + '/A'\n A_MOVED_url = sbox.repo_url + '/A_MOVED'\n A_COPY_path = sbox.ospath('A_COPY')\n mu_path = sbox.ospath('A/mu')\n A_MOVED_mu_path = sbox.ospath('A_MOVED/mu')\n\n # Make a modification to A/mu\n svntest.main.file_write(mu_path, \"This is the file 'mu' modified.\\n\")\n expected_output = wc.State(wc_dir, {'A/mu' : Item(verb='Sending')})\n wc_status.add({'A/mu' : Item(status=' ', wc_rev=3)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Move A to A_MOVED\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 4.\\n'],\n [], 'mv', '-m', 'mv A to A_MOVED',\n A_url, A_MOVED_url)\n\n # Update the working copy to get A_MOVED\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Make a modification to A_MOVED/mu\n svntest.main.file_write(A_MOVED_mu_path, \"This is 'mu' in A_MOVED.\\n\")\n expected_output = wc.State(wc_dir, {'A_MOVED/mu' : Item(verb='Sending')})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 4)\n expected_status.remove('A', 'A/mu', 'A/C', 'A/D', 'A/B', 'A/B/lambda',\n 'A/B/E', 'A/B/E/alpha', 'A/B/E/beta', 'A/B/F',\n 'A/D/gamma', 'A/D/G', 'A/D/G/pi', 'A/D/G/rho',\n 'A/D/G/tau', 'A/D/H', 'A/D/H/chi', 'A/D/H/omega',\n 'A/D/H/psi')\n expected_status.add({\n '' : Item(status=' ', wc_rev=4),\n 'iota' : Item(status=' ', wc_rev=4),\n 'A_MOVED' : Item(status=' ', wc_rev=4),\n 'A_MOVED/mu' : Item(status=' ', wc_rev=5),\n 'A_MOVED/C' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D' : Item(status=' ', wc_rev=4),\n 'A_MOVED/B' : Item(status=' ', 
wc_rev=4),\n 'A_MOVED/B/lambda' : Item(status=' ', wc_rev=4),\n 'A_MOVED/B/E' : Item(status=' ', wc_rev=4),\n 'A_MOVED/B/E/alpha': Item(status=' ', wc_rev=4),\n 'A_MOVED/B/E/beta' : Item(status=' ', wc_rev=4),\n 'A_MOVED/B/F' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/gamma' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/G' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/G/pi' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/G/rho' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/G/tau' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/H' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/H/chi' : Item(status=' ', wc_rev=4),\n 'A_MOVED/D/H/omega': Item(status=' ', wc_rev=4),\n 'A_MOVED/D/H/psi' : Item(status=' ', wc_rev=4),\n 'A_COPY' : Item(status=' ', wc_rev=4),\n 'A_COPY/mu' : Item(status=' ', wc_rev=4),\n 'A_COPY/C' : Item(status=' ', wc_rev=4),\n 'A_COPY/D' : Item(status=' ', wc_rev=4),\n 'A_COPY/B' : Item(status=' ', wc_rev=4),\n 'A_COPY/B/lambda' : Item(status=' ', wc_rev=4),\n 'A_COPY/B/E' : Item(status=' ', wc_rev=4),\n 'A_COPY/B/E/alpha' : Item(status=' ', wc_rev=4),\n 'A_COPY/B/E/beta' : Item(status=' ', wc_rev=4),\n 'A_COPY/B/F' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/gamma' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/G' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/G/pi' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/G/rho' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/G/tau' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/H' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/H/chi' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/H/omega' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/H/psi' : Item(status=' ', wc_rev=4),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Merge /A_MOVED to /A_COPY - this happens in multiple passes\n # because /A_MOVED has renames in its history between the boundaries\n # of the requested merge range.\n expected_output = wc.State(A_COPY_path, {\n 'mu' : Item(status='G ', prev_status='U '), # mu gets touched twice\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' G', prev_status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=4),\n 'mu' : Item(status='M ', wc_rev=4),\n 'C' : Item(status=' ', wc_rev=4),\n 'D' : Item(status=' ', wc_rev=4),\n 'B' : Item(status=' ', wc_rev=4),\n 'B/lambda' : Item(status=' ', wc_rev=4),\n 'B/E' : Item(status=' ', wc_rev=4),\n 'B/E/alpha': Item(status=' ', wc_rev=4),\n 'B/E/beta' : Item(status=' ', wc_rev=4),\n 'B/F' : Item(status=' ', wc_rev=4),\n 'D/gamma' : Item(status=' ', wc_rev=4),\n 'D/G' : Item(status=' ', wc_rev=4),\n 'D/G/pi' : Item(status=' ', wc_rev=4),\n 'D/G/rho' : Item(status=' ', wc_rev=4),\n 'D/G/tau' : Item(status=' ', wc_rev=4),\n 'D/H' : Item(status=' ', wc_rev=4),\n 'D/H/chi' : Item(status=' ', wc_rev=4),\n 'D/H/omega': Item(status=' ', wc_rev=4),\n 'D/H/psi' : Item(status=' ', wc_rev=4),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3\\n/A_MOVED:4-5'}),\n 'mu' : Item(\"This is 'mu' in A_MOVED.\\n\"),\n 'C' : Item(),\n 'D' : Item(),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha': Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 
    'D/H'      : Item(),
    'D/H/chi'  : Item("This is the file 'chi'.\n"),
    'D/H/omega': Item("This is the file 'omega'.\n"),
    'D/H/psi'  : Item("This is the file 'psi'.\n"),
    })
  expected_skip = wc.State(A_COPY_path, {})

  ### Disabling dry_run mode because currently it can't handle the way
  ### 'mu' gets textually modified in multiple passes.
  svntest.actions.run_and_verify_merge(A_COPY_path, '2', '5',
                                       A_MOVED_url, None,
                                       expected_output,
                                       expected_mergeinfo_output,
                                       expected_elision_output,
                                       expected_disk,
                                       expected_status,
                                       expected_skip,
                                       [], True, False)

#----------------------------------------------------------------------
@SkipUnless(server_has_mergeinfo)
def merge_with_child_having_different_rev_ranges_to_merge(sbox):
  "child having different rev ranges to merge"
  # Modify A/mu to 30 lines with content 'line1'...'line30' and commit it
  # at r2.
  # Create a branch A_COPY from A, commit it at r3.
  # Modify A/mu line number 7 to 'LINE7' and commit at r4.
  # Modify A/mu line number 17 to 'LINE17', set prop 'prop1' on 'A'
  # with a value 'val1', and commit at r5.
  # Modify A/mu line number 27 to 'LINE27' and commit at r6.
  # Merge r5 to 'A/mu' as a single file merge explicitly to 'A_COPY/mu'.
  # Merge r3:6 from 'A' to 'A_COPY'.
  # This should merge r4 and then r5 through r6.
  # Revert r5 and r6 via single file merge on A_COPY/mu.
  # Revert r6 through r4 on A_COPY; this should get us back the pristine copy.
  # Merge r3:6 from 'A' to 'A_COPY'.
  # Revert r5 on A_COPY/mu.
  # Modify line number 17 of A_COPY/mu with 'some other line17'.
  # Merge r6:3 from 'A' to 'A_COPY'. This should leave line number 17
  # undisturbed in A_COPY/mu; the rest should be reverted.

  # Create a WC
  sbox.build()
  wc_dir = sbox.wc_dir
  A_path = sbox.ospath('A')
  mu_path = sbox.ospath('A/mu')
  A_url = sbox.repo_url + '/A'
  A_mu_url = sbox.repo_url + '/A/mu'
  A_COPY_url = sbox.repo_url + '/A_COPY'
  A_COPY_path = sbox.ospath('A_COPY')
  A_COPY_mu_path = sbox.ospath('A_COPY/mu')
  thirty_line_dummy_text = 'line1\n'
  for i in range(2, 31):
    thirty_line_dummy_text += 'line' + str(i) + '\n'

  svntest.main.file_write(mu_path, thirty_line_dummy_text)
  expected_output = wc.State(wc_dir, {'A/mu' : Item(verb='Sending')})
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.tweak('A/mu', wc_rev=2)
  svntest.actions.run_and_verify_commit(wc_dir, expected_output,
                                        expected_status)
  svntest.actions.run_and_verify_svn(None, [],
                                     'cp', A_url, A_COPY_url, '-m', 'rev 3')
  # Update the working copy to get A_COPY
  svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)
  expected_status.add({'A_COPY'           : Item(status='  '),
                       'A_COPY/mu'        : Item(status='  '),
                       'A_COPY/C'         : Item(status='  '),
                       'A_COPY/D'         : Item(status='  '),
                       'A_COPY/B'         : Item(status='  '),
                       'A_COPY/B/lambda'  : Item(status='  '),
                       'A_COPY/B/E'       : Item(status='  '),
                       'A_COPY/B/E/alpha' : Item(status='  '),
                       'A_COPY/B/E/beta'  : Item(status='  '),
                       'A_COPY/B/F'       : Item(status='  '),
                       'A_COPY/D/gamma'   : Item(status='  '),
                       'A_COPY/D/G'       : Item(status='  '),
                       'A_COPY/D/G/pi'    : Item(status='  '),
                       'A_COPY/D/G/rho'   : Item(status='  '),
                       'A_COPY/D/G/tau'   : Item(status='  '),
                       'A_COPY/D/H'       : Item(status='  '),
                       'A_COPY/D/H/chi'   : Item(status='  '),
                       'A_COPY/D/H/omega' : Item(status='  '),
                       'A_COPY/D/H/psi'   : Item(status='  ')})
  expected_status.tweak(wc_rev=3)
  tweaked_7th_line = thirty_line_dummy_text.replace('line7', 'LINE 7')
  svntest.main.file_write(mu_path, tweaked_7th_line)
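  # For orientation, a rough sketch of what the content manipulation above
  # produces (values implied by the code, not re-verified here):
  #
  #   thirty_line_dummy_text  ->  'line1\n' ... 'line30\n'  (30 lines)
  #   tweaked_7th_line        ->  same text, with 'line7' replaced by 'LINE 7'
  #
  # Each later tweak replaces exactly one further line, so r4, r5 and r6 each
  # touch a different part of A/mu (r5 additionally sets a property on A),
  # which is what lets the subsequent merges and reverts operate on distinct
  # revision ranges.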
expected_status.tweak('A/mu', wc_rev=4)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_status.tweak(wc_rev=4)\n tweaked_17th_line = tweaked_7th_line.replace('line17', 'LINE 17')\n svntest.main.file_write(mu_path, tweaked_17th_line)\n svntest.main.run_svn(None, 'propset', 'prop1', 'val1', A_path)\n expected_output = wc.State(wc_dir,\n {\n 'A' : Item(verb='Sending'),\n 'A/mu' : Item(verb='Sending')\n }\n )\n expected_status.tweak('A', wc_rev=5)\n expected_status.tweak('A/mu', wc_rev=5)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n tweaked_27th_line = tweaked_17th_line.replace('line27', 'LINE 27')\n svntest.main.file_write(mu_path, tweaked_27th_line)\n expected_status.tweak('A/mu', wc_rev=6)\n expected_output = wc.State(wc_dir, {'A/mu' : Item(verb='Sending')})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n # Merge r5 to A_COPY/mu\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]],\n ['U ' + A_COPY_mu_path + '\\n',\n ' U ' + A_COPY_mu_path + '\\n']),\n [], 'merge', '-r4:5', A_mu_url, A_COPY_mu_path)\n\n expected_skip = wc.State(A_COPY_path, {})\n expected_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'mu' : Item(status='G ', prev_status='G '), # Updated twice\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'mu' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n 'mu' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=4),\n 'mu' : Item(status='M ', wc_rev=4),\n 'C' : Item(status=' ', wc_rev=4),\n 'D' : Item(status=' ', wc_rev=4),\n 'B' : Item(status=' ', wc_rev=4),\n 'B/lambda' : Item(status=' ', wc_rev=4),\n 'B/E' : Item(status=' ', wc_rev=4),\n 'B/E/alpha': Item(status=' ', wc_rev=4),\n 'B/E/beta' : Item(status=' ', wc_rev=4),\n 'B/F' : Item(status=' ', wc_rev=4),\n 'D/gamma' : Item(status=' ', wc_rev=4),\n 'D/G' : Item(status=' ', wc_rev=4),\n 'D/G/pi' : Item(status=' ', wc_rev=4),\n 'D/G/rho' : Item(status=' ', wc_rev=4),\n 'D/G/tau' : Item(status=' ', wc_rev=4),\n 'D/H' : Item(status=' ', wc_rev=4),\n 'D/H/chi' : Item(status=' ', wc_rev=4),\n 'D/H/omega': Item(status=' ', wc_rev=4),\n 'D/H/psi' : Item(status=' ', wc_rev=4),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:4-6',\n 'prop1' : 'val1'}),\n 'mu' : Item(tweaked_27th_line),\n 'C' : Item(),\n 'D' : Item(),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha': Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/omega': Item(\"This is the file 'omega'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n })\n svntest.actions.run_and_verify_merge(A_COPY_path, '3', '6',\n A_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n # Revert r5 and r6 on A_COPY/mu\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[6,5]],\n ['G ' + A_COPY_mu_path 
+ '\\n',\n ' G ' + A_COPY_mu_path + '\\n']),\n [], 'merge', '-r6:4', A_mu_url, A_COPY_mu_path)\n\n expected_output = wc.State(A_COPY_path, {\n '' : Item(status=' G'), # merged removal of prop1 property\n 'mu' : Item(status='G '), # merged reversion of text changes\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' G'),\n 'mu' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'mu' : Item(status=' U'),\n })\n expected_status.tweak('', status=' ')\n expected_status.tweak('mu', status=' ')\n expected_disk.tweak('', props={})\n expected_disk.remove('')\n expected_disk.tweak('mu', contents=thirty_line_dummy_text)\n svntest.actions.run_and_verify_merge(A_COPY_path, '6', '3',\n A_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n expected_disk.add({'' : Item(props={SVN_PROP_MERGEINFO : '/A:4-6',\n 'prop1' : 'val1'})})\n expected_disk.tweak('mu', contents=tweaked_27th_line)\n expected_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'), # new mergeinfo and prop1 property\n 'mu' : Item(status='U '), # text changes\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status.tweak('', status=' M')\n expected_status.tweak('mu', status='M ')\n svntest.actions.run_and_verify_merge(A_COPY_path, '3', '6',\n A_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n #Revert r5 on A_COPY/mu\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[-5]],\n ['G ' + A_COPY_mu_path + '\\n',\n ' G ' + A_COPY_mu_path + '\\n']),\n [], 'merge', '-r5:4', A_mu_url, A_COPY_mu_path)\n tweaked_17th_line_1 = tweaked_27th_line.replace('LINE 17',\n 'some other line17')\n tweaked_17th_line_2 = thirty_line_dummy_text.replace('line17',\n 'some other line17')\n svntest.main.file_write(A_COPY_mu_path, tweaked_17th_line_1)\n expected_output = wc.State(A_COPY_path, {\n '' : Item(status=' G'),\n 'mu' : Item(status='G ', prev_status='G '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' G'),\n 'mu' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'mu' : Item(status=' U'),\n })\n expected_status.tweak('', status=' ')\n expected_status.tweak('mu', status='M ')\n expected_disk.remove('')\n expected_disk.tweak('mu', contents=tweaked_17th_line_2)\n svntest.actions.run_and_verify_merge(A_COPY_path, '6', '3',\n A_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_old_and_new_revs_from_renamed_file(sbox):\n \"merge -rold(before rename):head renamed file\"\n\n ## See http://svn.haxx.se/dev/archive-2007-09/0706.shtml ##\n\n # Create a WC\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n mu_url = sbox.repo_url + '/A/mu'\n mu_MOVED_url = sbox.repo_url + '/A/mu_MOVED'\n mu_COPY_url = sbox.repo_url + '/A/mu_COPY'\n mu_COPY_path = sbox.ospath('A/mu_COPY')\n mu_path = sbox.ospath('A/mu')\n mu_MOVED_path = sbox.ospath('A/mu_MOVED')\n\n # Copy mu to mu_COPY\n 
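# (added overview) The copy below is committed as r2; r3 then modifies A/mu,\n # r4 moves mu to mu_MOVED, and r5 modifies mu_MOVED. The test then merges\n # -r1:5 from mu_MOVED to mu_COPY and checks the mergeinfo recorded across\n # the rename ('/A/mu:2-3' plus '/A/mu_MOVED:4-5').\n 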
svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 2.\\n'],\n [], 'cp', '-m', 'cp mu to mu_COPY',\n mu_url, mu_COPY_url)\n\n # Make a modification to A/mu\n svntest.main.file_write(mu_path, \"This is the file 'mu' modified.\\n\")\n expected_output = wc.State(wc_dir, {'A/mu' : Item(verb='Sending')})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', wc_rev=3)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Move mu to mu_MOVED\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 4.\\n'],\n [], 'mv', '-m', 'mv mu to mu_MOVED',\n mu_url, mu_MOVED_url)\n\n # Update the working copy to get mu_MOVED\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Make a modification to mu_MOVED\n svntest.main.file_write(mu_MOVED_path, \"This is 'mu' in mu_MOVED.\\n\")\n expected_output = wc.State(wc_dir, {'A/mu_MOVED' : Item(verb='Sending')})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 4)\n expected_status.remove('A/mu')\n expected_status.add({\n 'A/mu_MOVED' : Item(status=' ', wc_rev=5),\n 'A/mu_COPY' : Item(status=' ', wc_rev=4),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Merge A/mu_MOVED to A/mu_COPY - this happens in multiple passes\n # because A/mu_MOVED has renames in its history between the\n # boundaries of the requested merge range.\n expected_output = expected_merge_output([[2,3],[4,5]],\n ['U %s\\n' % (mu_COPY_path),\n ' U %s\\n' % (mu_COPY_path),\n 'G %s\\n' % (mu_COPY_path),\n ' G %s\\n' % (mu_COPY_path),])\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'merge', '-r', '1:5',\n mu_MOVED_url,\n mu_COPY_path)\n svntest.actions.run_and_verify_svn(['/A/mu:2-3\\n',\n '/A/mu_MOVED:4-5\\n'],\n [], 'propget', SVN_PROP_MERGEINFO,\n mu_COPY_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_with_auto_rev_range_detection(sbox):\n \"merge with auto detection of revision ranges\"\n\n ## See http://svn.haxx.se/dev/archive-2007-09/0735.shtml ##\n\n # Create a WC\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n A_url = sbox.repo_url + '/A'\n A_COPY_url = sbox.repo_url + '/A_COPY'\n B1_path = sbox.ospath('A/B1')\n B1_mu_path = sbox.ospath('A/B1/mu')\n A_COPY_path = sbox.ospath('A_COPY')\n\n # Create B1 inside A\n svntest.actions.run_and_verify_svn([\"A \" + B1_path + \"\\n\"],\n [], 'mkdir',\n B1_path)\n\n # Add a file mu inside B1\n svntest.main.file_write(B1_mu_path, \"This is the file 'mu'.\\n\")\n svntest.actions.run_and_verify_svn([\"A \" + B1_mu_path + \"\\n\"],\n [], 'add', B1_mu_path)\n\n # Commit B1 and B1/mu\n expected_output = wc.State(wc_dir, {\n 'A/B1' : Item(verb='Adding'),\n 'A/B1/mu' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B1' : Item(status=' ', wc_rev=2),\n 'A/B1/mu' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Copy A to A_COPY\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 3.\\n'],\n [], 'cp', '-m', 'cp A to A_COPY',\n A_url, A_COPY_url)\n\n # Make a modification to A/B1/mu\n svntest.main.file_write(B1_mu_path, \"This is the file 'mu' modified.\\n\")\n expected_output = wc.State(wc_dir, {'A/B1/mu' : Item(verb='Sending')})\n 
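# NOTE (added for clarity): the commit below creates r4. The merge at the end\n # of this test passes no explicit revision range (None, None) to\n # run_and_verify_merge, so the client auto-detects the eligible range and is\n # expected to record mergeinfo '/A:3-4' on A_COPY (see expected_disk).\n # Roughly the command-line shape being verified (illustrative only):\n #   svn merge ^/A A_COPY               # no -r/-c, range auto-detected\n #   svn propget svn:mergeinfo A_COPY   # expected: /A:3-4\n 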
expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/B1' : Item(status=' ', wc_rev=2),\n 'A/B1/mu' : Item(status=' ', wc_rev=4),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Update the working copy to get A_COPY\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Merge /A to /A_COPY\n expected_output = wc.State(A_COPY_path, {\n 'B1/mu' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=4),\n 'mu' : Item(status=' ', wc_rev=4),\n 'C' : Item(status=' ', wc_rev=4),\n 'D' : Item(status=' ', wc_rev=4),\n 'B' : Item(status=' ', wc_rev=4),\n 'B/lambda' : Item(status=' ', wc_rev=4),\n 'B/E' : Item(status=' ', wc_rev=4),\n 'B/E/alpha': Item(status=' ', wc_rev=4),\n 'B/E/beta' : Item(status=' ', wc_rev=4),\n 'B/F' : Item(status=' ', wc_rev=4),\n 'B1' : Item(status=' ', wc_rev=4),\n 'B1/mu' : Item(status='M ', wc_rev=4),\n 'D/gamma' : Item(status=' ', wc_rev=4),\n 'D/G' : Item(status=' ', wc_rev=4),\n 'D/G/pi' : Item(status=' ', wc_rev=4),\n 'D/G/rho' : Item(status=' ', wc_rev=4),\n 'D/G/tau' : Item(status=' ', wc_rev=4),\n 'D/H' : Item(status=' ', wc_rev=4),\n 'D/H/chi' : Item(status=' ', wc_rev=4),\n 'D/H/omega': Item(status=' ', wc_rev=4),\n 'D/H/psi' : Item(status=' ', wc_rev=4),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3-4'}),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'C' : Item(),\n 'D' : Item(),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha': Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'B1' : Item(),\n 'B1/mu' : Item(\"This is the file 'mu' modified.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/omega': Item(\"This is the file 'omega'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, {})\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n A_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n#----------------------------------------------------------------------\n# Test for issue 2818: Provide a 'merge' API which allows for merging of\n# arbitrary revision ranges (e.g. 
'-c 3,5,7')\n@Issue(2818)\n@SkipUnless(server_has_mergeinfo)\ndef cherry_picking(sbox):\n \"command line supports cherry picked merge ranges\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n H_path = sbox.ospath('A/D/H')\n G_path = sbox.ospath('A/D/G')\n A_COPY_path = sbox.ospath('A_COPY')\n D_COPY_path = sbox.ospath('A_COPY/D')\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n\n # Update working copy\n expected_output = svntest.wc.State(wc_dir, {})\n wc_status.tweak(wc_rev='6')\n svntest.actions.run_and_verify_update(wc_dir, expected_output,\n wc_disk, wc_status,\n check_props=True)\n\n # Make some prop changes to some dirs.\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n G_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', G_path)\n expected_output = svntest.wc.State(wc_dir, {'A/D/G': Item(verb='Sending'),})\n wc_status.tweak('A/D/G', wc_rev=7)\n wc_disk.tweak('A/D/G', props={'prop:name' : 'propval'})\n\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n H_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', H_path)\n expected_output = svntest.wc.State(wc_dir, {'A/D/H': Item(verb='Sending'),})\n wc_status.tweak('A/D/H', wc_rev=8)\n wc_disk.tweak('A/D/H', props={'prop:name' : 'propval'})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Do multiple additive merges to a file\"\n # Merge -r2:4 -c6 into A_COPY/D/G/rho.\n expected_skip = wc.State(rho_COPY_path, { })\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[3,4],[6]],\n ['U ' + rho_COPY_path + '\\n',\n ' U ' + rho_COPY_path + '\\n',\n ' G ' + rho_COPY_path + '\\n',]),\n [], 'merge', '-r2:4', '-c6',\n sbox.repo_url + '/A/D/G/rho', rho_COPY_path)\n\n # Check rho's status and props.\n expected_status = wc.State(rho_COPY_path,\n {'' : Item(status='MM', wc_rev=6)})\n svntest.actions.run_and_verify_status(rho_COPY_path, expected_status)\n svntest.actions.run_and_verify_svn([\"/A/D/G/rho:3-4,6\\n\"], [],\n 'propget', SVN_PROP_MERGEINFO,\n rho_COPY_path)\n\n #Do multiple additive merges to a directory:\n # Merge -c6 -c8 into A_COPY/D/H\n expected_output = expected_merge_output(\n [[6],[8]],\n ['U ' + omega_COPY_path + '\\n',\n ' U ' + H_COPY_path + '\\n',\n ' G ' + H_COPY_path + '\\n',])\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'merge', '-c6', '-c8',\n sbox.repo_url + '/A/D/H',\n H_COPY_path)\n\n # Check A_COPY/D/H's status and props.\n expected_status = wc.State(H_COPY_path,\n {'' : Item(status=' M', wc_rev=6),\n 'psi' : Item(status=' ', wc_rev=6),\n 'chi' : Item(status=' ', wc_rev=6),\n 'omega': Item(status='M ', wc_rev=6),})\n svntest.actions.run_and_verify_status(H_COPY_path, expected_status)\n svntest.actions.run_and_verify_svn([H_COPY_path + \" - /A/D/H:6,8\\n\"],\n [], 'propget', '-R', SVN_PROP_MERGEINFO,\n H_COPY_path)\n\n # Do multiple reverse merges to a directory:\n # Merge -c-6 -c-3 into A_COPY\n expected_output = expected_merge_output(\n [[-3],[-6]],\n ['G ' + omega_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',\n ' U ' + H_COPY_path 
+ '\\n',\n ' G ' + A_COPY_path + '\\n',\n ' G ' + H_COPY_path + '\\n',],\n elides=True)\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'merge', '-c-3', '-c-6',\n sbox.repo_url + '/A',\n A_COPY_path)\n expected_status = wc.State(A_COPY_path,\n {'' : Item(status=' ', wc_rev=6),\n 'B' : Item(status=' ', wc_rev=6),\n 'B/lambda' : Item(status=' ', wc_rev=6),\n 'B/E' : Item(status=' ', wc_rev=6),\n 'B/E/alpha' : Item(status=' ', wc_rev=6),\n 'B/E/beta' : Item(status=' ', wc_rev=6),\n 'B/F' : Item(status=' ', wc_rev=6),\n 'mu' : Item(status=' ', wc_rev=6),\n 'C' : Item(status=' ', wc_rev=6),\n 'D' : Item(status=' ', wc_rev=6),\n 'D/gamma' : Item(status=' ', wc_rev=6),\n 'D/G' : Item(status=' ', wc_rev=6),\n 'D/G/pi' : Item(status=' ', wc_rev=6),\n 'D/G/rho' : Item(status='MM', wc_rev=6),\n 'D/G/tau' : Item(status=' ', wc_rev=6),\n 'D/H' : Item(status=' M', wc_rev=6),\n 'D/H/chi' : Item(status=' ', wc_rev=6),\n 'D/H/psi' : Item(status=' ', wc_rev=6),\n 'D/H/omega' : Item(status=' ', wc_rev=6),})\n svntest.actions.run_and_verify_status(A_COPY_path, expected_status)\n # A_COPY/D/G/rho is untouched by the merge so its mergeinfo\n # remains unchanged.\n expected_out = H_COPY_path + \" - /A/D/H:8\\n|\" + \\\n rho_COPY_path + \" - /A/D/G/rho:3-4,6\\n\"\n # Construct proper regex for '\\' infested Windows paths.\n if sys.platform == 'win32':\n expected_out = expected_out.replace(\"\\\\\", \"\\\\\\\\\")\n svntest.actions.run_and_verify_svn(expected_out, [],\n 'propget', '-R', SVN_PROP_MERGEINFO,\n A_COPY_path)\n\n # Do both additive and reverse merges to a directory:\n # Merge -r2:3 -c-4 -r4:7 to A_COPY/D\n expected_output = expected_merge_output(\n [[3], [-4], [6,7], [5,7]],\n [' U ' + G_COPY_path + '\\n',\n 'U ' + omega_COPY_path + '\\n',\n 'U ' + psi_COPY_path + '\\n',\n ' U ' + D_COPY_path + '\\n',\n ' G ' + D_COPY_path + '\\n',\n ' U ' + H_COPY_path + '\\n',\n ' G ' + H_COPY_path + '\\n',\n 'G ' + rho_COPY_path + '\\n',\n ' U ' + rho_COPY_path + '\\n',\n ' G ' + rho_COPY_path + '\\n'],\n elides=True)\n svntest.actions.run_and_verify_svn(expected_output, [], 'merge',\n '-r2:3', '-c-4', '-r4:7',\n sbox.repo_url + '/A/D',\n D_COPY_path)\n expected_status = wc.State(D_COPY_path,\n {'' : Item(status=' M', wc_rev=6),\n 'gamma' : Item(status=' ', wc_rev=6),\n 'G' : Item(status=' M', wc_rev=6),\n 'G/pi' : Item(status=' ', wc_rev=6),\n 'G/rho' : Item(status=' ', wc_rev=6),\n 'G/tau' : Item(status=' ', wc_rev=6),\n 'H' : Item(status=' M', wc_rev=6),\n 'H/chi' : Item(status=' ', wc_rev=6),\n 'H/psi' : Item(status='M ', wc_rev=6),\n 'H/omega' : Item(status='M ', wc_rev=6),})\n svntest.actions.run_and_verify_status(D_COPY_path, expected_status)\n expected_out = D_COPY_path + \" - /A/D:3,5-7\\n|\" + \\\n H_COPY_path + \" - /A/D/H:3,5-8\\n|\" + \\\n rho_COPY_path + \" - /A/D/G/rho:3-4,6\\n\"\n # Construct proper regex for '\\' infested Windows paths.\n if sys.platform == 'win32':\n expected_out = expected_out.replace(\"\\\\\", \"\\\\\\\\\")\n svntest.actions.run_and_verify_svn(expected_out, [],\n 'propget', '-R', SVN_PROP_MERGEINFO,\n D_COPY_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(2969)\ndef propchange_of_subdir_raises_conflict(sbox):\n \"merge of propchange on subdir raises conflict\"\n\n ## See https://issues.apache.org/jira/browse/SVN-2969. 
##\n\n # Create a WC with a single branch\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, True, 1)\n\n # Some paths we'll care about\n B_url = sbox.repo_url + '/A/B'\n E_path = sbox.ospath('A/B/E')\n lambda_path = sbox.ospath('A/B/lambda')\n A_COPY_B_path = sbox.ospath('A_COPY/B')\n A_COPY_B_E_path = sbox.ospath('A_COPY/B/E')\n A_COPY_lambda_path = sbox.ospath('A_COPY/B/E/lambda')\n\n # Set a property on A/B/E and Make a modification to A/B/lambda\n svntest.main.run_svn(None, 'propset', 'x', 'x', E_path)\n\n svntest.main.file_write(lambda_path, \"This is the file 'lambda' modified.\\n\")\n expected_output = wc.State(wc_dir, {\n 'A/B/lambda' : Item(verb='Sending'),\n 'A/B/E' : Item(verb='Sending'),\n })\n wc_status.add({\n 'A/B/lambda' : Item(status=' ', wc_rev=3),\n 'A/B/E' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Merge /A/B to /A_COPY/B ie., r1 to r3 with depth files\n expected_output = wc.State(A_COPY_B_path, {\n 'lambda' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_B_path, {\n '' : Item(status=' U'),\n 'lambda' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_B_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:2-3*'}),\n 'lambda' : Item(contents=\"This is the file 'lambda' modified.\\n\",\n props={SVN_PROP_MERGEINFO : '/A/B/lambda:2-3'}),\n 'F' : Item(),\n 'E' : Item(),\n 'E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n })\n expected_status = wc.State(A_COPY_B_path, {\n '' : Item(status=' M', wc_rev=2),\n 'lambda' : Item(status='MM', wc_rev=2),\n 'F' : Item(status=' ', wc_rev=2),\n 'E' : Item(status=' ', wc_rev=2),\n 'E/alpha' : Item(status=' ', wc_rev=2),\n 'E/beta' : Item(status=' ', wc_rev=2),\n })\n expected_skip = wc.State(A_COPY_B_path, {})\n\n svntest.actions.run_and_verify_merge(A_COPY_B_path, None, None,\n B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--depth', 'files',\n A_COPY_B_path)\n\n # Merge /A/B to /A_COPY/B ie., r1 to r3 with infinite depth\n expected_output = wc.State(A_COPY_B_path, {\n 'E' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(A_COPY_B_path, {\n '' : Item(status=' G'),\n 'E' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_B_path, {\n 'E' : Item(status=' U'),\n 'lambda' : Item(status=' U'),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:2-3'}),\n 'lambda' : Item(contents=\"This is the file 'lambda' modified.\\n\"),\n 'F' : Item(),\n 'E' : Item(props={'x': 'x'}),\n 'E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n })\n expected_status = wc.State(A_COPY_B_path, {\n '' : Item(status=' M', wc_rev=2),\n 'lambda' : Item(status='M ', wc_rev=2),\n 'F' : Item(status=' ', wc_rev=2),\n 'E' : Item(status=' M', wc_rev=2),\n 'E/alpha' : Item(status=' ', wc_rev=2),\n 'E/beta' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_merge(A_COPY_B_path, None, None,\n B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], 1, 1)\n\n#----------------------------------------------------------------------\n# Test for issue #2971: Reverse merge of 
prop add segfaults if\n# merging to parent of first merge\n@Issue(2971)\n@SkipUnless(server_has_mergeinfo)\ndef reverse_merge_prop_add_on_child(sbox):\n \"reverse merge of prop add on child\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, True, 1)\n\n # Some paths we'll care about\n G_path = sbox.ospath('A/D/G')\n D_COPY_path = sbox.ospath('A_COPY/D')\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n\n # Make some prop changes to some dirs.\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n G_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', G_path)\n expected_output = svntest.wc.State(wc_dir, {'A/D/G': Item(verb='Sending'),})\n wc_status.tweak('A/D/G', wc_rev=3)\n wc_disk.tweak('A/D/G', props={'prop:name' : 'propval'})\n\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Merge -c3's prop add to A_COPY/D/G\n expected_output = wc.State(G_COPY_path, {\n '' : Item(status=' U')\n })\n expected_mergeinfo_output = wc.State(G_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(G_COPY_path, {\n })\n expected_status = wc.State(G_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'pi' : Item(status=' ', wc_rev=2),\n 'rho' : Item(status=' ', wc_rev=2),\n 'tau' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G:3',\n 'prop:name' : 'propval'}),\n 'pi' : Item(\"This is the file 'pi'.\\n\"),\n 'rho' : Item(\"This is the file 'rho'.\\n\"),\n 'tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_skip = wc.State(G_COPY_path, { })\n svntest.actions.run_and_verify_merge(G_COPY_path, '2', '3',\n sbox.repo_url + '/A/D/G', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Now merge -c-3 but target the previous target's parent instead.\n expected_output = wc.State(D_COPY_path, {\n 'G' : Item(status=' G'),\n })\n expected_mergeinfo_output = wc.State(D_COPY_path, {\n '' : Item(status=' U'),\n 'G' : Item(status=' G'),\n })\n expected_elision_output = wc.State(D_COPY_path, {\n '' : Item(status=' U'),\n 'G' : Item(status=' U'),\n })\n expected_status = wc.State(D_COPY_path, {\n '' : Item(status=' ', wc_rev=2),\n 'G' : Item(status=' ', wc_rev=2),\n 'G/pi' : Item(status=' ', wc_rev=2),\n 'G/rho' : Item(status=' ', wc_rev=2),\n 'G/tau' : Item(status=' ', wc_rev=2),\n 'H' : Item(status=' ', wc_rev=2),\n 'H/chi' : Item(status=' ', wc_rev=2),\n 'H/psi' : Item(status=' ', wc_rev=2),\n 'H/omega' : Item(status=' ', wc_rev=2),\n 'gamma' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n 'G' : Item(),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'H' : Item(),\n 'H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\")\n })\n expected_skip = wc.State(D_COPY_path, { })\n svntest.actions.run_and_verify_merge(D_COPY_path, '3', '2',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@XFail()\n@Issues(2970,3642)\ndef merge_target_with_non_inheritable_mergeinfo(sbox):\n 
\"merge target with non inheritable mergeinfo\"\n\n ## See https://issues.apache.org/jira/browse/SVN-2970. ##\n\n # Create a WC with a single branch\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, True, 1)\n\n # Some paths we'll care about\n B_url = sbox.repo_url + '/A/B'\n lambda_path = sbox.ospath('A/B/lambda')\n newfile_path = sbox.ospath('A/B/E/newfile')\n A_COPY_B_path = sbox.ospath('A_COPY/B')\n\n # Make a modifications to A/B/lambda and add A/B/E/newfile\n svntest.main.file_write(lambda_path, \"This is the file 'lambda' modified.\\n\")\n svntest.main.file_write(newfile_path, \"This is the file 'newfile'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', newfile_path)\n expected_output = wc.State(wc_dir, {\n 'A/B/lambda' : Item(verb='Sending'),\n 'A/B/E/newfile' : Item(verb='Adding'),\n })\n wc_status.add({\n 'A/B/lambda' : Item(status=' ', wc_rev=3),\n 'A/B/E/newfile' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Merge /A/B to /A_COPY/B ie., r1 to r3 with depth immediates\n expected_output = wc.State(A_COPY_B_path, {\n 'lambda' : Item(status='U '),\n })\n # Issue #3642 https://issues.apache.org/jira/browse/SVN-3642\n #\n # We don't expect A_COPY/B/F to have mergeinfo recorded on it because\n # not only is it unaffected by the merge at depth immediates, it could\n # never be affected by the merge, regardless of depth.\n expected_mergeinfo_output = wc.State(A_COPY_B_path, {\n '' : Item(status=' U'),\n 'E' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_B_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:2-3'}),\n 'lambda' : Item(contents=\"This is the file 'lambda' modified.\\n\"),\n 'F' : Item(), # No mergeinfo!\n 'E' : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:2-3*'}),\n 'E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n })\n expected_status = wc.State(A_COPY_B_path, {\n '' : Item(status=' M', wc_rev=2),\n 'lambda' : Item(status='M ', wc_rev=2),\n 'F' : Item(status=' ', wc_rev=2),\n 'E' : Item(status=' M', wc_rev=2),\n 'E/alpha' : Item(status=' ', wc_rev=2),\n 'E/beta' : Item(status=' ', wc_rev=2),\n })\n expected_skip = wc.State(A_COPY_B_path, {})\n\n svntest.actions.run_and_verify_merge(A_COPY_B_path, None, None,\n B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--depth', 'immediates',\n A_COPY_B_path)\n\n # Merge /A/B to /A_COPY/B ie., r1 to r3 with infinite depth\n expected_output = wc.State(A_COPY_B_path, {\n 'E/newfile' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_B_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_B_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:2-3'}),\n 'lambda' : Item(contents=\"This is the file 'lambda' modified.\\n\"),\n 'F' : Item(),\n 'E' : Item(),\n 'E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'E/newfile' : Item(contents=\"This is the file 'newfile'.\\n\"),\n })\n expected_status = wc.State(A_COPY_B_path, {\n '' : Item(status=' M', wc_rev=2),\n 'lambda' : Item(status='M ', wc_rev=2),\n 'F' : Item(status=' ', wc_rev=2),\n 'E' : Item(status=' ', wc_rev=2),\n 'E/alpha' : Item(status=' ', wc_rev=2),\n 'E/beta' : 
Item(status=' ', wc_rev=2),\n 'E/newfile' : Item(status='A ', wc_rev=2),\n })\n\n svntest.actions.run_and_verify_merge(A_COPY_B_path, None, None,\n B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef self_reverse_merge(sbox):\n \"revert a commit on a target\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make changes to the working copy\n mu_path = sbox.ospath('A/mu')\n svntest.main.file_append(mu_path, 'appended mu text')\n\n # Created expected output tree for 'svn ci'\n expected_output = wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n })\n\n # Create expected status tree; all local revisions should be at 1,\n # but mu should be at revision 2.\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', wc_rev=2)\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # update to HEAD so that the to-be-undone revision is found in the\n # implicit mergeinfo (the natural history) of the target.\n svntest.actions.run_and_verify_svn(None, [], 'update', wc_dir)\n\n expected_output = wc.State(wc_dir, {\n 'A/mu' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(wc_dir, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_dir, {\n '' : Item(status=' U'),\n })\n expected_skip = wc.State(wc_dir, { })\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, 2)\n expected_status.tweak('A/mu', status='M ')\n svntest.actions.run_and_verify_merge(wc_dir, '2', '1', sbox.repo_url,\n None, expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, True)\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # record dummy self mergeinfo to test the fact that self-reversal should work\n # irrespective of mergeinfo.\n svntest.actions.run_and_verify_svn(None, [], 'ps', SVN_PROP_MERGEINFO,\n '/:1', wc_dir)\n\n # Bad svntest.main.greek_state does not have '', so adding it explicitly.\n expected_disk.add({'' : Item(props={SVN_PROP_MERGEINFO : '/:1'})})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 2)\n expected_status.tweak('', status = ' M')\n expected_status.tweak('A/mu', status = 'M ')\n expected_mergeinfo_output = wc.State(wc_dir, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(wc_dir, {\n })\n svntest.actions.run_and_verify_merge(wc_dir, '2', '1', sbox.repo_url,\n None, expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef ignore_ancestry_and_mergeinfo(sbox):\n \"--ignore-ancestry also ignores mergeinfo\"\n\n # Create a WC with a single branch\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, True, 1)\n\n # Some paths we'll care about\n A_B_url = sbox.repo_url + '/A/B'\n A_COPY_B_path = sbox.ospath('A_COPY/B')\n lambda_path = sbox.ospath('A/B/lambda')\n A_COPY_lambda_path = sbox.ospath('A_COPY/B/lambda')\n\n # Make modifications to A/B/lambda\n svntest.main.file_write(lambda_path, \"This is the file 'lambda' modified.\\n\")\n expected_output = wc.State(wc_dir, 
{\n 'A/B/lambda' : Item(verb='Sending'),\n })\n wc_status.add({\n 'A/B/lambda' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Merge /A/B to /A_COPY/B ie., r1 to r3 with depth immediates\n expected_output = wc.State(A_COPY_B_path, {\n 'lambda' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_B_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_B_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:2-3'}),\n 'lambda' : Item(contents=\"This is the file 'lambda' modified.\\n\"),\n 'F' : Item(props={}),\n 'E' : Item(props={}),\n 'E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n })\n expected_status = wc.State(A_COPY_B_path, {\n '' : Item(status=' M', wc_rev=3),\n 'lambda' : Item(status='M ', wc_rev=3),\n 'F' : Item(status=' ', wc_rev=3),\n 'E' : Item(status=' ', wc_rev=3),\n 'E/alpha' : Item(status=' ', wc_rev=3),\n 'E/beta' : Item(status=' ', wc_rev=3),\n })\n expected_skip = wc.State(A_COPY_B_path, {})\n\n svntest.actions.run_and_verify_merge(A_COPY_B_path, 1, 3,\n A_B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n # Now, revert lambda and repeat the merge. Nothing should happen.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R',\n A_COPY_lambda_path)\n expected_output.remove('lambda')\n expected_disk.tweak('lambda', contents=\"This is the file 'lambda'.\\n\")\n expected_status.tweak('lambda', status=' ')\n expected_mergeinfo_output = wc.State(A_COPY_B_path, {\n '' : Item(status=' G'),\n })\n svntest.actions.run_and_verify_merge(A_COPY_B_path, 1, 3,\n A_B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n # Now, try the merge again with --ignore-ancestry. We should get\n # lambda re-modified. */\n expected_output = wc.State(A_COPY_B_path, {\n 'lambda' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_B_path, {})\n expected_elision_output = wc.State(A_COPY_B_path, {\n })\n expected_disk.tweak('lambda',\n contents=\"This is the file 'lambda' modified.\\n\")\n expected_status.tweak('lambda', status='M ')\n svntest.actions.run_and_verify_merge(A_COPY_B_path, 1, 3,\n A_B_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--ignore-ancestry', A_COPY_B_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(3032)\ndef merge_from_renamed_branch_fails_while_avoiding_repeat_merge(sbox):\n \"merge from renamed branch\"\n #Copy A/C to A/COPY_C results in r2.\n #Rename A/COPY_C to A/RENAMED_C results in r3.\n #Add A/RENAMED_C/file1 and commit, results in r4.\n #Change A/RENAMED_C/file1 and commit, results in r5.\n #Merge r4 from A/RENAMED_C to A/C\n #Merge r2:5 from A/RENAMED_C to A/C <-- This fails tracked via #3032.\n\n ## See https://issues.apache.org/jira/browse/SVN-3032. 
##\n\n # Create a WC with a single branch\n sbox.build()\n wc_dir = sbox.wc_dir\n # Some paths we'll care about\n A_C_url = sbox.repo_url + '/A/C'\n A_COPY_C_url = sbox.repo_url + '/A/COPY_C'\n A_RENAMED_C_url = sbox.repo_url + '/A/RENAMED_C'\n A_C_path = sbox.ospath('A/C')\n A_RENAMED_C_path = sbox.ospath('A/RENAMED_C')\n A_RENAMED_C_file1_path = sbox.ospath('A/RENAMED_C/file1')\n\n svntest.main.run_svn(None, 'cp', A_C_url, A_COPY_C_url, '-m', 'copy...')\n svntest.main.run_svn(None, 'mv', A_COPY_C_url, A_RENAMED_C_url, '-m',\n 'rename...')\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n svntest.main.file_write(A_RENAMED_C_file1_path, \"This is the file1.\\n\")\n svntest.main.run_svn(None, 'add', A_RENAMED_C_file1_path)\n expected_output = wc.State(A_RENAMED_C_path, {\n 'file1' : Item(verb='Adding'),\n })\n expected_status = wc.State(A_RENAMED_C_path, {\n '' : Item(status=' ', wc_rev=3),\n 'file1' : Item(status=' ', wc_rev=4),\n })\n svntest.actions.run_and_verify_commit(A_RENAMED_C_path, expected_output,\n expected_status)\n svntest.main.file_write(A_RENAMED_C_file1_path,\n \"This is the file1 modified.\\n\")\n expected_output = wc.State(A_RENAMED_C_path, {\n 'file1' : Item(verb='Sending'),\n })\n expected_status.tweak('file1', wc_rev=5)\n svntest.actions.run_and_verify_commit(A_RENAMED_C_path, expected_output,\n expected_status)\n\n expected_skip = wc.State(A_C_path, {})\n expected_output = wc.State(A_C_path, {\n 'file1' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_C_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_C_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/RENAMED_C:4'}),\n 'file1' : Item(\"This is the file1.\\n\"),\n })\n expected_status = wc.State(A_C_path, {\n '' : Item(status=' M', wc_rev=3),\n 'file1' : Item(status='A ', wc_rev='-', copied='+'),\n })\n svntest.actions.run_and_verify_merge(A_C_path, 3, 4,\n A_RENAMED_C_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n expected_output = wc.State(A_C_path, {\n 'file1' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_C_path, {\n '' : Item(status=' G'),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/RENAMED_C:3-5'}),\n 'file1' : Item(\"This is the file1 modified.\\n\"),\n })\n expected_status = wc.State(A_C_path, {\n '' : Item(status=' M', wc_rev=3),\n 'file1' : Item(status='A ', wc_rev='-', copied='+'),\n })\n svntest.actions.run_and_verify_merge(A_C_path, 2, 5,\n A_RENAMED_C_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n#----------------------------------------------------------------------\n# Test for part of issue #2877: 'do subtree merge only if subtree has\n# explicit mergeinfo set and exists in the merge source'\n@SkipUnless(server_has_mergeinfo)\n@Issue(2877)\ndef merge_source_normalization_and_subtree_merges(sbox):\n \"normalized mergeinfo is recorded on subtrees\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n D_COPY_path = sbox.ospath('A_COPY/D')\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n\n # Use our helper to copy 'A' to 'A_COPY' then make some changes under 'A'\n wc_disk, wc_status = set_up_branch(sbox)\n\n # r7 - Move A to A_MOVED\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 
'Committed revision 7.\\n'],\n [], 'mv', '-m', 'mv A to A_MOVED',\n sbox.repo_url + '/A',\n sbox.repo_url + '/A_MOVED')\n wc_status.add({\n 'A_MOVED/B' : Item(),\n 'A_MOVED/B/lambda' : Item(),\n 'A_MOVED/B/E' : Item(),\n 'A_MOVED/B/E/alpha' : Item(),\n 'A_MOVED/B/E/beta' : Item(),\n 'A_MOVED/B/F' : Item(),\n 'A_MOVED/mu' : Item(),\n 'A_MOVED/C' : Item(),\n 'A_MOVED/D' : Item(),\n 'A_MOVED/D/gamma' : Item(),\n 'A_MOVED/D/G' : Item(),\n 'A_MOVED/D/G/pi' : Item(),\n 'A_MOVED/D/G/rho' : Item(),\n 'A_MOVED/D/G/tau' : Item(),\n 'A_MOVED/D/H' : Item(),\n 'A_MOVED/D/H/chi' : Item(),\n 'A_MOVED/D/H/omega' : Item(),\n 'A_MOVED/D/H/psi' : Item(),\n 'A_MOVED' : Item()})\n wc_status.remove('A', 'A/B', 'A/B/lambda', 'A/B/E', 'A/B/E/alpha',\n 'A/B/E/beta', 'A/B/F', 'A/mu', 'A/C', 'A/D',\n 'A/D/gamma', 'A/D/G', 'A/D/G/pi', 'A/D/G/rho',\n 'A/D/G/tau' , 'A/D/H', 'A/D/H/chi', 'A/D/H/omega',\n 'A/D/H/psi')\n wc_status.tweak(status=' ', wc_rev=7)\n\n # Update the WC\n svntest.actions.run_and_verify_svn(None, [],\n 'update', wc_dir)\n\n # r8 - Make a text mod to 'A_MOVED/D/G/tau'\n svntest.main.file_write(sbox.ospath('A_MOVED/D/G/tau'),\n \"New content\")\n expected_output = wc.State(wc_dir,\n {'A_MOVED/D/G/tau' : Item(verb='Sending')})\n wc_status.tweak('A_MOVED/D/G/tau', status=' ', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Merge -c4 URL/A_MOVED/D/G A_COPY/D/G.\n #\n # A_MOVED/D/G doesn't exist at r3:4, it's still A/D/G,\n # so the merge source normalization logic should set\n # mergeinfo of '/A/D/G:4' on A_COPY/D/G, *not* 'A_MOVED/D/G:4',\n # see issue #2953.\n expected_output = wc.State(G_COPY_path, {\n 'rho' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(G_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(G_COPY_path, {\n })\n expected_status = wc.State(G_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n 'pi' : Item(status=' ', wc_rev=7),\n 'rho' : Item(status='M ', wc_rev=7),\n 'tau' : Item(status=' ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/G:4'}),\n 'pi' : Item(\"This is the file 'pi'.\\n\"),\n 'rho' : Item(\"New content\"),\n 'tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_skip = wc.State(G_COPY_path, { })\n svntest.actions.run_and_verify_merge(G_COPY_path, '3', '4',\n sbox.repo_url + '/A_MOVED/D/G',\n None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merge -c8 URL/A_MOVED/D A_COPY/D.\n #\n # The merge target A_COPY/D and the subtree at A_COPY/D/G\n # should both have their mergeinfo updated with r8\n # from A_MOVED_D, see reopened issue #2877.\n expected_output = wc.State(D_COPY_path, {\n 'G/tau' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(D_COPY_path, {\n '' : Item(status=' U'),\n 'G' : Item(status=' G'),\n })\n expected_elision_output = wc.State(D_COPY_path, {\n })\n expected_status = wc.State(D_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n 'G' : Item(status=' M', wc_rev=7),\n 'G/pi' : Item(status=' ', wc_rev=7),\n 'G/rho' : Item(status='M ', wc_rev=7),\n 'G/tau' : Item(status='M ', wc_rev=7),\n 'H' : Item(status=' ', wc_rev=7),\n 'H/chi' : Item(status=' ', wc_rev=7),\n 'H/psi' : Item(status=' ', wc_rev=7),\n 'H/omega' : Item(status=' ', wc_rev=7),\n 'gamma' : Item(status=' ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A_MOVED/D:8'}),\n 'G' : 
Item(props={SVN_PROP_MERGEINFO :\n '/A/D/G:4\\n/A_MOVED/D/G:8'}),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"New content\"),\n 'G/tau' : Item(\"New content\"),\n 'H' : Item(),\n 'H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\")\n })\n expected_skip = wc.State(D_COPY_path, { })\n svntest.actions.run_and_verify_merge(D_COPY_path, '7', '8',\n sbox.repo_url + '/A_MOVED/D',\n None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Tests for issue #3067: 'subtrees with intersecting mergeinfo, that don't\n# exist at the start of a merge range shouldn't break the merge'\n@SkipUnless(server_has_mergeinfo)\n@Issue(3067)\ndef new_subtrees_should_not_break_merge(sbox):\n \"subtrees added after start of merge range are ok\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n D_COPY_path = sbox.ospath('A_COPY/D')\n nu_path = sbox.ospath('A/D/H/nu')\n nu_COPY_path = sbox.ospath('A_COPY/D/H/nu')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n\n # Create 'A/D/H/nu', commit it as r7, make a text mod to it in r8.\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Adding')})\n wc_status.add({'A/D/H/nu' : Item(status=' ', wc_rev=7)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n svntest.main.file_write(nu_path, \"New content\")\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Sending')})\n wc_status.tweak('A/D/H/nu', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Merge r7 to A_COPY/D/H, then, so it has it's own explicit mergeinfo,\n # then merge r8 to A_COPY/D/H/nu so it too has explicit mergeinfo.\n expected_output = wc.State(H_COPY_path, {\n 'nu' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(H_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(H_COPY_path, {\n })\n expected_status = wc.State(H_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'psi' : Item(status=' ', wc_rev=2),\n 'omega' : Item(status=' ', wc_rev=2),\n 'chi' : Item(status=' ', wc_rev=2),\n 'nu' : Item(status='A ', copied='+', wc_rev='-'),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:7'}),\n 'psi' : Item(\"This is the file 'psi'.\\n\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n 'nu' : Item(\"This is the file 'nu'.\\n\"),\n })\n expected_skip = wc.State(H_COPY_path, {})\n svntest.actions.run_and_verify_merge(H_COPY_path, '6', '7',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[8]],\n ['U ' + nu_COPY_path + '\\n',\n ' G ' + 
nu_COPY_path + '\\n']),\n [], 'merge', '-c8', '--allow-mixed-revisions',\n sbox.repo_url + '/A/D/H/nu', nu_COPY_path)\n\n # Merge -r4:6 to A_COPY, then reverse merge r6 from A_COPY/D.\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U '),\n 'D/H/omega': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'B' : Item(status=' ', wc_rev=2),\n 'mu' : Item(status=' ', wc_rev=2),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha' : Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status='M ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' ', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status=' ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' M', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status=' ', wc_rev=2),\n 'D/H/omega' : Item(status='M ', wc_rev=2),\n 'D/H/nu' : Item(status='A ', copied='+', wc_rev='-'),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5-6'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:5-7'}),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"New content\"),\n 'D/H/nu' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/nu:7-8'}),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '4', '6',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n expected_output = wc.State(D_COPY_path, {\n 'H/omega': Item(status='G '),\n })\n expected_mergeinfo_output = wc.State(D_COPY_path, {\n '' : Item(status=' G'),\n 'H' : Item(status=' G'),\n })\n expected_elision_output = wc.State(D_COPY_path, {\n })\n expected_status = wc.State(D_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'G' : Item(status=' ', wc_rev=2),\n 'G/pi' : Item(status=' ', wc_rev=2),\n 'G/rho' : Item(status=' ', wc_rev=2),\n 'G/tau' : Item(status=' ', wc_rev=2),\n 'gamma' : Item(status=' ', wc_rev=2),\n 'H' : Item(status=' M', wc_rev=2),\n 'H/chi' : Item(status=' ', wc_rev=2),\n 'H/psi' : Item(status=' ', wc_rev=2),\n 'H/omega' : Item(status=' ', wc_rev=2),\n 'H/nu' : Item(status='A ', copied='+', wc_rev='-'),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D:5'}),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'H' : 
Item(props={SVN_PROP_MERGEINFO : '/A/D/H:5,7'}),\n 'H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'H/nu' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/nu:7-8'}),\n })\n expected_skip = wc.State(D_COPY_path, { })\n svntest.actions.run_and_verify_merge(D_COPY_path, '6', '5',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n # Now once again merge r6 to A_COPY. A_COPY already has r6 in its mergeinfo\n # so we expect only subtree merges on A_COPY/D, A_COPY_D_H, and\n # A_COPY/D/H/nu. The fact that A/D/H/nu doesn't exist at r6 should not cause\n # the merge to fail -- see\n # https://issues.apache.org/jira/browse/SVN-3067#desc7.\n expected_output = wc.State(A_COPY_path, {\n 'D/H/omega': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' G'),\n 'D' : Item(status=' G'),\n 'D/H' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n 'D' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'B' : Item(status=' ', wc_rev=2),\n 'mu' : Item(status=' ', wc_rev=2),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha' : Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status='M ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' ', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status=' ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' M', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status=' ', wc_rev=2),\n 'D/H/omega' : Item(status='M ', wc_rev=2),\n 'D/H/nu' : Item(status='A ', copied='+', wc_rev='-'),\n })\n expected_disk_1 = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5-6'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(), # Mergeinfo elides to 'A_COPY'\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:5-7'}),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"New content\"),\n 'D/H/nu' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/nu:7-8'}),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '5', '6',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk_1,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Commit this merge as r9.\n #\n # Update the wc first to make setting the expected status a bit easier.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [],\n 'up', wc_dir)\n wc_status.tweak(wc_rev=8)\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 
'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D/H' : Item(verb='Sending'),\n 'A_COPY/D/H/nu' : Item(verb='Adding'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY',\n 'A_COPY/B/E/beta',\n 'A_COPY/D/H',\n 'A_COPY/D/H/omega',\n wc_rev=9)\n wc_status.add({'A_COPY/D/H/nu' : Item(status=' ', wc_rev=9)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n # Update the WC.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [],\n 'up', wc_dir)\n wc_status.tweak(wc_rev=9)\n\n # Yet another test for issue #3067. Merge -rX:Y, where X>Y (reverse merge)\n # and the merge target has a subtree that came into existence at some rev\n # N where X < N < Y. This merge should simply delete the subtree.\n #\n # For this test merge -r9:2 to A_COPY. This should revert all the merges\n # done thus far, leaving the tree rooted at A_COPY with no explicit\n # mergeinfo.\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U '),\n 'D/H/omega': Item(status='U '),\n 'D/H/nu' : Item(status='D '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H': Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H': Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status=' ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status='M ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status=' ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' M', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/psi' : Item(status=' ', wc_rev=9),\n 'D/H/omega' : Item(status='M ', wc_rev=9),\n 'D/H/nu' : Item(status='D ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '9', '2',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Revert the previous merge, then merge r4 to A_COPY/D/G/rho. 
Commit\n # this merge as r10.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[4]],\n ['U ' + rho_COPY_path + '\\n',\n ' G ' + rho_COPY_path + '\\n']),\n [], 'merge', '-c4', sbox.repo_url + '/A/D/G/rho', rho_COPY_path)\n expected_output = wc.State(wc_dir, {\n 'A_COPY/D/G/rho' : Item(verb='Sending'),})\n wc_status.tweak('A_COPY/D/G/rho', wc_rev=10)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n svntest.actions.run_and_verify_svn(exp_noop_up_out(10), [],\n 'up', wc_dir)\n wc_status.tweak(wc_rev=10)\n\n # Yet another test for issue #3067. Merge -rX:Y, where X>Y (reverse merge)\n # and the merge target has a subtree that doesn't exist in the merge source\n # between X and Y. This merge should no effect on that subtree.\n #\n # Specifically, merge -c4 to A_COPY. This should revert the previous merge\n # of r4 directly to A_COPY/D/G/rho. The subtree A_COPY/D/H/nu, whose merge\n # source A/D/H/nu doesn't in r4:3, shouldn't be affected nor should it break\n # the merge editor.\n expected_output = wc.State(A_COPY_path, {\n 'D/G/rho': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/G/rho' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n 'D/G/rho' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' ', wc_rev=10),\n 'B' : Item(status=' ', wc_rev=10),\n 'mu' : Item(status=' ', wc_rev=10),\n 'B/E' : Item(status=' ', wc_rev=10),\n 'B/E/alpha' : Item(status=' ', wc_rev=10),\n 'B/E/beta' : Item(status=' ', wc_rev=10),\n 'B/lambda' : Item(status=' ', wc_rev=10),\n 'B/F' : Item(status=' ', wc_rev=10),\n 'C' : Item(status=' ', wc_rev=10),\n 'D' : Item(status=' ', wc_rev=10),\n 'D/G' : Item(status=' ', wc_rev=10),\n 'D/G/pi' : Item(status=' ', wc_rev=10),\n 'D/G/rho' : Item(status='MM', wc_rev=10),\n 'D/G/tau' : Item(status=' ', wc_rev=10),\n 'D/gamma' : Item(status=' ', wc_rev=10),\n 'D/H' : Item(status=' ', wc_rev=10),\n 'D/H/chi' : Item(status=' ', wc_rev=10),\n 'D/H/psi' : Item(status=' ', wc_rev=10),\n 'D/H/omega' : Item(status=' ', wc_rev=10),\n 'D/H/nu' : Item(status=' ', wc_rev=10),\n })\n # Use expected_disk_1 from above since we should be\n # returning to that state.\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '4', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk_1,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef dont_add_mergeinfo_from_own_history(sbox):\n \"cyclic merges don't add mergeinfo from own history\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_path = sbox.ospath('A')\n A_MOVED_path = sbox.ospath('A_MOVED')\n mu_path = sbox.ospath('A/mu')\n mu_MOVED_path = sbox.ospath('A_MOVED/mu')\n A_COPY_path = sbox.ospath('A_COPY')\n mu_COPY_path = sbox.ospath('A_COPY/mu')\n\n # Merge r3 from 'A' to 'A_COPY', make a text mod to 'A_COPY/mu' and\n # commit both as r7. This results in mergeinfo of '/A:3' on 'A_COPY'.\n # Then merge r7 from 'A_COPY' to 'A'. 
This attempts to add the mergeinfo\n # '/A:3' to 'A', but that is self-referrential and should be filtered out,\n # leaving only the mergeinfo '/A_COPY:7' on 'A'.\n expected_output = wc.State(A_COPY_path, {\n 'D/H/psi' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_A_COPY_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'B' : Item(status=' ', wc_rev=2),\n 'mu' : Item(status=' ', wc_rev=2),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha' : Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status=' ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' ', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status=' ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' ', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status='M ', wc_rev=2),\n 'D/H/omega' : Item(status=' ', wc_rev=2),\n })\n expected_A_COPY_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_A_COPY_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_COPY_disk,\n expected_A_COPY_status,\n expected_A_COPY_skip,\n check_props=True)\n\n # Change 'A_COPY/mu'\n svntest.main.file_write(mu_COPY_path, \"New content\")\n\n # Commit r7\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n 'A_COPY/mu' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY', 'A_COPY/D/H/psi', 'A_COPY/mu', wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n # Merge r7 back to the 'A'\n expected_output = wc.State(A_path, {\n 'mu' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_path, {\n })\n expected_A_status = wc.State(A_path, {\n '' : Item(status=' M', wc_rev=1),\n 'B' : Item(status=' ', wc_rev=1),\n 'mu' : Item(status='M ', wc_rev=1),\n 'B/E' : Item(status=' ', wc_rev=1),\n 'B/E/alpha' : Item(status=' ', wc_rev=1),\n 'B/E/beta' : Item(status=' ', wc_rev=5),\n 'B/lambda' : Item(status=' ', wc_rev=1),\n 'B/F' : Item(status=' ', wc_rev=1),\n 'C' : Item(status=' ', wc_rev=1),\n 'D' : Item(status=' ', wc_rev=1),\n 'D/G' : Item(status=' ', wc_rev=1),\n 'D/G/pi' : Item(status=' ', wc_rev=1),\n 'D/G/rho' : Item(status=' ', wc_rev=4),\n 'D/G/tau' : Item(status=' ', wc_rev=1),\n 'D/gamma' : 
Item(status=' ', wc_rev=1),\n 'D/H' : Item(status=' ', wc_rev=1),\n 'D/H/chi' : Item(status=' ', wc_rev=1),\n 'D/H/psi' : Item(status=' ', wc_rev=3),\n 'D/H/omega' : Item(status=' ', wc_rev=6),\n })\n expected_A_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A_COPY:7'}),\n 'B' : Item(),\n 'mu' : Item(\"New content\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_A_skip = wc.State(A_path, {})\n svntest.actions.run_and_verify_merge(A_path, '6', '7',\n sbox.repo_url + '/A_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_disk,\n expected_A_status,\n expected_A_skip,\n [], True, False,\n '--allow-mixed-revisions', A_path)\n\n # Revert all local mods\n svntest.actions.run_and_verify_svn([\"Reverted '\" + A_path + \"'\\n\",\n \"Reverted '\" + mu_path + \"'\\n\"],\n [], 'revert', '-R', wc_dir)\n\n # Move 'A' to 'A_MOVED' and once again merge r7 from 'A_COPY', this time\n # to 'A_MOVED'. This attempts to add the mergeinfo '/A:3' to\n # 'A_MOVED', but 'A_MOVED@3' is 'A', so again this mergeinfo is filtered\n # out, leaving the only the mergeinfo created from the merge itself:\n # '/A_COPY:7'.\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 8.\\n'],\n [], 'move',\n sbox.repo_url + '/A',\n sbox.repo_url + '/A_MOVED',\n '-m', 'Copy A to A_MOVED')\n wc_status.remove('A', 'A/B', 'A/B/lambda', 'A/B/E', 'A/B/E/alpha',\n 'A/B/E/beta', 'A/B/F', 'A/mu', 'A/C', 'A/D', 'A/D/gamma', 'A/D/G',\n 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', 'A/D/H', 'A/D/H/chi',\n 'A/D/H/omega', 'A/D/H/psi')\n wc_status.add({\n 'A_MOVED' : Item(),\n 'A_MOVED/B' : Item(),\n 'A_MOVED/B/lambda' : Item(),\n 'A_MOVED/B/E' : Item(),\n 'A_MOVED/B/E/alpha' : Item(),\n 'A_MOVED/B/E/beta' : Item(),\n 'A_MOVED/B/F' : Item(),\n 'A_MOVED/mu' : Item(),\n 'A_MOVED/C' : Item(),\n 'A_MOVED/D' : Item(),\n 'A_MOVED/D/gamma' : Item(),\n 'A_MOVED/D/G' : Item(),\n 'A_MOVED/D/G/pi' : Item(),\n 'A_MOVED/D/G/rho' : Item(),\n 'A_MOVED/D/G/tau' : Item(),\n 'A_MOVED/D/H' : Item(),\n 'A_MOVED/D/H/chi' : Item(),\n 'A_MOVED/D/H/omega' : Item(),\n 'A_MOVED/D/H/psi' : Item(),\n })\n wc_status.tweak(wc_rev=8, status=' ')\n wc_disk.remove('A', 'A/B', 'A/B/lambda', 'A/B/E', 'A/B/E/alpha',\n 'A/B/E/beta', 'A/B/F', 'A/mu', 'A/C', 'A/D', 'A/D/gamma',\n 'A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau', 'A/D/H',\n 'A/D/H/chi', 'A/D/H/omega', 'A/D/H/psi' )\n wc_disk.add({\n 'A_MOVED' : Item(),\n 'A_MOVED/B' : Item(),\n 'A_MOVED/B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'A_MOVED/B/E' : Item(),\n 'A_MOVED/B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'A_MOVED/B/E/beta' : Item(\"New content\"),\n 'A_MOVED/B/F' : Item(),\n 'A_MOVED/mu' : Item(\"This is the file 'mu'.\\n\"),\n 'A_MOVED/C' : Item(),\n 'A_MOVED/D' : Item(),\n 'A_MOVED/D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'A_MOVED/D/G' : Item(),\n 'A_MOVED/D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'A_MOVED/D/G/rho' : Item(\"New content\"),\n 
'A_MOVED/D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'A_MOVED/D/H' : Item(),\n 'A_MOVED/D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'A_MOVED/D/H/omega' : Item(\"New content\"),\n 'A_MOVED/D/H/psi' : Item(\"New content\"),\n })\n wc_disk.tweak('A_COPY/D/H/psi', 'A_COPY/mu', contents='New content')\n wc_disk.tweak('A_COPY', props={SVN_PROP_MERGEINFO : '/A:3'})\n expected_output = wc.State(wc_dir, {\n 'A' : Item(status='D '),\n 'A_MOVED' : Item(status='A '),\n 'A_MOVED/B' : Item(status='A '),\n 'A_MOVED/B/lambda' : Item(status='A '),\n 'A_MOVED/B/E' : Item(status='A '),\n 'A_MOVED/B/E/alpha' : Item(status='A '),\n 'A_MOVED/B/E/beta' : Item(status='A '),\n 'A_MOVED/B/F' : Item(status='A '),\n 'A_MOVED/mu' : Item(status='A '),\n 'A_MOVED/C' : Item(status='A '),\n 'A_MOVED/D' : Item(status='A '),\n 'A_MOVED/D/gamma' : Item(status='A '),\n 'A_MOVED/D/G' : Item(status='A '),\n 'A_MOVED/D/G/pi' : Item(status='A '),\n 'A_MOVED/D/G/rho' : Item(status='A '),\n 'A_MOVED/D/G/tau' : Item(status='A '),\n 'A_MOVED/D/H' : Item(status='A '),\n 'A_MOVED/D/H/chi' : Item(status='A '),\n 'A_MOVED/D/H/omega' : Item(status='A '),\n 'A_MOVED/D/H/psi' : Item(status='A ')\n })\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n wc_disk,\n wc_status,\n check_props=True)\n\n expected_output = wc.State(A_MOVED_path, {\n 'mu' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_MOVED_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_MOVED_path, {\n })\n expected_A_status = wc.State(A_MOVED_path, {\n '' : Item(status=' M', wc_rev=8),\n 'B' : Item(status=' ', wc_rev=8),\n 'mu' : Item(status='M ', wc_rev=8),\n 'B/E' : Item(status=' ', wc_rev=8),\n 'B/E/alpha' : Item(status=' ', wc_rev=8),\n 'B/E/beta' : Item(status=' ', wc_rev=8),\n 'B/lambda' : Item(status=' ', wc_rev=8),\n 'B/F' : Item(status=' ', wc_rev=8),\n 'C' : Item(status=' ', wc_rev=8),\n 'D' : Item(status=' ', wc_rev=8),\n 'D/G' : Item(status=' ', wc_rev=8),\n 'D/G/pi' : Item(status=' ', wc_rev=8),\n 'D/G/rho' : Item(status=' ', wc_rev=8),\n 'D/G/tau' : Item(status=' ', wc_rev=8),\n 'D/gamma' : Item(status=' ', wc_rev=8),\n 'D/H' : Item(status=' ', wc_rev=8),\n 'D/H/chi' : Item(status=' ', wc_rev=8),\n 'D/H/psi' : Item(status=' ', wc_rev=8),\n 'D/H/omega' : Item(status=' ', wc_rev=8),\n })\n # We can reuse expected_A_disk from above without change.\n svntest.actions.run_and_verify_merge(A_MOVED_path, '6', '7',\n sbox.repo_url + '/A_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_disk,\n expected_A_status,\n expected_A_skip,\n check_props=True)\n\n # Revert all local mods\n svntest.actions.run_and_verify_svn([\"Reverted '\" + A_MOVED_path + \"'\\n\",\n \"Reverted '\" + mu_MOVED_path + \"'\\n\"],\n [], 'revert', '-R', wc_dir)\n\n # Create a new 'A' unrelated to the old 'A' which was moved. Then merge\n # r7 from 'A_COPY' to this new 'A'. 
Since the new 'A' shares no history\n # with the mergeinfo 'A@3', the mergeinfo '/A:3' is added and when combined\n # with the mergeinfo created from the merge should result in\n # '/A:3\\n/A_COPY:7'\n #\n # Create the new 'A' by exporting the old 'A@1'.\n expected_output = svntest.verify.UnorderedOutput(\n [\"A \" + sbox.ospath('A') + \"\\n\",\n \"A \" + sbox.ospath('A/B') + \"\\n\",\n \"A \" + sbox.ospath('A/B/lambda') + \"\\n\",\n \"A \" + sbox.ospath('A/B/E') + \"\\n\",\n \"A \" + sbox.ospath('A/B/E/alpha') + \"\\n\",\n \"A \" + sbox.ospath('A/B/E/beta') + \"\\n\",\n \"A \" + sbox.ospath('A/B/F') + \"\\n\",\n \"A \" + sbox.ospath('A/mu') + \"\\n\",\n \"A \" + sbox.ospath('A/C') + \"\\n\",\n \"A \" + sbox.ospath('A/D') + \"\\n\",\n \"A \" + sbox.ospath('A/D/gamma') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G/pi') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G/rho') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G/tau') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H/chi') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H/omega') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H/psi') + \"\\n\",\n \"Exported revision 1.\\n\",]\n )\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'export', sbox.repo_url + '/A@1',\n A_path)\n expected_output = svntest.verify.UnorderedOutput(\n [\"A \" + sbox.ospath('A') + \"\\n\",\n \"A \" + sbox.ospath('A/B') + \"\\n\",\n \"A \" + sbox.ospath('A/B/lambda') + \"\\n\",\n \"A \" + sbox.ospath('A/B/E') + \"\\n\",\n \"A \" + sbox.ospath('A/B/E/alpha') + \"\\n\",\n \"A \" + sbox.ospath('A/B/E/beta') + \"\\n\",\n \"A \" + sbox.ospath('A/B/F') + \"\\n\",\n \"A \" + sbox.ospath('A/mu') + \"\\n\",\n \"A \" + sbox.ospath('A/C') + \"\\n\",\n \"A \" + sbox.ospath('A/D') + \"\\n\",\n \"A \" + sbox.ospath('A/D/gamma') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G/pi') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G/rho') + \"\\n\",\n \"A \" + sbox.ospath('A/D/G/tau') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H/chi') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H/omega') + \"\\n\",\n \"A \" + sbox.ospath('A/D/H/psi') + \"\\n\",]\n )\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'add', A_path)\n # Commit the new 'A' as r9\n expected_output = wc.State(wc_dir, {\n 'A' : Item(verb='Adding'),\n 'A/B' : Item(verb='Adding'),\n 'A/mu' : Item(verb='Adding'),\n 'A/B/E' : Item(verb='Adding'),\n 'A/B/E/alpha' : Item(verb='Adding'),\n 'A/B/E/beta' : Item(verb='Adding'),\n 'A/B/lambda' : Item(verb='Adding'),\n 'A/B/F' : Item(verb='Adding'),\n 'A/C' : Item(verb='Adding'),\n 'A/D' : Item(verb='Adding'),\n 'A/D/G' : Item(verb='Adding'),\n 'A/D/G/pi' : Item(verb='Adding'),\n 'A/D/G/rho' : Item(verb='Adding'),\n 'A/D/G/tau' : Item(verb='Adding'),\n 'A/D/gamma' : Item(verb='Adding'),\n 'A/D/H' : Item(verb='Adding'),\n 'A/D/H/chi' : Item(verb='Adding'),\n 'A/D/H/psi' : Item(verb='Adding'),\n 'A/D/H/omega' : Item(verb='Adding'),\n })\n wc_status.tweak(wc_rev=8)\n wc_status.add({\n 'A' : Item(wc_rev=9),\n 'A/B' : Item(wc_rev=9),\n 'A/B/lambda' : Item(wc_rev=9),\n 'A/B/E' : Item(wc_rev=9),\n 'A/B/E/alpha' : Item(wc_rev=9),\n 'A/B/E/beta' : Item(wc_rev=9),\n 'A/B/F' : Item(wc_rev=9),\n 'A/mu' : Item(wc_rev=9),\n 'A/C' : Item(wc_rev=9),\n 'A/D' : Item(wc_rev=9),\n 'A/D/gamma' : Item(wc_rev=9),\n 'A/D/G' : Item(wc_rev=9),\n 'A/D/G/pi' : Item(wc_rev=9),\n 'A/D/G/rho' : Item(wc_rev=9),\n 'A/D/G/tau' : Item(wc_rev=9),\n 'A/D/H' : Item(wc_rev=9),\n 'A/D/H/chi' : 
Item(wc_rev=9),\n 'A/D/H/omega' : Item(wc_rev=9),\n 'A/D/H/psi' : Item(wc_rev=9),\n })\n wc_status.tweak(status=' ')\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n expected_output = wc.State(A_path, {\n 'mu' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n '' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(A_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_path, {\n })\n expected_A_status = wc.State(A_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status='M ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status=' ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status=' ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' ', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/psi' : Item(status='M ', wc_rev=9),\n 'D/H/omega' : Item(status=' ', wc_rev=9),\n })\n expected_A_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3\\n/A_COPY:7'}),\n 'B' : Item(),\n 'mu' : Item(\"New content\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_A_skip = wc.State(A_path, {})\n svntest.actions.run_and_verify_merge(A_path, '6', '7',\n sbox.repo_url + '/A_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_disk,\n expected_A_status,\n expected_A_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(3094)\ndef merge_range_predates_history(sbox):\n \"merge range predates history\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n iota_path = sbox.ospath('iota')\n trunk_file_path = sbox.ospath('trunk/file')\n trunk_url = sbox.repo_url + \"/trunk\"\n branches_url = sbox.repo_url + \"/branches\"\n branch_path = sbox.ospath('branches/branch')\n branch_file_path = sbox.ospath('branches/branch/file')\n branch_url = sbox.repo_url + \"/branches/branch\"\n\n # Tweak a file and commit. (r2)\n svntest.main.file_append(iota_path, \"More data.\\n\")\n sbox.simple_commit(message='tweak iota')\n\n # Create our trunk and branches directory, and update working copy. (r3)\n svntest.main.run_svn(None, 'mkdir', trunk_url, branches_url,\n '-m', 'add trunk and branches dirs')\n svntest.main.run_svn(None, 'up', wc_dir)\n\n # Add a file to the trunk and commit. (r4)\n svntest.main.file_append(trunk_file_path, \"This is the file 'file'.\\n\")\n svntest.main.run_svn(None, 'add', trunk_file_path)\n sbox.simple_commit(message='add trunk file')\n\n # Branch trunk from r3, and update working copy. 
(r5)\n svntest.main.run_svn(None, 'cp', trunk_url, branch_url, '-r3',\n '-m', 'branch trunk@2')\n svntest.main.run_svn(None, 'up', wc_dir)\n\n # Now, try to merge trunk into the branch. There should be one\n # outstanding change -- the addition of the file.\n expected_output = expected_merge_output([[4,5]],\n ['A ' + branch_file_path + '\\n',\n ' U ' + branch_path + '\\n'])\n svntest.actions.run_and_verify_svn(expected_output, [], 'merge',\n trunk_url, branch_path)\n\n#----------------------------------------------------------------------\n@Issue(3623)\ndef foreign_repos(sbox):\n \"merge from a foreign repository\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make a copy of this repository and associated working copy. Both\n # should have nothing but a Greek tree in them, and the two\n # repository UUIDs should differ.\n sbox2 = sbox.clone_dependent(True)\n sbox2.build()\n wc_dir2 = sbox2.wc_dir\n\n # Convenience variables for working copy paths.\n Z_path = sbox.ospath('A/D/G/Z')\n B_path = sbox.ospath('A/B')\n Q_path = sbox.ospath('Q')\n H_path = sbox.ospath('A/D/H')\n iota_path = sbox.ospath('iota')\n beta_path = sbox.ospath('A/B/E/beta')\n alpha_path = sbox.ospath('A/B/E/alpha')\n zeta_path = sbox.ospath('A/D/G/Z/zeta')\n fred_path = sbox.ospath('A/C/fred')\n\n # Add new directories, with and without properties.\n svntest.main.run_svn(None, 'mkdir', Q_path, Z_path)\n svntest.main.run_svn(None, 'pset', 'foo', 'bar', Z_path)\n\n # Add new files, with contents, with and without properties.\n zeta_contents = \"This is the file 'zeta'.\\n\"\n fred_contents = \"This is the file 'fred'.\\n\"\n svntest.main.file_append(zeta_path, zeta_contents)\n svntest.main.file_append(fred_path, fred_contents)\n svntest.main.run_svn(None, 'add', zeta_path, fred_path)\n svntest.main.run_svn(None, 'pset', 'foo', 'bar', fred_path)\n\n # Modify existing files and directories.\n added_contents = \"This is another line of text.\\n\"\n svntest.main.file_append(iota_path, added_contents)\n svntest.main.file_append(beta_path, added_contents)\n svntest.main.run_svn(None, 'pset', 'foo', 'bar', iota_path, B_path)\n\n # Delete some stuff\n svntest.main.run_svn(None, 'delete', alpha_path, H_path)\n\n # Commit up these changes.\n expected_output = wc.State(wc_dir, {\n 'Q' : Item(verb='Adding'),\n 'A/D/G/Z' : Item(verb='Adding'),\n 'A/D/G/Z/zeta' : Item(verb='Adding'),\n 'A/C/fred' : Item(verb='Adding'),\n 'iota' : Item(verb='Sending'),\n 'A/B' : Item(verb='Sending'),\n 'A/B/E/beta' : Item(verb='Sending'),\n 'A/B/E/alpha' : Item(verb='Deleting'),\n 'A/D/H' : Item(verb='Deleting'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'Q' : Item(status=' ', wc_rev=2),\n 'A/D/G/Z' : Item(status=' ', wc_rev=2),\n 'A/D/G/Z/zeta' : Item(status=' ', wc_rev=2),\n 'A/C/fred' : Item(status=' ', wc_rev=2),\n })\n expected_status.tweak('iota', 'A/B/E/beta', 'A/B', wc_rev=2)\n expected_status.remove('A/B/E/alpha', 'A/D/H', 'A/D/H/chi',\n 'A/D/H/psi', 'A/D/H/omega')\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'Q' : Item(),\n 'A/D/G/Z' : Item(props={'foo':'bar'}),\n 'A/D/G/Z/zeta' : Item(contents=zeta_contents),\n 'A/C/fred' : Item(contents=fred_contents,props={'foo':'bar'}),\n })\n expected_disk.remove('A/B/E/alpha', 'A/D/H', 'A/D/H/chi',\n 'A/D/H/psi', 'A/D/H/omega')\n expected_disk.tweak('iota',\n contents=expected_disk.desc['iota'].contents\n + added_contents,\n props={'foo':'bar'})\n expected_disk.tweak('A/B', props={'foo':'bar'})\n 
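  # Note: this expected_disk state is reused below to verify not only this
  # working copy but also the foreign-repository working copy (wc_dir2) and
  # the fresh checkout (wc_dir3) made after the cross-repository merge.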
expected_disk.tweak('A/B/E/beta',\n contents=expected_disk.desc['A/B/E/beta'].contents\n + added_contents)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n svntest.actions.verify_disk(wc_dir, expected_disk, True)\n\n # Now, merge our committed revision into a working copy of another\n # repository. Not only should the merge succeed, but the results on\n # disk should match those in our first working copy.\n\n ### TODO: Use run_and_verify_merge() ###\n svntest.main.run_svn(None, 'merge', '-c2', sbox.repo_url, wc_dir2)\n sbox2.simple_commit(message='Merge from foreign repo')\n svntest.actions.verify_disk(wc_dir2, expected_disk, True)\n\n # Now, let's make a third checkout -- our second from the original\n # repository -- and make sure that all the data there is correct.\n # It should look just like the original EXPECTED_DISK.\n # This is a regression test for issue #3623 in which wc_dir2 had the\n # correct state but the committed state was wrong.\n wc_dir3 = sbox.add_wc_path('wc3')\n svntest.actions.run_and_verify_svn(None, [], 'checkout',\n sbox2.repo_url, wc_dir3)\n svntest.actions.verify_disk(wc_dir3, expected_disk, True)\n\n#----------------------------------------------------------------------\ndef foreign_repos_uuid(sbox):\n \"verify uuid of items added via foreign repo merge\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_uuid = svntest.actions.get_wc_uuid(wc_dir)\n\n # Make a copy of this repository and associated working copy. Both\n # should have nothing but a Greek tree in them, and the two\n # repository UUIDs should differ.\n sbox2 = sbox.clone_dependent(True)\n sbox2.build()\n wc_dir2 = sbox2.wc_dir\n wc2_uuid = svntest.actions.get_wc_uuid(wc_dir2)\n\n # Convenience variables for working copy paths.\n zeta_path = sbox.ospath('A/D/G/zeta')\n Z_path = sbox.ospath('A/Z')\n\n # Add new file and directory.\n zeta_contents = \"This is the file 'zeta'.\\n\"\n svntest.main.file_append(zeta_path, zeta_contents)\n os.mkdir(Z_path)\n svntest.main.run_svn(None, 'add', zeta_path, Z_path)\n\n # Commit up these changes.\n expected_output = wc.State(wc_dir, {\n 'A/D/G/zeta' : Item(verb='Adding'),\n 'A/Z' : Item(verb='Adding'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/D/G/zeta' : Item(status=' ', wc_rev=2),\n 'A/Z' : Item(status=' ', wc_rev=2),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'A/D/G/zeta' : Item(contents=zeta_contents),\n 'A/Z' : Item(),\n })\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n svntest.actions.verify_disk(wc_dir, expected_disk, True)\n\n svntest.main.run_svn(None, 'merge', '-c2', sbox.repo_url, wc_dir2)\n sbox2.simple_commit(message='Merge from foreign repos')\n\n # Run info to check the copied rev to make sure it's right\n zeta2_path = os.path.join(wc_dir2, 'A', 'D', 'G', 'zeta')\n expected_info = {\"Path\" : re.escape(zeta2_path), # escape backslashes\n \"URL\" : sbox2.repo_url + \"/A/D/G/zeta\",\n \"Repository Root\" : sbox2.repo_url,\n \"Repository UUID\" : wc2_uuid,\n \"Revision\" : \"2\",\n \"Node Kind\" : \"file\",\n \"Schedule\" : \"normal\",\n }\n svntest.actions.run_and_verify_info([expected_info], zeta2_path)\n\n # Run info to check the copied rev to make sure it's right\n Z2_path = os.path.join(wc_dir2, 'A', 'Z')\n expected_info = {\"Path\" : re.escape(Z2_path), # escape backslashes\n \"URL\" : sbox2.repo_url + \"/A/Z\",\n \"Repository Root\" : sbox2.repo_url,\n \"Repository UUID\" : 
wc2_uuid,\n \"Revision\" : \"2\",\n \"Node Kind\" : \"directory\",\n \"Schedule\" : \"normal\",\n }\n svntest.actions.run_and_verify_info([expected_info], Z2_path)\n\n#----------------------------------------------------------------------\ndef foreign_repos_2_url(sbox):\n \"2-url merge from a foreign repository\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make a copy of this repository and associated working copy. Both\n # should have nothing but a Greek tree in them, and the two\n # repository UUIDs should differ.\n sbox2 = sbox.clone_dependent(True)\n sbox2.build()\n wc_dir2 = sbox2.wc_dir\n\n # Convenience variables for working copy paths.\n Z_path = sbox.ospath('A/D/G/Z')\n Q_path = sbox.ospath('A/Q')\n H_path = sbox.ospath('A/D/H')\n beta_path = sbox.ospath('A/B/E/beta')\n alpha_path = sbox.ospath('A/B/E/alpha')\n zeta_path = sbox.ospath('A/D/G/Z/zeta')\n fred_path = sbox.ospath('A/C/fred')\n\n # First, \"tag\" the current state of the repository.\n svntest.main.run_svn(None, 'copy', sbox.repo_url + '/A',\n sbox.repo_url + '/A-tag1', '-m', 'tag1')\n\n # Add new directories\n svntest.main.run_svn(None, 'mkdir', Q_path, Z_path)\n\n # Add new files\n zeta_contents = \"This is the file 'zeta'.\\n\"\n fred_contents = \"This is the file 'fred'.\\n\"\n svntest.main.file_append(zeta_path, zeta_contents)\n svntest.main.file_append(fred_path, fred_contents)\n svntest.main.run_svn(None, 'add', zeta_path, fred_path)\n\n # Modify existing files\n added_contents = \"This is another line of text.\\n\"\n svntest.main.file_append(beta_path, added_contents)\n\n # Delete some stuff\n svntest.main.run_svn(None, 'delete', alpha_path, H_path)\n\n # Commit up these changes.\n expected_output = wc.State(wc_dir, {\n 'A/Q' : Item(verb='Adding'),\n 'A/D/G/Z' : Item(verb='Adding'),\n 'A/D/G/Z/zeta' : Item(verb='Adding'),\n 'A/C/fred' : Item(verb='Adding'),\n 'A/B/E/beta' : Item(verb='Sending'),\n 'A/B/E/alpha' : Item(verb='Deleting'),\n 'A/D/H' : Item(verb='Deleting'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/Q' : Item(status=' ', wc_rev=3),\n 'A/D/G/Z' : Item(status=' ', wc_rev=3),\n 'A/D/G/Z/zeta' : Item(status=' ', wc_rev=3),\n 'A/C/fred' : Item(status=' ', wc_rev=3),\n })\n expected_status.tweak('A/B/E/beta', wc_rev=3)\n expected_status.remove('A/B/E/alpha', 'A/D/H', 'A/D/H/chi',\n 'A/D/H/psi', 'A/D/H/omega')\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'A/Q' : Item(),\n 'A/D/G/Z' : Item(),\n 'A/D/G/Z/zeta' : Item(contents=zeta_contents),\n 'A/C/fred' : Item(contents=fred_contents),\n })\n expected_disk.remove('A/B/E/alpha', 'A/D/H', 'A/D/H/chi',\n 'A/D/H/psi', 'A/D/H/omega')\n expected_disk.tweak('A/B/E/beta',\n contents=expected_disk.desc['A/B/E/beta'].contents\n + added_contents)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n svntest.actions.verify_disk(wc_dir, expected_disk, True)\n\n # Now, \"tag\" the new state of the repository.\n svntest.main.run_svn(None, 'copy', sbox.repo_url + '/A',\n sbox.repo_url + '/A-tag2', '-m', 'tag2')\n\n # Now, merge across our \"tags\" (copies of /A) into the /A of a\n # working copy of another repository. 
Not only should the merge\n # succeed, but the results on disk should match those in our first\n # working copy.\n\n ### TODO: Use run_and_verify_merge() ###\n svntest.main.run_svn(None, 'merge', sbox.repo_url + '/A-tag1',\n sbox.repo_url + '/A-tag2',\n os.path.join(wc_dir2, 'A'))\n sbox2.simple_commit(message='Merge from foreign repos')\n svntest.actions.verify_disk(wc_dir2, expected_disk, True)\n\n#----------------------------------------------------------------------\n@Issue(1962)\ndef merge_added_subtree(sbox):\n \"merge added subtree\"\n\n # The result of a subtree added by copying\n # or merging an added subtree, should be the same on disk\n ### with the exception of mergeinfo?!\n\n # test for issue 1962\n sbox.build()\n wc_dir = sbox.wc_dir\n url = sbox.repo_url\n\n # make a branch of A\n # svn cp A A_COPY\n A_url = url + \"/A\"\n A_COPY_url = url + \"/A_COPY\"\n A_path = sbox.ospath('A')\n\n svntest.actions.run_and_verify_svn([\"Committing transaction...\\n\",\n \"Committed revision 2.\\n\"], [],\n \"cp\", \"-m\", \"\", A_url, A_COPY_url)\n svntest.actions.run_and_verify_svn([\"Committing transaction...\\n\",\n \"Committed revision 3.\\n\"], [],\n \"cp\", \"-m\", \"\",\n A_COPY_url + '/D',\n A_COPY_url + '/D2')\n expected_output = wc.State(A_path, {\n 'D2' : Item(status='A '),\n 'D2/gamma' : Item(status='A '),\n 'D2/H' : Item(status='A '),\n 'D2/H/chi' : Item(status='A '),\n 'D2/H/psi' : Item(status='A '),\n 'D2/H/omega': Item(status='A '),\n 'D2/G' : Item(status='A '),\n 'D2/G/pi' : Item(status='A '),\n 'D2/G/rho' : Item(status='A '),\n 'D2/G/tau' : Item(status='A ')\n })\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/D2' : Item(status='A ', copied='+', wc_rev='-'),\n 'A/D2/gamma' : Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/H' : Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/H/chi' : Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/H/psi' : Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/H/omega': Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/G' : Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/G/pi' : Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/G/rho' : Item(status=' ', copied='+', wc_rev='-'),\n 'A/D2/G/tau' : Item(status=' ', copied='+', wc_rev='-')\n })\n expected_status.remove('', 'iota')\n\n expected_skip = wc.State('', {})\n expected_disk = svntest.main.greek_state.subtree(\"A\")\n dest_name = ''\n expected_disk.add({\n dest_name + 'D2' : Item(),\n dest_name + 'D2/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n dest_name + 'D2/G' : Item(),\n dest_name + 'D2/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n dest_name + 'D2/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n dest_name + 'D2/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n dest_name + 'D2/H' : Item(),\n dest_name + 'D2/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n dest_name + 'D2/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n dest_name + 'D2/H/psi' : Item(\"This is the file 'psi'.\\n\")\n })\n\n # Using the above information, verify a REPO->WC copy\n svntest.actions.run_and_verify_svn(None, [],\n \"cp\", A_COPY_url + '/D2',\n os.path.join(A_path, \"D2\"))\n svntest.actions.verify_disk(A_path, expected_disk)\n svntest.actions.run_and_verify_status(A_path, expected_status)\n\n # Remove the copy artifacts\n svntest.actions.run_and_verify_svn(None, [],\n \"revert\", \"-R\", A_path)\n svntest.main.safe_rmtree(os.path.join(A_path, \"D2\"))\n\n # Add merge-tracking differences between copying and merging\n # Verify a merge using the 
otherwise unchanged disk and status trees\n expected_status.tweak('A',status=' M')\n expected_mergeinfo_output = wc.State(A_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_path, {\n })\n svntest.actions.run_and_verify_merge(A_path, 2, 3, A_COPY_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip)\n\n#----------------------------------------------------------------------\n# Issue #3138\n@SkipUnless(server_has_mergeinfo)\n@Issue(3138)\ndef merge_unknown_url(sbox):\n \"merging an unknown url should return error\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # remove a path from the repo and commit.\n iota_path = sbox.ospath('iota')\n svntest.actions.run_and_verify_svn(None, [], 'rm', iota_path)\n svntest.actions.run_and_verify_svn(None, [],\n \"ci\", wc_dir, \"-m\", \"log message\")\n\n\n url = sbox.repo_url + \"/iota\"\n expected_err = \".*File not found.*iota.*|.*iota.*path not found.*\"\n svntest.actions.run_and_verify_svn(None, expected_err,\n \"merge\", url, wc_dir)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef reverse_merge_away_all_mergeinfo(sbox):\n \"merges that remove all mergeinfo work\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_H_path = sbox.ospath('A_COPY/D/H')\n\n # Merge r4:8 from A/D/H into A_COPY/D/H.\n expected_output = wc.State(A_COPY_H_path, {\n 'omega' : Item(status='U '),\n 'psi' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_H_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_H_path, {\n })\n expected_status = wc.State(A_COPY_H_path, {\n '' : Item(status=' M', wc_rev=2),\n 'psi' : Item(status='M ', wc_rev=2),\n 'omega' : Item(status='M ', wc_rev=2),\n 'chi' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:3-6'}),\n 'psi' : Item(\"New content\"),\n 'omega' : Item(\"New content\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_H_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_H_path, '2', '6',\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n # Commit the merge as r7\n expected_output = wc.State(wc_dir, {\n 'A_COPY/D/H' : Item(verb='Sending'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY/D/H', 'A_COPY/D/H/omega', 'A_COPY/D/H/psi',\n wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n # Now reverse merge r7 from itself, all mergeinfo should be removed.\n expected_output = wc.State(A_COPY_H_path, {\n '' : Item(status=' U'),\n 'omega' : Item(status='U '),\n 'psi' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(A_COPY_H_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_H_path, {\n '' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_H_path, {\n '' : Item(status=' M', wc_rev=7),\n 'psi' : Item(status='M ', wc_rev=7),\n 'omega' : Item(status='M ', wc_rev=7),\n 'chi' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n 'psi' : Item(\"This is the file 'psi'.\\n\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n 'chi' : 
Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_H_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_H_path, '7', '6',\n sbox.repo_url + '/A_COPY/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [],\n True, False, '--allow-mixed-revisions',\n A_COPY_H_path)\n\n#----------------------------------------------------------------------\n# Issue #3138\n# Another test for issue #3067: 'subtrees with intersecting mergeinfo,\n# that don't exist at the start of a merge range shouldn't break the\n# merge'. Specifically see\n# https://issues.apache.org/jira/browse/SVN-3067#desc5\n@SkipUnless(server_has_mergeinfo)\n@Issues(3138,3067,4217)\ndef dont_merge_revs_into_subtree_that_predate_it(sbox):\n \"dont merge revs into a subtree that predate it\"\n\n # +-> merge -c7 A/D/H/nu@7 H_COPY/nu\n # | +-> merge -c2 A/D/H H_COPY\n # | | +-> merge A/D/H H_COPY\n # | | |\n # A/D/H A----------------------\n # +-psi +-M-------------M------\n # +-nu A-D C---M-D\n # H_COPY C---------G-G\n # +-psi +---------+-.\n # +-nu +-------G---.\n # 1 2 3 4 5 6 7 8 9 w w w\n\n # Create our good 'ole greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n psi_path = sbox.ospath('A/D/H/psi')\n nu_path = sbox.ospath('A/D/H/nu')\n H_COPY_path = sbox.ospath('H_COPY')\n nu_COPY_path = sbox.ospath('H_COPY/nu')\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_disk = svntest.main.greek_state.copy()\n\n # Make a text mod to 'A/D/H/psi' and commit it as r2\n svntest.main.file_write(psi_path, \"New content\")\n expected_output = wc.State(wc_dir, {'A/D/H/psi' : Item(verb='Sending')})\n expected_status.tweak('A/D/H/psi', wc_rev=2)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n expected_disk.tweak('A/D/H/psi', contents=\"New content\")\n\n # Create 'A/D/H/nu' and commit it as r3.\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Adding')})\n expected_status.add({'A/D/H/nu' : Item(status=' ', wc_rev=3)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Delete 'A/D/H/nu' and commit it as r4.\n svntest.actions.run_and_verify_svn(None, [], 'rm', nu_path)\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Deleting')})\n expected_status.remove('A/D/H/nu')\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Copy 'A/D/H/nu' from r3 and commit it as r5.\n svntest.actions.run_and_verify_svn(None, [], 'cp',\n sbox.repo_url + '/A/D/H/nu@3', nu_path)\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Adding')})\n expected_status.add({'A/D/H/nu' : Item(status=' ', wc_rev=5)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Copy 'A/D/H' to 'H_COPY' in r6.\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 6.\\n'],\n [], 'copy',\n sbox.repo_url + \"/A/D/H\",\n sbox.repo_url + \"/H_COPY\",\n \"-m\", \"Copy A/D/H to H_COPY\")\n expected_status.add({\n \"H_COPY\" : Item(),\n \"H_COPY/chi\" : Item(),\n \"H_COPY/omega\" : Item(),\n \"H_COPY/psi\" : Item(),\n \"H_COPY/nu\" : Item()})\n\n # Update to pull the previous copy into the WC\n svntest.main.run_svn(None, 'up', wc_dir)\n expected_status.tweak(status=' ', wc_rev=6)\n\n # 
Make a text mod to 'A/D/H/nu' and commit it as r7.\n svntest.main.file_write(nu_path, \"New content\")\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Sending')})\n expected_status.tweak('A/D/H/nu', wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Remove A/D/H/nu and commit it as r8.\n # We do this deletion so that following cherry harvest has a *tough*\n # time to identify the line of history of /A/D/H/nu@HEAD.\n svntest.main.run_svn(None, 'rm', nu_path)\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Deleting')})\n expected_status.remove('A/D/H/nu')\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Make another text mod to 'A/D/H/psi' that can be merged to 'H_COPY'\n # during a cherry harvest and commit it as r9.\n svntest.main.file_write(psi_path, \"Even *newer* content\")\n expected_output = wc.State(wc_dir, {'A/D/H/psi' : Item(verb='Sending')})\n expected_status.tweak('A/D/H/psi', wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n expected_disk.tweak('A/D/H/psi', contents=\"Even *newer* content\")\n\n # Update WC so elision occurs smoothly.\n svntest.main.run_svn(None, 'up', wc_dir)\n expected_status.tweak(status=' ', wc_rev=9)\n\n # Merge r7 from 'A/D/H/nu' to 'H_COPY/nu'.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[7]],\n ['U ' + nu_COPY_path + '\\n',\n ' U ' + nu_COPY_path + '\\n']),\n [], 'merge', '-c7', sbox.repo_url + '/A/D/H/nu@7', nu_COPY_path)\n\n # Cherry harvest all eligible revisions from 'A/D/H' to 'H_COPY'.\n #\n # This is where we see the problem described in\n # https://issues.apache.org/jira/browse/SVN-3067#desc5.\n #\n # Use run_and_verify_svn() because run_and_verify_merge*() require\n # explicit revision ranges.\n\n expected_skip = wc.State(H_COPY_path, { })\n #Cherry pick r2 prior to cherry harvest.\n svntest.actions.run_and_verify_svn([], [], 'merge', '-c2',\n sbox.repo_url + '/A/D/H',\n H_COPY_path)\n\n # H_COPY needs r6-9 applied while H_COPY/nu needs only 6,8-9.\n svntest.actions.run_and_verify_svn(\n expected_merge_output(\n [[7,9], # Merge notification\n [6,9]], # Mergeinfo notification\n ['U ' + os.path.join(H_COPY_path, \"psi\") + '\\n',\n 'D ' + os.path.join(H_COPY_path, \"nu\") + '\\n',\n ' U ' + H_COPY_path + '\\n',]),\n [], 'merge', sbox.repo_url + '/A/D/H', H_COPY_path, '--force')\n\n # Check the status after the merge.\n expected_status.tweak('H_COPY', status=' M')\n expected_status.tweak('H_COPY/psi', status='M ')\n expected_status.tweak('H_COPY/nu', status='D ')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n check_mergeinfo_recursively(wc_dir,\n { H_COPY_path: '/A/D/H:6-9' })\n\n#----------------------------------------------------------------------\n# Helper for merge_chokes_on_renamed_subtrees and\n# subtrees_with_empty_mergeinfo.\ndef set_up_renamed_subtree(sbox):\n '''Starting with standard greek tree, make a text mod to A/D/H/psi\n as r2. Tweak A/D/H/omega and commit it at r3(We do this to create\n broken segment of history of A/D/H.\n *DO NOT SVN UPDATE*.\n Move A/D/H/psi to A/D/H/psi_moved as r4. Copy A/D/H to H_COPY\n as r5. 
Make a text mod to A/D/H/psi_moved and commit it at r6.\n Update the working copy and return the expected disk and status\n representing it'''\n\n # Create our good 'ole greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n psi_path = sbox.ospath('A/D/H/psi')\n omega_path = sbox.ospath('A/D/H/omega')\n psi_moved_path = sbox.ospath('A/D/H/psi_moved')\n psi_COPY_moved_path = sbox.ospath('H_COPY/psi_moved')\n H_COPY_path = sbox.ospath('H_COPY')\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_disk = svntest.main.greek_state.copy()\n\n # Make a text mod to 'A/D/H/psi' and commit it as r2\n svntest.main.file_write(psi_path, \"New content\")\n expected_output = wc.State(wc_dir, {'A/D/H/psi' : Item(verb='Sending')})\n expected_status.tweak('A/D/H/psi', wc_rev=2)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n expected_disk.tweak('A/D/H/psi', contents=\"New content\")\n\n # Make a text mod to 'A/D/H/omega' and commit it as r3\n svntest.main.file_write(omega_path, \"New omega\")\n expected_output = wc.State(wc_dir, {'A/D/H/omega' : Item(verb='Sending')})\n expected_status.tweak('A/D/H/omega', wc_rev=3)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n expected_disk.tweak('A/D/H/omega', contents=\"New omega\")\n\n # Move 'A/D/H/psi' to 'A/D/H/psi_moved' and commit it as r4.\n svntest.actions.run_and_verify_svn(None, [], 'move',\n psi_path, psi_moved_path)\n expected_output = wc.State(wc_dir, {\n 'A/D/H/psi' : Item(verb='Deleting'),\n 'A/D/H/psi_moved' : Item(verb='Adding')\n })\n expected_status.add({'A/D/H/psi_moved' : Item(status=' ', wc_rev=4)})\n expected_status.remove('A/D/H/psi')\n\n # Replicate old WC-to-WC move behavior where empty mergeinfo was set on\n # the move destination. 
Pre 1.6 repositories might have mergeinfo like\n # this so we still want to test that the issue #3067 fixes tested by\n # merge_chokes_on_renamed_subtrees and subtrees_with_empty_mergeinfo\n # still work.\n svntest.actions.run_and_verify_svn(None, [], 'ps', SVN_PROP_MERGEINFO,\n \"\", psi_moved_path)\n\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Copy 'A/D/H' to 'H_COPY' in r5.\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 5.\\n'],\n [], 'copy',\n sbox.repo_url + \"/A/D/H\",\n sbox.repo_url + \"/H_COPY\",\n \"-m\", \"Copy A/D/H to H_COPY\")\n expected_status.add({\n \"H_COPY\" : Item(),\n \"H_COPY/chi\" : Item(),\n \"H_COPY/omega\" : Item(),\n \"H_COPY/psi_moved\" : Item()})\n\n # Update to pull the previous copy into the WC\n svntest.main.run_svn(None, 'up', wc_dir)\n expected_status.tweak(status=' ', wc_rev=5)\n\n # Make a text mod to 'A/D/H/psi_moved' and commit it as r6\n svntest.main.file_write(psi_moved_path, \"Even *Newer* content\")\n expected_output = wc.State(wc_dir,\n {'A/D/H/psi_moved' : Item(verb='Sending')})\n expected_status.tweak('A/D/H/psi_moved', wc_rev=6)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n expected_disk.remove('A/D/H/psi')\n expected_disk.add({\n 'A/D/H/psi_moved' : Item(\"Even *Newer* content\"),\n })\n\n # Update for a uniform working copy before merging.\n svntest.main.run_svn(None, 'up', wc_dir)\n expected_status.tweak(status=' ', wc_rev=6)\n\n return wc_dir, expected_disk, expected_status\n\n#----------------------------------------------------------------------\n# Test for issue #3174: 'Merge algorithm chokes on subtrees needing\n# special attention that have been renamed'\n@SkipUnless(server_has_mergeinfo)\n@Issue(3174)\ndef merge_chokes_on_renamed_subtrees(sbox):\n \"merge fails with renamed subtrees with mergeinfo\"\n\n # Use helper to setup a renamed subtree.\n wc_dir, expected_disk, expected_status = set_up_renamed_subtree(sbox)\n\n # Some paths we'll care about\n psi_COPY_moved_path = sbox.ospath('H_COPY/psi_moved')\n\n\n # Cherry harvest all available revsions from 'A/D/H/psi_moved' to\n # 'H_COPY/psi_moved'.\n #\n # Here is where issue #3174 appears, the merge fails with:\n # svn: svn: File not found: revision 3, path '/A/D/H/psi'\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5,6],[3,6]],\n ['U ' + psi_COPY_moved_path + '\\n',\n ' U ' + psi_COPY_moved_path + '\\n',\n ' G ' + psi_COPY_moved_path + '\\n',],\n elides=True),\n [], 'merge', sbox.repo_url + '/A/D/H/psi_moved',\n psi_COPY_moved_path)\n\n expected_status.tweak('H_COPY/psi_moved', status='MM')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n\n#----------------------------------------------------------------------\n# Issue #3157\n@SkipUnless(server_has_mergeinfo)\n@Issue(3157)\ndef dont_explicitly_record_implicit_mergeinfo(sbox):\n \"don't explicitly record implicit mergeinfo\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n A_copy_path = sbox.ospath('A_copy')\n A_copy2_path = sbox.ospath('A_copy2')\n A_copy_mu_path = sbox.ospath('A_copy/mu')\n A_copy2_mu_path = sbox.ospath('A_copy2/mu')\n nu_path = sbox.ospath('A/D/H/nu')\n nu_copy_path = sbox.ospath('A_copy/D/H/nu')\n\n def _commit_and_update(rev, action):\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'r%d - %s' % (rev, action),\n sbox.wc_dir)\n svntest.main.run_svn(None, 'up', wc_dir)\n\n # r2 - copy A to A_copy\n 
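  # (Each of the r2-r5 setup steps below commits through the
  # _commit_and_update helper defined above, so the working copy stays at a
  # single uniform revision after every step.)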
svntest.main.run_svn(None, 'cp', A_path, A_copy_path)\n _commit_and_update(2, \"Copy A to A_copy.\")\n\n # r3 - tweak A_copy/mu\n svntest.main.file_append(A_copy_mu_path, \"r3\\n\")\n _commit_and_update(3, \"Edit A_copy/mu.\")\n\n # r4 - copy A_copy to A_copy2\n svntest.main.run_svn(None, 'cp', A_copy_path, A_copy2_path)\n _commit_and_update(4, \"Copy A_copy to A_copy2.\")\n\n # r5 - tweak A_copy2/mu\n svntest.main.file_append(A_copy2_mu_path, \"r5\\n\")\n _commit_and_update(5, \"Edit A_copy2/mu.\")\n\n # Merge r5 from A_copy2/mu to A_copy/mu.\n #\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn. Check the resulting mergeinfo with\n # a propget.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]], ['U ' + A_copy_mu_path + '\\n',\n ' U ' + A_copy_mu_path + '\\n']),\n [], 'merge', '-c5', sbox.repo_url + '/A_copy2/mu', A_copy_mu_path)\n check_mergeinfo_recursively(A_copy_mu_path,\n { A_copy_mu_path: '/A_copy2/mu:5' })\n\n # Now, merge A_copy2 (in full) back to A_copy. This should result in\n # mergeinfo of '/A_copy2:4-5' on A_copy and '/A_copy2/mu:4-5' on A_copy/mu\n # and the latter should elide to the former. Any revisions < 4 are part of\n # A_copy's natural history and should not be explicitly recorded.\n expected_output = wc.State(A_copy_path, {})\n expected_mergeinfo_output = wc.State(A_copy_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_copy_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A_copy2:4-5'}),\n 'mu' : Item(\"This is the file 'mu'.\\nr3\\nr5\\n\",\n props={SVN_PROP_MERGEINFO : '/A_copy2/mu:5'}),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_status = wc.State(A_copy_path, {\n '' : Item(status=' M'),\n 'mu' : Item(status='MM'),\n 'B' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=5)\n expected_skip = wc.State(A_copy_path, { })\n svntest.actions.run_and_verify_merge(A_copy_path, None, None,\n sbox.repo_url + '/A_copy2', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n # Revert the previous merges and try a cherry harvest merge where\n # the subtree's natural history is a proper subset of the merge.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n 
svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n wc_status = svntest.actions.get_virginal_state(wc_dir, 5)\n wc_status.add({\n 'A_copy' : Item(),\n 'A_copy/B' : Item(),\n 'A_copy/B/lambda' : Item(),\n 'A_copy/B/E' : Item(),\n 'A_copy/B/E/alpha' : Item(),\n 'A_copy/B/E/beta' : Item(),\n 'A_copy/B/F' : Item(),\n 'A_copy/mu' : Item(),\n 'A_copy/C' : Item(),\n 'A_copy/D' : Item(),\n 'A_copy/D/gamma' : Item(),\n 'A_copy/D/G' : Item(),\n 'A_copy/D/G/pi' : Item(),\n 'A_copy/D/G/rho' : Item(),\n 'A_copy/D/G/tau' : Item(),\n 'A_copy/D/H' : Item(),\n 'A_copy/D/H/chi' : Item(),\n 'A_copy/D/H/omega' : Item(),\n 'A_copy/D/H/psi' : Item(),\n 'A_copy2' : Item(),\n 'A_copy2/B' : Item(),\n 'A_copy2/B/lambda' : Item(),\n 'A_copy2/B/E' : Item(),\n 'A_copy2/B/E/alpha' : Item(),\n 'A_copy2/B/E/beta' : Item(),\n 'A_copy2/B/F' : Item(),\n 'A_copy2/mu' : Item(),\n 'A_copy2/C' : Item(),\n 'A_copy2/D' : Item(),\n 'A_copy2/D/gamma' : Item(),\n 'A_copy2/D/G' : Item(),\n 'A_copy2/D/G/pi' : Item(),\n 'A_copy2/D/G/rho' : Item(),\n 'A_copy2/D/G/tau' : Item(),\n 'A_copy2/D/H' : Item(),\n 'A_copy2/D/H/chi' : Item(),\n 'A_copy2/D/H/omega' : Item(),\n 'A_copy2/D/H/psi' : Item(),\n })\n wc_status.tweak(status=' ', wc_rev=5)\n\n # r6 - Add the file 'A/D/H/nu'.\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Adding')})\n wc_status.add({'A/D/H/nu' : Item(status=' ', wc_rev=6)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r7 - Make a change to 'A/D/H/nu'.\n svntest.main.file_write(nu_path, \"Nu content\")\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Sending')})\n wc_status.tweak('A/D/H/nu', wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r8 - Merge r6 to 'A_copy'.\n expected_output = wc.State(A_copy_path, {\n 'D/H/nu' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_copy_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_copy_path, {\n })\n expected_A_copy_status = wc.State(A_copy_path, {\n '' : Item(status=' M', wc_rev=5),\n 'B' : Item(status=' ', wc_rev=5),\n 'mu' : Item(status=' ', wc_rev=5),\n 'B/E' : Item(status=' ', wc_rev=5),\n 'B/E/alpha' : Item(status=' ', wc_rev=5),\n 'B/E/beta' : Item(status=' ', wc_rev=5),\n 'B/lambda' : Item(status=' ', wc_rev=5),\n 'B/F' : Item(status=' ', wc_rev=5),\n 'C' : Item(status=' ', wc_rev=5),\n 'D' : Item(status=' ', wc_rev=5),\n 'D/G' : Item(status=' ', wc_rev=5),\n 'D/G/pi' : Item(status=' ', wc_rev=5),\n 'D/G/rho' : Item(status=' ', wc_rev=5),\n 'D/G/tau' : Item(status=' ', wc_rev=5),\n 'D/gamma' : Item(status=' ', wc_rev=5),\n 'D/H' : Item(status=' ', wc_rev=5),\n 'D/H/chi' : Item(status=' ', wc_rev=5),\n 'D/H/psi' : Item(status=' ', wc_rev=5),\n 'D/H/omega' : Item(status=' ', wc_rev=5),\n 'D/H/nu' : Item(status='A ', wc_rev='-', copied='+'),\n })\n expected_A_copy_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:6'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\nr3\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 
'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'D/H/nu' : Item(\"This is the file 'nu'.\\n\"),\n })\n expected_A_copy_skip = wc.State(A_copy_path, {})\n svntest.actions.run_and_verify_merge(A_copy_path, '5', '6',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_copy_disk,\n expected_A_copy_status,\n expected_A_copy_skip,\n check_props=True)\n wc_status.add({'A_copy/D/H/nu' : Item(status=' ', wc_rev=8)})\n wc_status.tweak('A_copy', wc_rev=8)\n expected_output = wc.State(wc_dir, {\n 'A_copy/D/H/nu' : Item(verb='Adding'),\n 'A_copy' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r9 - Merge r7 to 'A_copy/D/H/nu'.\n expected_skip = wc.State(nu_copy_path, { })\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[7]],\n ['U ' + nu_copy_path + '\\n',\n ' G ' + nu_copy_path + '\\n',]),\n [], 'merge', '-c7', sbox.repo_url + '/A/D/H/nu', nu_copy_path)\n expected_output = wc.State(wc_dir, {'A_copy/D/H/nu' : Item(verb='Sending')})\n wc_status.tweak('A_copy/D/H/nu', wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Update WC\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n wc_status.tweak(wc_rev=9)\n\n # r10 - Make another change to 'A/D/H/nu'.\n svntest.main.file_write(nu_path, \"Even nuer content\")\n expected_output = wc.State(wc_dir, {'A/D/H/nu' : Item(verb='Sending')})\n wc_status.tweak('A/D/H/nu', wc_rev=10)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Update WC\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n wc_status.tweak(wc_rev=10)\n\n # Now do a cherry harvest merge to 'A_copy'.\n expected_output = wc.State(A_copy_path, {\n 'D/H/nu' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_copy_path, {\n '' : Item(status=' U'),\n 'D/H/nu' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_copy_path, {\n })\n expected_A_copy_status = wc.State(A_copy_path, {\n '' : Item(status=' M', wc_rev=10),\n 'B' : Item(status=' ', wc_rev=10),\n 'mu' : Item(status=' ', wc_rev=10),\n 'B/E' : Item(status=' ', wc_rev=10),\n 'B/E/alpha' : Item(status=' ', wc_rev=10),\n 'B/E/beta' : Item(status=' ', wc_rev=10),\n 'B/lambda' : Item(status=' ', wc_rev=10),\n 'B/F' : Item(status=' ', wc_rev=10),\n 'C' : Item(status=' ', wc_rev=10),\n 'D' : Item(status=' ', wc_rev=10),\n 'D/G' : Item(status=' ', wc_rev=10),\n 'D/G/pi' : Item(status=' ', wc_rev=10),\n 'D/G/rho' : Item(status=' ', wc_rev=10),\n 'D/G/tau' : Item(status=' ', wc_rev=10),\n 'D/gamma' : Item(status=' ', wc_rev=10),\n 'D/H' : Item(status=' ', wc_rev=10),\n 'D/H/chi' : Item(status=' ', wc_rev=10),\n 'D/H/psi' : Item(status=' ', wc_rev=10),\n 'D/H/omega' : Item(status=' ', wc_rev=10),\n 'D/H/nu' : Item(status='MM', wc_rev=10),\n })\n expected_A_copy_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-10'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\nr3\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : 
Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'D/H/nu' : Item(\"Even nuer content\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/nu:6-10'}),\n })\n expected_A_copy_skip = wc.State(A_copy_path, {})\n svntest.actions.run_and_verify_merge(A_copy_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_copy_disk,\n expected_A_copy_status,\n expected_A_copy_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Test for issue where merging a change to a broken link fails\n@SkipUnless(svntest.main.is_posix_os)\ndef merge_broken_link(sbox):\n \"merge with broken symlinks in target\"\n\n # Create our good 'ole greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n src_path = sbox.ospath('A/B/E')\n copy_path = sbox.ospath('A/B/E_COPY')\n link_path = os.path.join(src_path, 'beta_link')\n\n os.symlink('beta_broken', link_path)\n svntest.main.run_svn(None, 'add', link_path)\n svntest.main.run_svn(None, 'commit', '-m', 'Create a broken link', link_path)\n svntest.main.run_svn(None, 'copy', src_path, copy_path)\n svntest.main.run_svn(None, 'commit', '-m', 'Copy the tree with the broken link',\n copy_path)\n os.unlink(link_path)\n os.symlink('beta', link_path)\n svntest.main.run_svn(None, 'commit', '-m', 'Fix a broken link', link_path)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[4]],\n ['U ' + copy_path + '/beta_link\\n',\n ' U ' + copy_path + '\\n']),\n [], 'merge', '-c4', src_path, copy_path)\n\n#----------------------------------------------------------------------\n# Test for issue #3199 'Subtree merges broken when required ranges\n# don't intersect with merge target'\n@SkipUnless(server_has_mergeinfo)\n@Issue(3199)\ndef subtree_merges_dont_intersect_with_targets(sbox):\n \"subtree ranges might not intersect with target\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make two branches to merge to.\n wc_disk, wc_status = set_up_branch(sbox, False, 2)\n\n # Some paths we'll care about.\n A_COPY_path = sbox.ospath('A_COPY')\n A_COPY_2_path = sbox.ospath('A_COPY_2')\n H_COPY_2_path = sbox.ospath('A_COPY_2/D/H')\n gamma_path = sbox.ospath('A/D/gamma')\n psi_path = sbox.ospath('A/D/H/psi')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n gamma_COPY_path = sbox.ospath('A_COPY/D/gamma')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n psi_COPY_2_path = sbox.ospath('A_COPY_2/D/H/psi')\n rho_COPY_2_path = sbox.ospath('A_COPY_2/D/G/rho')\n\n # Make a tweak to A/D/gamma and A/D/H/psi in r8.\n svntest.main.file_write(gamma_path, \"New content\")\n svntest.main.file_write(psi_path, \"Even newer content\")\n expected_output = wc.State(wc_dir, {\n 'A/D/gamma' : Item(verb='Sending'),\n 'A/D/H/psi' : Item(verb='Sending'),\n })\n wc_status.tweak('A/D/gamma', 'A/D/H/psi', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n wc_disk.tweak('A/D/gamma', contents=\"New content\")\n wc_disk.tweak('A/D/H/psi', contents=\"Even newer content\")\n\n # Update the WC.\n 
svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [],\n 'update', wc_dir)\n wc_status.tweak(wc_rev=8)\n\n # Run a bunch of merges to setup the 2 branches with explicit\n # mergeinfo on each branch root and explicit mergeinfo on one subtree\n # of each root. The mergeinfo should be such that:\n #\n # 1) On one branch: The mergeinfo on the root and the subtree do\n # not intersect.\n #\n # 2) On the other branch: The mergeinfo on the root and subtree\n # are each 'missing' and eligible ranges and these missing\n # ranges do not intersect.\n #\n # Note: We just use run_and_verify_svn(...'merge'...) here rather than\n # run_and_verify_merge() because these types of simple merges are\n # tested to death elsewhere and this is just setup for the \"real\"\n # test.\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c4',\n sbox.repo_url + '/A/D/H/psi',\n psi_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c8',\n sbox.repo_url + '/A',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c-8',\n sbox.repo_url + '/A/D/H/psi',\n psi_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'merge',\n sbox.repo_url + '/A',\n A_COPY_2_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c-5',\n sbox.repo_url + '/A',\n A_COPY_2_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c5', '-c-8',\n sbox.repo_url + '/A/D/H',\n H_COPY_2_path)\n\n # Commit all the previous merges as r9.\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n 'A_COPY/D/gamma' : Item(verb='Sending'),\n 'A_COPY_2' : Item(verb='Sending'),\n 'A_COPY_2/B/E/beta' : Item(verb='Sending'),\n 'A_COPY_2/D/H' : Item(verb='Sending'),\n 'A_COPY_2/D/H/omega' : Item(verb='Sending'),\n 'A_COPY_2/D/H/psi' : Item(verb='Sending'),\n 'A_COPY_2/D/gamma' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY',\n 'A_COPY/D/H/psi',\n 'A_COPY/D/gamma',\n 'A_COPY_2',\n 'A_COPY_2/B/E/beta',\n 'A_COPY_2/D/H',\n 'A_COPY_2/D/H/omega',\n 'A_COPY_2/D/H/psi',\n 'A_COPY_2/D/gamma',\n wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n # Update the WC.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [],\n 'update', wc_dir)\n\n # Make sure we have mergeinfo that meets the two criteria set out above.\n check_mergeinfo_recursively(wc_dir,\n { # Criterion 1\n A_COPY_path: '/A:8',\n psi_COPY_path: '/A/D/H/psi:4',\n # Criterion 2\n A_COPY_2_path : '/A:3-4,6-8',\n H_COPY_2_path : '/A/D/H:3-7' })\n\n # Merging to the criterion 2 branch.\n #\n # Forward merge a range to a target with a subtree where the target\n # and subtree need different, non-intersecting revision ranges applied:\n # Merge r3:9 from A into A_COPY_2.\n #\n # The subtree A_COPY_2/D/H needs r8-9 applied (affecting A_COPY_2/D/H/psi)\n # while the target needs r5 (affecting A_COPY_2/D/G/rho) applied. 
The\n # resulting mergeinfo on A_COPY_2 and A_COPY_2/D/H should be equivalent\n # and therefore elide to A_COPY_2.\n expected_output = wc.State(A_COPY_2_path, {\n 'D/G/rho' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_2_path, {\n '' : Item(status=' U'),\n 'D/H' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_2_path, {\n 'D/H' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_2_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status=' ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status=' ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status='M ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' M', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/psi' : Item(status='M ', wc_rev=9),\n 'D/H/omega' : Item(status=' ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3-9'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"New content\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"Even newer content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_2_path, {})\n svntest.actions.run_and_verify_merge(A_COPY_2_path, '3', '9',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merging to the criterion 1 branch.\n #\n # Reverse merge a range to a target with a subtree where the target\n # and subtree need different, non-intersecting revision ranges\n # reversed: Merge r9:3 from A into A_COPY.\n #\n # The subtree A_COPY_2/D/H/psi needs r4 reversed, while the target needs\n # r8 (affecting A_COPY/D/gamma) reversed. 
Since this reverses all merges\n # thus far to A_COPY, there should be *no* mergeinfo post merge.\n expected_output = wc.State(A_COPY_path, {\n 'D/gamma' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H/psi' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H/psi' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status=' ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status=' ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status=' ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status='M ', wc_rev=9),\n 'D/H' : Item(status=' ', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/psi' : Item(status='MM', wc_rev=9),\n 'D/H/omega' : Item(status=' ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, {})\n svntest.actions.run_and_verify_merge(A_COPY_path, '9', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Test the notification portion of issue #3199.\n #\n # run_and_verify_merge() doesn't check the notification headers\n # so we need to repeat the previous two merges using\n # run_and_verify_svn(...'merge'...) and expected_merge_output().\n #\n ### TODO: Things are fairly ugly when it comes to testing the\n ### merge notification headers. run_and_verify_merge*()\n ### just ignores the notifications and in the few places\n ### we use expected_merge_output() the order of notifications\n ### and paths are not considered. 
In a perfect world we'd\n ### have run_and_verify_merge() that addressed these\n ### shortcomings (and allowed merges to file targets).\n #\n # Revert the previous merges.\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R', wc_dir)\n\n # Repeat the forward merge\n expected_output = expected_merge_output(\n [[5],[8],[5,9]],\n ['U %s\\n' % (rho_COPY_2_path),\n 'U %s\\n' % (psi_COPY_2_path),\n ' U %s\\n' % (H_COPY_2_path),\n ' U %s\\n' % (A_COPY_2_path),],\n elides=True)\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'merge', '-r', '3:9',\n sbox.repo_url + '/A',\n A_COPY_2_path)\n # Repeat the reverse merge\n expected_output = expected_merge_output(\n [[-4],[-8],[8,4]],\n ['U %s\\n' % (gamma_COPY_path),\n 'U %s\\n' % (psi_COPY_path),\n ' U %s\\n' % (A_COPY_path),\n ' U %s\\n' % (psi_COPY_path)],\n elides=True)\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'merge', '-r', '9:3',\n sbox.repo_url + '/A',\n A_COPY_path)\n\n#----------------------------------------------------------------------\n# Some more tests for issue #3067 'subtrees that don't exist at the start\n# or end of a merge range shouldn't break the merge'\n@Issue(3067)\n@SkipUnless(server_has_mergeinfo)\ndef subtree_source_missing_in_requested_range(sbox):\n \"subtree merge source might not exist\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make a branch to merge to.\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n\n # Some paths we'll care about.\n psi_path = sbox.ospath('A/D/H/psi')\n omega_path = sbox.ospath('A/D/H/omega')\n A_COPY_path = sbox.ospath('A_COPY')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n\n # r7 Delete A/D/H/psi.\n svntest.actions.run_and_verify_svn(None, [],\n 'delete', psi_path)\n sbox.simple_commit(message='delete psi')\n\n # r8 - modify A/D/H/omega.\n svntest.main.file_write(os.path.join(omega_path), \"Even newer content\")\n sbox.simple_commit(message='modify omega')\n\n # r9 - Merge r3 to A_COPY/D/H/psi\n expected_output = expected_merge_output(\n [[3]], ['U %s\\n' % (psi_COPY_path),\n ' U %s\\n' % (psi_COPY_path),])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '-c', '3',\n sbox.repo_url + '/A/D/H/psi@3',\n psi_COPY_path)\n sbox.simple_commit(message='merge r3 to A_COPY/D/H/psi')\n\n # r10 - Merge r6 to A_COPY/D/H/omega.\n expected_output = expected_merge_output(\n [[6]], ['U %s\\n' % (omega_COPY_path),\n ' U %s\\n' % (omega_COPY_path),])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '-c', '6',\n sbox.repo_url + '/A/D/H/omega',\n omega_COPY_path)\n sbox.simple_commit(message='merge r6 to A_COPY')\n svntest.actions.run_and_verify_svn(exp_noop_up_out(10), [], 'up',\n wc_dir)\n\n # r11 - Merge r8 to A_COPY.\n expected_output = expected_merge_output(\n [[8]], ['U %s\\n' % (omega_COPY_path),\n ' U %s\\n' % (omega_COPY_path),\n ' U %s\\n' % (A_COPY_path)])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '-c', '8',\n sbox.repo_url + '/A',\n A_COPY_path)\n # Repeat the merge using the --record-only option so A_COPY/D/H/psi gets\n # mergeinfo including 'A/D/H/psi:8', which doesn't exist. Why? Because\n # we are trying to create mergeinfo that will provoke an invalid editor\n # drive. In 1.5-1.6 merge updated all subtrees, regardless of whether the\n # merge touched these subtrees. 
This --record-only merge duplicates that\n # behavior, allowing us to test the relevant issue #3067 fixes.\n expected_output = expected_merge_output(\n [[8]], [' G %s\\n' % (omega_COPY_path),\n ' U %s\\n' % (psi_COPY_path),\n ' G %s\\n' % (A_COPY_path)])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '-c', '8',\n sbox.repo_url + '/A',\n A_COPY_path, '--record-only')\n sbox.simple_commit(message='merge r8 to A_COPY/D/H/omega')\n svntest.actions.run_and_verify_svn(exp_noop_up_out(11), [], 'up',\n wc_dir)\n\n # r12 - modify A/D/H/omega yet again.\n svntest.main.file_write(os.path.join(omega_path),\n \"Now with fabulous new content!\")\n sbox.simple_commit(message='modify omega')\n\n # r13 - Merge all available revs to A_COPY/D/H/omega.\n expected_output = expected_merge_output(\n [[9,12],[2,12]], ['U %s\\n' % (omega_COPY_path),\n ' U %s\\n' % (omega_COPY_path)])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge',\n sbox.repo_url + '/A/D/H/omega',\n omega_COPY_path)\n sbox.simple_commit(message='cherry harvest to A_COPY/D/H/omega')\n svntest.actions.run_and_verify_svn(exp_noop_up_out(13), [], 'up',\n wc_dir)\n\n # Check that svn:mergeinfo is as expected.\n check_mergeinfo_recursively(wc_dir,\n { A_COPY_path: '/A:8',\n omega_COPY_path: '/A/D/H/omega:2-12',\n psi_COPY_path : '/A/D/H/psi:3,8' })\n\n # Now test a reverse merge where part of the requested range postdates\n # a subtree's existence. Merge -r12:1 to A_COPY. This should revert\n # all of the merges done thus far. The fact that A/D/H/psi no longer\n # exists after r7 shouldn't break the subtree merge into A_COPY/D/H/psi.\n # A_COPY/D/H/psi should simply have r3 reverse merged. No paths under\n # in the tree rooted at A_COPY should have any explicit mergeinfo.\n expected_output = wc.State(A_COPY_path, {\n 'D/H/omega' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n 'D/H/omega' : Item(status='G ', prev_status='G '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H/psi' : Item(status=' U'),\n 'D/H/omega' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H/psi' : Item(status=' U'),\n 'D/H/omega' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=13),\n 'B' : Item(status=' ', wc_rev=13),\n 'mu' : Item(status=' ', wc_rev=13),\n 'B/E' : Item(status=' ', wc_rev=13),\n 'B/E/alpha' : Item(status=' ', wc_rev=13),\n 'B/E/beta' : Item(status=' ', wc_rev=13),\n 'B/lambda' : Item(status=' ', wc_rev=13),\n 'B/F' : Item(status=' ', wc_rev=13),\n 'C' : Item(status=' ', wc_rev=13),\n 'D' : Item(status=' ', wc_rev=13),\n 'D/G' : Item(status=' ', wc_rev=13),\n 'D/G/pi' : Item(status=' ', wc_rev=13),\n 'D/G/rho' : Item(status=' ', wc_rev=13),\n 'D/G/tau' : Item(status=' ', wc_rev=13),\n 'D/gamma' : Item(status=' ', wc_rev=13),\n 'D/H' : Item(status=' ', wc_rev=13),\n 'D/H/chi' : Item(status=' ', wc_rev=13),\n 'D/H/psi' : Item(status='MM', wc_rev=13),\n 'D/H/omega' : Item(status='MM', wc_rev=13),\n })\n expected_disk = wc.State('', {\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : 
Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '12', '1',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False)\n\n # Revert the previous merge.\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '-R', wc_dir)\n # Merge r12 to A_COPY and commit as r14.\n expected_output = wc.State(A_COPY_path, {})\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=13),\n 'B' : Item(status=' ', wc_rev=13),\n 'mu' : Item(status=' ', wc_rev=13),\n 'B/E' : Item(status=' ', wc_rev=13),\n 'B/E/alpha' : Item(status=' ', wc_rev=13),\n 'B/E/beta' : Item(status=' ', wc_rev=13),\n 'B/lambda' : Item(status=' ', wc_rev=13),\n 'B/F' : Item(status=' ', wc_rev=13),\n 'C' : Item(status=' ', wc_rev=13),\n 'D' : Item(status=' ', wc_rev=13),\n 'D/G' : Item(status=' ', wc_rev=13),\n 'D/G/pi' : Item(status=' ', wc_rev=13),\n 'D/G/rho' : Item(status=' ', wc_rev=13),\n 'D/G/tau' : Item(status=' ', wc_rev=13),\n 'D/gamma' : Item(status=' ', wc_rev=13),\n 'D/H' : Item(status=' ', wc_rev=13),\n 'D/H/chi' : Item(status=' ', wc_rev=13),\n 'D/H/psi' : Item(status=' ', wc_rev=13),\n 'D/H/omega' : Item(status=' ', wc_rev=13),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:8,12'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/psi:3,8'}),\n 'D/H/omega' : Item(\"Now with fabulous new content!\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/omega:2-12'}),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '11', '12',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False)\n # As we did earlier, repeat the merge with the --record-only option to\n # preserve the old behavior of recording mergeinfo on every subtree, thus\n # allowing this test to actually test the issue #3067 fixes.\n expected_output = expected_merge_output(\n [[12]], ['U %s\\n' % (A_COPY_path),\n ' G %s\\n' % (A_COPY_path),\n ' U %s\\n' % (psi_COPY_path),\n ' U %s\\n' % (omega_COPY_path),])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '-c', '12',\n sbox.repo_url + '/A',\n A_COPY_path, '--record-only')\n sbox.simple_commit(message='Merge r12 to A_COPY')\n\n # Update A_COPY/D/H/rho back to r13 so it's mergeinfo doesn't include\n # r12. 
Then merge a range, -r6:12 which should delete a subtree\n # (A_COPY/D/H/psi).\n svntest.actions.run_and_verify_svn(exp_noop_up_out(14), [], 'up',\n wc_dir)\n expected_output = wc.State(A_COPY_path, {\n 'D/H/psi' : Item(status='D '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=14),\n 'B' : Item(status=' ', wc_rev=14),\n 'mu' : Item(status=' ', wc_rev=14),\n 'B/E' : Item(status=' ', wc_rev=14),\n 'B/E/alpha' : Item(status=' ', wc_rev=14),\n 'B/E/beta' : Item(status=' ', wc_rev=14),\n 'B/lambda' : Item(status=' ', wc_rev=14),\n 'B/F' : Item(status=' ', wc_rev=14),\n 'C' : Item(status=' ', wc_rev=14),\n 'D' : Item(status=' ', wc_rev=14),\n 'D/G' : Item(status=' ', wc_rev=14),\n 'D/G/pi' : Item(status=' ', wc_rev=14),\n 'D/G/rho' : Item(status=' ', wc_rev=14),\n 'D/G/tau' : Item(status=' ', wc_rev=14),\n 'D/gamma' : Item(status=' ', wc_rev=14),\n 'D/H' : Item(status=' ', wc_rev=14),\n 'D/H/chi' : Item(status=' ', wc_rev=14),\n 'D/H/psi' : Item(status='D ', wc_rev=14),\n 'D/H/omega' : Item(status=' ', wc_rev=14),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:7-12'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/omega' : Item(\"Now with fabulous new content!\",\n props={SVN_PROP_MERGEINFO : '/A/D/H/omega:2-12'}),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '6', '12',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False)\n\n#----------------------------------------------------------------------\n# Another test for issue #3067: 'subtrees that don't exist at the start\n# or end of a merge range shouldn't break the merge'\n#\n# See https://issues.apache.org/jira/browse/SVN-3067#desc34\n@Issue(3067)\n@SkipUnless(server_has_mergeinfo)\ndef subtrees_with_empty_mergeinfo(sbox):\n \"mergeinfo not set on subtree with empty mergeinfo\"\n\n # Use helper to setup a renamed subtree.\n wc_dir, expected_disk, expected_status = set_up_renamed_subtree(sbox)\n\n # Some paths we'll care about\n H_COPY_path = sbox.ospath('H_COPY')\n\n # Cherry harvest all available revsions from 'A/D/H' to 'H_COPY'.\n #\n # This should merge r4:6 from 'A/D/H' setting mergeinfo for r5-6\n # on both 'H_COPY' and 'H_COPY/psi_moved'. 
But since the working copy\n # is at a uniform working revision, the latter's mergeinfo should\n # elide, leaving explicit mergeinfo only on the merge target.\n expected_output = wc.State(H_COPY_path, {\n 'psi_moved' : Item(status='U ')\n })\n expected_mergeinfo_output = wc.State(H_COPY_path, {\n '' : Item(status=' U'),\n 'psi_moved' : Item(status=' U'),\n })\n expected_elision_output = wc.State(H_COPY_path, {\n 'psi_moved' : Item(status=' U'),\n })\n expected_status = wc.State(H_COPY_path, {\n '' : Item(status=' M', wc_rev=6), # mergeinfo set on target\n 'psi_moved' : Item(status='MM', wc_rev=6), # mergeinfo elides\n 'omega' : Item(status=' ', wc_rev=6),\n 'chi' : Item(status=' ', wc_rev=6),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:5-6'}),\n 'psi_moved' : Item(\"Even *Newer* content\"), # mergeinfo elides\n 'omega' : Item(\"New omega\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(H_COPY_path, { })\n\n svntest.actions.run_and_verify_merge(H_COPY_path, None, None,\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Test for issue #3240 'commits to subtrees added by merge\n# corrupt working copy and repos'.\n@SkipUnless(server_has_mergeinfo)\n@Issue(3240)\ndef commit_to_subtree_added_by_merge(sbox):\n \"commits to subtrees added by merge wreak havoc\"\n\n # Setup a standard greek tree in r1.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n N_path = sbox.ospath('A/D/H/N')\n nu_path = sbox.ospath('A/D/H/N/nu')\n nu_COPY_path = sbox.ospath('A_COPY/D/H/N/nu')\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n\n # Copy 'A' to 'A_COPY' in r2.\n wc_disk, wc_status = set_up_branch(sbox, True)\n\n # Create a 'A/D/H/N' and 'A/D/H/N/nu', and commit this new\n # subtree as r3.\n os.mkdir(N_path)\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', N_path)\n expected_output = wc.State(wc_dir,\n {'A/D/H/N' : Item(verb='Adding'),\n 'A/D/H/N/nu' : Item(verb='Adding')})\n wc_status.add({'A/D/H/N' : Item(status=' ', wc_rev=3),\n 'A/D/H/N/nu' : Item(status=' ', wc_rev=3)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Merge r3 to 'A_COPY/D/H', creating A_COPY/D/H/N' and 'A_COPY/D/H/N/nu'.\n # Commit the merge as r4.\n expected_output = wc.State(H_COPY_path, {\n 'N' : Item(status='A '),\n 'N/nu' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(H_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(H_COPY_path, {\n })\n expected_status = wc.State(H_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'psi' : Item(status=' ', wc_rev=2),\n 'omega' : Item(status=' ', wc_rev=2),\n 'chi' : Item(status=' ', wc_rev=2),\n 'N' : Item(status='A ', copied='+', wc_rev='-'),\n 'N/nu' : Item(status=' ', copied='+', wc_rev='-'),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:2-3'}),\n 'psi' : Item(\"This is the file 'psi'.\\n\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n 'N' : Item(),\n 'N/nu' : Item(\"This is the file 'nu'.\\n\"),\n })\n expected_skip = wc.State(H_COPY_path, {})\n svntest.actions.run_and_verify_merge(H_COPY_path,\n None, None,\n sbox.repo_url + '/A/D/H', None,\n 
expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n expected_output = wc.State(wc_dir, {\n 'A_COPY/D/H' : Item(verb='Sending'),\n 'A_COPY/D/H/N' : Item(verb='Adding'),\n })\n wc_status.add({'A_COPY/D/H/N' : Item(status=' ', wc_rev=4),\n 'A_COPY/D/H/N/nu' : Item(status=' ', wc_rev=4)})\n wc_status.tweak('A_COPY/D/H', wc_rev=4)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Make a text change to 'A_COPY/D/H/N/nu' and commit it as r5. This\n # is the first place issue #3240 appears over DAV layers, and the\n # commit fails with an error like this:\n # trunk>svn ci -m \"\" merge_tests-100\n # Sending merge_tests-100\\A_COPY\\D\\H\\N\\nu\n # Transmitting file data ...\\..\\..\\subversion\\libsvn_client\\commit.c:919:\n # (apr_err=20014)\n # svn: Commit failed (details follow):\n # ..\\..\\..\\subversion\\libsvn_ra_neon\\merge.c:260: (apr_err=20014)\n # svn: A MERGE response for '/svn-test-work/repositories/merge_tests-100/\n # A/D/H/N/nu' is not a child of the destination\n # ('/svn-test-work/repositories/merge_tests-100/A_COPY/D/H/N')\n svntest.main.file_write(nu_COPY_path, \"New content\")\n expected_output = wc.State(wc_dir,\n {'A_COPY/D/H/N/nu' : Item(verb='Sending')})\n wc_status.tweak('A_COPY/D/H/N/nu', wc_rev=5)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n # The second place issue #3240 shows up is in the fact that the commit\n # *did* succeed, but the wrong path ('A/D/H/nu' rather than 'A_COPY/D/H/nu')\n # is affected. We can see this by running an update; since we just\n # committed there shouldn't be any incoming changes.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(5), [], 'up',\n wc_dir)\n\n\n#----------------------------------------------------------------------\n# Tests for merging the deletion of a node, where the node to be deleted\n# is the same as or different from the node that was deleted.\n\n#----------------------------------------------------------------------\ndef del_identical_file(sbox):\n \"merge tries to delete a file of identical content\"\n\n # Set up a standard greek tree in r1.\n sbox.build()\n\n saved_cwd = os.getcwd()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n # Set up a modification and deletion in the source branch.\n source = 'A/D/G'\n s_rev_orig = 1\n svn_modfile(source+\"/tau\")\n sbox.simple_commit(source)\n s_rev_mod = 2\n sbox.simple_rm(source+\"/tau\")\n sbox.simple_commit(source)\n s_rev_del = 3\n\n # Make an identical copy, and merge a deletion to it.\n target = 'A/D/G2'\n svn_copy(s_rev_mod, source, target)\n sbox.simple_commit(target)\n # Should be deleted quietly.\n svn_merge(s_rev_del, source, target,\n ['D %s\\n' % local_path('A/D/G2/tau')])\n\n # Make a differing copy, locally modify it so it's the same,\n # and merge a deletion to it.\n target = 'A/D/G3'\n svn_copy(s_rev_orig, source, target)\n sbox.simple_commit(target)\n svn_modfile(target+\"/tau\")\n # Should be deleted quietly.\n svn_merge(s_rev_del, source, target,\n ['D %s\\n' % local_path('A/D/G3/tau')])\n\n os.chdir(saved_cwd)\n\n#----------------------------------------------------------------------\ndef del_sched_add_hist_file(sbox):\n \"merge tries to delete identical sched-add file\"\n\n # Setup a standard greek tree in r1.\n sbox.build()\n\n saved_cwd = os.getcwd()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n # Set up a creation in the source branch.\n source = 'A/D/G'\n s_rev_orig = 1\n 
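# Create and commit a new file in the source branch; that revision\n # (s_rev_add) is merged to the copy and then reverse-merged below.\n 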
svn_mkfile(source+\"/file\")\n sbox.simple_commit(source)\n s_rev_add = 2\n\n # Merge a creation, and delete by reverse-merging into uncommitted WC.\n target = 'A/D/G2'\n svn_copy(s_rev_orig, source, target)\n sbox.simple_commit(target)\n s_rev = 3\n svn_merge(s_rev_add, source, target,\n ['A %s\\n' % local_path('A/D/G2/file')])\n # Should be deleted quietly.\n svn_merge(-s_rev_add, source, target,\n ['D %s\\n' % local_path('A/D/G2/file')], elides=['A/D/G2'])\n\n os.chdir(saved_cwd)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef subtree_merges_dont_cause_spurious_conflicts(sbox):\n \"subtree merges dont cause spurious conflicts\"\n\n # Fix a merge bug where previous merges are incorrectly reversed leading\n # to repeat merges and spurious conflicts. These can occur when a subtree\n # needs a range M:N merged that is older than the ranges X:Y needed by the\n # merge target *and* there are changes in the merge source between N:X that\n # affect parts of the merge target other than the subtree. An actual case\n # where our own epository encountered this problem is described here:\n # http://subversion.tigris.org/servlets/ReadMsg?listName=dev&msgNo=141832\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n rho_path = sbox.ospath('A/D/G/rho')\n A_COPY_path = sbox.ospath('A_COPY')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n\n # Make a branch to merge to.\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n\n # r7 Make a text change to A/D/G/rho.\n svntest.main.file_write(rho_path, \"Newer content\")\n expected_output = wc.State(wc_dir, {'A/D/G/rho' : Item(verb='Sending')})\n wc_status.tweak('A/D/G/rho', wc_rev=7)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n wc_disk.tweak('A/D/G/rho', contents=\"Newer content\")\n\n # r8 Make another text change to A/D/G/rho.\n svntest.main.file_write(rho_path, \"Even *newer* content\")\n expected_output = wc.State(wc_dir, {'A/D/G/rho' : Item(verb='Sending')})\n wc_status.tweak('A/D/G/rho', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n wc_disk.tweak('A/D/G/rho', contents=\"Even *newer* content\")\n\n # Update the WC to allow full mergeinfo inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [], 'up',\n wc_dir)\n wc_status.tweak(wc_rev=8)\n\n # r9 Merge r0:7 from A to A_COPY, then create a subtree with differing\n # mergeinfo under A_COPY by reverse merging r3 from A_COPY/D/H/psi.\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U '),\n 'D/G/rho' : Item(status='U '),\n 'D/H/omega' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=8),\n 'B' : Item(status=' ', wc_rev=8),\n 'mu' : Item(status=' ', wc_rev=8),\n 'B/E' : Item(status=' ', wc_rev=8),\n 'B/E/alpha' : Item(status=' ', wc_rev=8),\n 'B/E/beta' : Item(status='M ', wc_rev=8),\n 'B/lambda' : Item(status=' ', wc_rev=8),\n 'B/F' : Item(status=' ', wc_rev=8),\n 'C' : Item(status=' ', wc_rev=8),\n 'D' : Item(status=' ', wc_rev=8),\n 'D/G' : Item(status=' ', wc_rev=8),\n 'D/G/pi' : Item(status=' ', wc_rev=8),\n 'D/G/rho' : Item(status='M ', wc_rev=8),\n 'D/G/tau' : Item(status=' ', wc_rev=8),\n 'D/gamma' : Item(status=' ', wc_rev=8),\n 'D/H' : Item(status=' ', 
wc_rev=8),\n 'D/H/chi' : Item(status=' ', wc_rev=8),\n 'D/H/psi' : Item(status='M ', wc_rev=8),\n 'D/H/omega' : Item(status='M ', wc_rev=8),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-7'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"Newer content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\",),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '0', '7',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n # run_and_verify_merge doesn't support merging to a file WCPATH\n # so use run_and_verify_svn.\n ### TODO: We can use run_and_verify_merge() here now.\n svntest.actions.run_and_verify_svn(expected_merge_output([[-3]],\n ['G ' + psi_COPY_path + '\\n',\n ' G ' + psi_COPY_path + '\\n']),\n [], 'merge', '-c-3',\n sbox.repo_url + '/A/D/H/psi',\n psi_COPY_path)\n # Commit the two merges.\n expected_output = svntest.wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D/G/rho' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n })\n wc_status.tweak('A_COPY',\n 'A_COPY/B/E/beta',\n 'A_COPY/D/G/rho',\n 'A_COPY/D/H/psi',\n 'A_COPY/D/H/omega',\n wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Update the WC to allow full mergeinfo inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [], 'up',\n wc_dir)\n wc_status.tweak(wc_rev=9)\n\n # r9 Merge all available revisions from A to A_COPY.\n #\n # This is where the bug revealed itself, instead of cleanly merging\n # just r3 and then r8-9, the first merge editor drive of r3 set A_COPY\n # to the state it was in r7, effectively reverting the merge committed\n # in r9. 
So we saw unexpected merges to omega, rho, and beta, as they\n # are returned to their r7 state and then a conflict on rho as the editor\n # attempted to merge r8:\n #\n # trunk>svn merge %url%/A merge_tests-104\\A_COPY\n # --- Merging r3 into 'merge_tests-104\\A_COPY\\D\\H\\psi':\n # U merge_tests-104\\A_COPY\\D\\H\\psi\n # --- Merging r8 through r9 into 'merge_tests-104\\A_COPY':\n # U merge_tests-104\\A_COPY\\D\\H\\omega\n # U merge_tests-104\\A_COPY\\D\\G\\rho\n # U merge_tests-104\\A_COPY\\B\\E\\beta\n # Conflict discovered in 'merge_tests-104/A_COPY/D/G/rho'.\n # Select: (p) postpone, (df) diff-full, (e) edit,\n # (mc) mine-conflict, (tc) theirs-conflict,\n # (s) show all options: p\n # --- Merging r8 through r9 into 'merge_tests-104\\A_COPY':\n # C merge_tests-104\\A_COPY\\D\\G\\rho\n expected_output = wc.State(A_COPY_path, {\n 'D/G/rho' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H/psi' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n 'D/H/psi' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status=' ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status=' ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status='M ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' ', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/psi' : Item(status='MM', wc_rev=9),\n 'D/H/omega' : Item(status=' ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-9'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"Even *newer* content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"), # Mergeinfo elides to A_COPY\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Test for yet another variant of issue #3067.\n@Issue(3067)\n@SkipUnless(server_has_mergeinfo)\ndef merge_target_and_subtrees_need_nonintersecting_ranges(sbox):\n \"target and subtrees need nonintersecting revs\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n nu_path = sbox.ospath('A/D/G/nu')\n A_COPY_path = sbox.ospath('A_COPY')\n nu_COPY_path = sbox.ospath('A_COPY/D/G/nu')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n beta_COPY_path = 
sbox.ospath('A_COPY/B/E/beta')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n\n # Make a branch to merge to.\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n\n # Add file A/D/G/nu in r7.\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n expected_output = wc.State(wc_dir, {'A/D/G/nu' : Item(verb='Adding')})\n wc_status.add({'A/D/G/nu' : Item(status=' ', wc_rev=7)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Make a text mod to A/D/G/nu in r8.\n svntest.main.file_write(nu_path, \"New content\")\n expected_output = wc.State(wc_dir, {'A/D/G/nu' : Item(verb='Sending')})\n wc_status.tweak('A/D/G/nu', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # Do several merges to setup a situation where the merge\n # target and two of its subtrees need non-intersecting ranges\n # merged when doing a synch (a.k.a. cherry harvest) merge.\n #\n # 1) Merge -r0:7 from A to A_COPY.\n #\n # 2) Merge -c8 from A/D/G/nu to A_COPY/D/G/nu.\n #\n # 3) Merge -c-6 from A/D/H/omega to A_COPY/D/H/omega.\n #\n # Commit this group of merges as r9. Since we already test these type\n # of merges to death we don't use run_and_verify_merge() on these\n # intermediate merges.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[2,7]],\n ['U ' + beta_COPY_path + '\\n',\n 'A ' + nu_COPY_path + '\\n',\n 'U ' + rho_COPY_path + '\\n',\n 'U ' + omega_COPY_path + '\\n',\n 'U ' + psi_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',]\n ),\n [], 'merge', '-r0:7', sbox.repo_url + '/A', A_COPY_path)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[8]], ['U ' + nu_COPY_path + '\\n',\n ' G ' + nu_COPY_path + '\\n']),\n [], 'merge', '-c8', sbox.repo_url + '/A/D/G/nu', nu_COPY_path)\n\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[-6]], ['G ' + omega_COPY_path + '\\n',\n ' G ' + omega_COPY_path + '\\n']),\n [], 'merge', '-c-6', sbox.repo_url + '/A/D/H/omega', omega_COPY_path)\n wc_status.add({'A_COPY/D/G/nu' : Item(status=' ', wc_rev=9)})\n wc_status.tweak('A_COPY',\n 'A_COPY/B/E/beta',\n 'A_COPY/D/G/rho',\n 'A_COPY/D/H/omega',\n 'A_COPY/D/H/psi',\n wc_rev=9)\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D/G/rho' : Item(verb='Sending'),\n 'A_COPY/D/G/nu' : Item(verb='Adding'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Update the WC to allow full mergeinfo inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [], 'up',\n wc_dir)\n\n # Merge all available revisions from A to A_COPY, the merge logic\n # should handle this situation (no \"svn: Working copy path 'D/G/nu'\n # does not exist in repository\" errors!). 
The mergeinfo on\n # A_COPY/D/H/omega elides to the root, but the mergeinfo on\n # A_COPY/D/G/nu, untouched by the merge, does not get updated so\n # does not elide.\n expected_output = wc.State(A_COPY_path, {\n 'D/H/omega': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H/omega': Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n 'D/H/omega': Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status=' ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status=' ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status=' ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/G/nu' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' ', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/psi' : Item(status=' ', wc_rev=9),\n 'D/H/omega' : Item(status='MM', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-9'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/G/nu' : Item(\"New content\",\n props={SVN_PROP_MERGEINFO : '/A/D/G/nu:2-8'}),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Part of this test is a regression test for issue #3250 \"Repeated merging\n# of conflicting properties fails\".\n@Issue(3250)\ndef merge_two_edits_to_same_prop(sbox):\n \"merge two successive edits to the same property\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make a branch to merge to. 
(This is r6.)\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n initial_rev = 6\n\n # Change into the WC dir for convenience\n was_cwd = os.getcwd()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_disk.wc_dir = ''\n wc_status.wc_dir = ''\n\n # Some paths we'll care about\n A_path = \"A\"\n A_COPY_path = \"A_COPY\"\n mu_path = os.path.join(A_path, \"mu\")\n mu_COPY_path = os.path.join(A_COPY_path, \"mu\")\n\n # In the source, make two successive changes to the same property\n sbox.simple_propset('p', 'new-val-1', 'A/mu')\n sbox.simple_commit('A/mu')\n rev1 = initial_rev + 1\n sbox.simple_propset('p', 'new-val-2', 'A/mu')\n sbox.simple_commit('A/mu')\n rev2 = initial_rev + 2\n\n # Merge the first change, then the second, to a target branch.\n svn_merge(rev1, A_path, A_COPY_path)\n svn_merge(rev2, A_path, A_COPY_path)\n\n # Both changes should merge automatically: the second one should not\n # complain about the local mod which the first one caused. The starting\n # value in the target (\"mine\") for the second merge is exactly equal to\n # the merge-left source value.\n\n # A merge-tracking version of this problem is when the merge-tracking\n # algorithm breaks a single requested merge into two phases because of\n # some other target within the same merge requiring only a part of the\n # revision range.\n\n # ====================================================================\n\n # We test issue #3250 here: that is, test that we can make two successive\n # conflicting changes to the same property on the same node (here a file;\n # in #3250 it was on a dir).\n #\n # ### But we no longer support merging into a node that's already in\n # conflict, and the 'rev3' merge here has been tweaked to resolve\n # the conflict, so it no longer tests the original #3250 scenario.\n #\n # Revert changes to branch wc\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', A_COPY_path)\n\n # In the branch, make two successive changes to the same property\n sbox.simple_propset('p', 'new-val-3', 'A_COPY/mu')\n sbox.simple_commit('A_COPY/mu')\n rev3 = initial_rev + 3\n sbox.simple_propset('p', 'new-val-4', 'A_COPY/mu')\n sbox.simple_commit('A_COPY/mu')\n rev4 = initial_rev + 4\n\n # Merge the two changes together to trunk.\n svn_merge([rev3, rev4], A_COPY_path, A_path, [\n \" C %s\\n\" % mu_path,\n ], prop_conflicts=1, args=['--allow-mixed-revisions'])\n\n # Revert changes to trunk wc, to test next scenario of #3250\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', A_path)\n\n # Merge the first change, then the second, to trunk.\n svn_merge(rev3, A_COPY_path, A_path, [\n \" C %s\\n\" % mu_path,\n \"Resolved .* '%s'\\n\" % mu_path,\n ], prop_resolved=1,\n args=['--allow-mixed-revisions',\n '--accept=working'])\n svn_merge(rev4, A_COPY_path, A_path, [\n \" C %s\\n\" % mu_path,\n ], prop_conflicts=1, args=['--allow-mixed-revisions'])\n\n os.chdir(was_cwd)\n\n#----------------------------------------------------------------------\ndef merge_an_eol_unification_and_set_svn_eol_style(sbox):\n \"merge an EOL unification and set svn:eol-style\"\n # In svn 1.5.2, merging the two changes between these three states:\n # r1. inconsistent EOLs and no svn:eol-style\n # r2. consistent EOLs and no svn:eol-style\n # r3. consistent EOLs and svn:eol-style=native\n # fails if attempted as a single merge (e.g. \"svn merge r1:3\") though it\n # succeeds if attempted in two phases (e.g. \"svn merge -c2,3\").\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Make a branch to merge to. 
(This will be r6.)\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n initial_rev = 6\n\n # Change into the WC dir for convenience\n was_cwd = os.getcwd()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_disk.wc_dir = ''\n wc_status.wc_dir = ''\n\n content1 = 'Line1\\nLine2\\r\\n' # write as 'binary' to get these exact EOLs\n content2 = 'Line1\\nLine2\\n' # write as 'text' to get native EOLs in file\n\n # In the source branch, create initial state and two successive changes.\n # Use binary mode to write the first file so no newline conversion occurs.\n svntest.main.file_write('A/mu', content1, 'wb')\n sbox.simple_commit('A/mu')\n rev1 = initial_rev + 1\n # Use text mode to write the second copy of the file to get native EOLs.\n svntest.main.file_write('A/mu', content2, 'w')\n sbox.simple_commit('A/mu')\n rev2 = initial_rev + 2\n sbox.simple_propset('svn:eol-style', 'native', 'A/mu')\n sbox.simple_commit('A/mu')\n rev3 = initial_rev + 3\n\n # Merge the initial state (inconsistent EOLs) to the target branch.\n svn_merge(rev1, 'A', 'A_COPY')\n sbox.simple_commit('A_COPY')\n\n # Merge the two changes together to the target branch.\n svn_merge([rev2, rev3], 'A', 'A_COPY',\n args=['--allow-mixed-revisions'])\n\n # That merge should succeed.\n # Surprise: setting svn:eol-style='LF' instead of 'native' doesn't fail.\n # Surprise: if we don't merge the file's 'rev1' state first, it doesn't fail\n # nor even raise a conflict.\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_adds_mergeinfo_correctly(sbox):\n \"merge adds mergeinfo to subtrees correctly\"\n\n # A merge may add explicit mergeinfo to the subtree of a merge target\n # as a result of changes in the merge source. These paths may have\n # inherited mergeinfo prior to the merge, if so the subtree should end up\n # with mergeinfo that reflects all of the following:\n #\n # A) The mergeinfo added from the merge source\n #\n # B) The mergeinfo the subtree inherited prior to the merge.\n #\n # C) Mergeinfo describing the merge performed.\n #\n # See http://subversion.tigris.org/servlets/ReadMsg?listName=dev&msgNo=142460\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Setup a 'trunk' and two branches.\n wc_disk, wc_status = set_up_branch(sbox, False, 2)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n D_COPY_path = sbox.ospath('A_COPY/D')\n A_COPY_2_path = sbox.ospath('A_COPY_2')\n D_COPY_2_path = sbox.ospath('A_COPY_2/D')\n\n # Update working copy to allow full inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(7), [],\n 'up', wc_dir)\n wc_status.tweak(wc_rev=7)\n\n # Merge r5 from A to A_COPY and commit as r8.\n # This creates explicit mergeinfo on A_COPY of '/A:5'.\n expected_output = wc.State(A_COPY_path, {\n 'D/G/rho': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=7),\n 'B' : Item(status=' ', wc_rev=7),\n 'mu' : Item(status=' ', wc_rev=7),\n 'B/E' : Item(status=' ', wc_rev=7),\n 'B/E/alpha' : Item(status=' ', wc_rev=7),\n 'B/E/beta' : Item(status=' ', wc_rev=7),\n 'B/lambda' : Item(status=' ', wc_rev=7),\n 'B/F' : Item(status=' ', wc_rev=7),\n 'C' : Item(status=' ', wc_rev=7),\n 'D' : Item(status=' ', wc_rev=7),\n 'D/G' : Item(status=' ', wc_rev=7),\n 'D/G/pi' : Item(status=' ', wc_rev=7),\n 'D/G/rho' : Item(status='M ', 
wc_rev=7),\n 'D/G/tau' : Item(status=' ', wc_rev=7),\n 'D/gamma' : Item(status=' ', wc_rev=7),\n 'D/H' : Item(status=' ', wc_rev=7),\n 'D/H/chi' : Item(status=' ', wc_rev=7),\n 'D/H/psi' : Item(status=' ', wc_rev=7),\n 'D/H/omega' : Item(status=' ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '4', '5',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n wc_status.tweak('A_COPY',\n 'A_COPY/D/G/rho',\n wc_rev=8)\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/D/G/rho' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Merge r7 from A/D to A_COPY_2/D and commit as r9.\n # This creates explicit mergeinfo on A_COPY_2/D of '/A/D:7'.\n expected_output = wc.State(D_COPY_2_path, {\n 'H/omega': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(D_COPY_2_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(D_COPY_2_path, {\n })\n expected_status = wc.State(D_COPY_2_path, {\n '' : Item(status=' M', wc_rev=7),\n 'G' : Item(status=' ', wc_rev=7),\n 'G/pi' : Item(status=' ', wc_rev=7),\n 'G/rho' : Item(status=' ', wc_rev=7),\n 'G/tau' : Item(status=' ', wc_rev=7),\n 'gamma' : Item(status=' ', wc_rev=7),\n 'H' : Item(status=' ', wc_rev=7),\n 'H/chi' : Item(status=' ', wc_rev=7),\n 'H/psi' : Item(status=' ', wc_rev=7),\n 'H/omega' : Item(status='M ', wc_rev=7),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D:7'}),\n 'G' : Item(),\n 'G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'H' : Item(),\n 'H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(D_COPY_2_path, '6', '7',\n sbox.repo_url + '/A/D', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n wc_status.tweak('A_COPY_2/D',\n 'A_COPY_2/D/H/omega',\n wc_rev=9)\n expected_output = wc.State(wc_dir, {\n 'A_COPY_2/D' : Item(verb='Sending'),\n 'A_COPY_2/D/H/omega' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Merge r9 from A_COPY_2 to A_COPY. A_COPY/D gets the explicit mergeinfo\n # '/A/D/:7' added from r9. 
But it prior to the merge it inherited '/A/D:5'\n # from A_COPY, so this should be present in its explicit mergeinfo. Lastly,\n # the mergeinfo describing this merge '/A_COPY_2:9' should also be present\n # in A_COPY's explicit mergeinfo.\n # Update working copy to allow full inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [],\n 'up', wc_dir)\n expected_output = wc.State(A_COPY_path, {\n 'D' : Item(status=' U'),\n 'D/H/omega': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status=' ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status=' ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' M', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status=' ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' ', wc_rev=9),\n 'D/H/chi' : Item(status=' ', wc_rev=9),\n 'D/H/psi' : Item(status=' ', wc_rev=9),\n 'D/H/omega' : Item(status='M ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5\\n/A_COPY_2:9'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(props={SVN_PROP_MERGEINFO : '/A/D:5,7\\n/A_COPY_2/D:9'}),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '8', '9',\n sbox.repo_url + '/A_COPY_2', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Revert and repeat the above merge, but this time create some\n # uncommitted mergeinfo on A_COPY/D, this should not cause a write\n # lock error as was seen in http://subversion.tigris.org/\n # ds/viewMessage.do?dsForumId=462&dsMessageId=103945\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '-R', wc_dir)\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', SVN_PROP_MERGEINFO, '',\n D_COPY_path)\n expected_output = wc.State(A_COPY_path, {\n 'D' : Item(status=' G'), # Merged with local svn:mergeinfo\n 'D/H/omega': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n svntest.actions.run_and_verify_merge(A_COPY_path, '8', '9',\n sbox.repo_url + '/A_COPY_2', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n 
check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef natural_history_filtering(sbox):\n \"natural history filtering permits valid mergeinfo\"\n\n # While filtering self-referential mergeinfo (e.g. natural history) that\n # a merge tries to add to a target, we may encounter contiguous revision\n # ranges that describe *both* natural history and valid mergeinfo. The\n # former should be filtered, but the latter allowed and recorded on the\n # target. See\n # http://subversion.tigris.org/servlets/ReadMsg?listName=dev&msgNo=142777.\n #\n # To set up a situation where this can occur we'll do the following:\n #\n # trunk -1-----3-4-5-6-------8----------- A\n # \\ \\ \\\n # branch1 2-----------\\-------9-------- A_COPY\n # \\ \\\n # branch2 7--------10---- A_COPY_2\n #\n # 1) Create a 'trunk'.\n #\n # 2) Copy 'trunk' to 'branch1'.\n #\n # 3) Make some changes under 'trunk'.\n #\n # 4) Copy 'trunk' to 'branch2'.\n #\n # 5) Make some more changes under 'trunk'.\n #\n # 6) Merge all available revisions from 'trunk' to 'branch1' and commit.\n #\n # 7) Merge all available revisions from 'branch1' to 'branch2'.\n # 'branch2' should have explicit merginfo for both 'branch1' *and* for\n # the revisions on 'trunk' which occurred after 'branch2' was copied as\n # these are not part of 'branch2's natural history.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n A_COPY_2_path = sbox.ospath('A_COPY_2')\n chi_path = sbox.ospath('A/D/H/chi')\n\n # r1-r6: Setup a 'trunk' (A) and a 'branch' (A_COPY).\n wc_disk, wc_status = set_up_branch(sbox, False, 1)\n\n # r7: Make a second 'branch': Copy A to A_COPY_2\n expected = svntest.verify.UnorderedOutput(\n [\"A \" + os.path.join(A_COPY_2_path, \"B\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"B\", \"lambda\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"B\", \"E\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"B\", \"E\", \"alpha\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"B\", \"E\", \"beta\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"B\", \"F\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"mu\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"C\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"gamma\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"G\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"G\", \"pi\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"G\", \"rho\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"G\", \"tau\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"H\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"H\", \"chi\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"H\", \"omega\") + \"\\n\",\n \"A \" + os.path.join(A_COPY_2_path, \"D\", \"H\", \"psi\") + \"\\n\",\n \"Checked out revision 6.\\n\",\n \"A \" + A_COPY_2_path + \"\\n\"])\n wc_status.add({\n \"A_COPY_2\" + \"/B\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/B/lambda\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/B/E\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/B/E/alpha\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/B/E/beta\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/B/F\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/mu\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/C\" : Item(status=' ', 
wc_rev=7),\n \"A_COPY_2\" + \"/D\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/gamma\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/G\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/G/pi\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/G/rho\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/G/tau\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/H\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/H/chi\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/H/omega\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" + \"/D/H/psi\" : Item(status=' ', wc_rev=7),\n \"A_COPY_2\" : Item(status=' ', wc_rev=7),\n })\n wc_disk.add({\n \"A_COPY_2\" : Item(),\n \"A_COPY_2\" + '/B' : Item(),\n \"A_COPY_2\" + '/B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n \"A_COPY_2\" + '/B/E' : Item(),\n \"A_COPY_2\" + '/B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n \"A_COPY_2\" + '/B/E/beta' : Item(\"New content\"),\n \"A_COPY_2\" + '/B/F' : Item(),\n \"A_COPY_2\" + '/mu' : Item(\"This is the file 'mu'.\\n\"),\n \"A_COPY_2\" + '/C' : Item(),\n \"A_COPY_2\" + '/D' : Item(),\n \"A_COPY_2\" + '/D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n \"A_COPY_2\" + '/D/G' : Item(),\n \"A_COPY_2\" + '/D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n \"A_COPY_2\" + '/D/G/rho' : Item(\"New content\"),\n \"A_COPY_2\" + '/D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n \"A_COPY_2\" + '/D/H' : Item(),\n \"A_COPY_2\" + '/D/H/chi' : Item(\"New content\"),\n \"A_COPY_2\" + '/D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n \"A_COPY_2\" + '/D/H/psi' : Item(\"New content\"),\n })\n svntest.actions.run_and_verify_svn(expected, [], 'copy',\n sbox.repo_url + \"/A\",\n A_COPY_2_path)\n expected_output = wc.State(wc_dir, {\"A_COPY_2\" : Item(verb='Adding')})\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n wc_status)\n\n # r8: Make a text change under A, to A/D/H/chi.\n svntest.main.file_write(chi_path, \"New content\")\n expected_output = wc.State(wc_dir, {'A/D/H/chi' : Item(verb='Sending')})\n wc_status.tweak('A/D/H/chi', wc_rev=8)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n wc_disk.tweak('A/D/H/psi', contents=\"New content\")\n\n # r9: Merge all available revisions from A to A_COPY. 
But first\n # update working copy to allow full inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [],\n 'up', wc_dir)\n wc_status.tweak(wc_rev=8)\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U '),\n 'D/G/rho' : Item(status='U '),\n 'D/H/chi' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n 'D/H/omega': Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=8),\n 'B' : Item(status=' ', wc_rev=8),\n 'mu' : Item(status=' ', wc_rev=8),\n 'B/E' : Item(status=' ', wc_rev=8),\n 'B/E/alpha' : Item(status=' ', wc_rev=8),\n 'B/E/beta' : Item(status='M ', wc_rev=8),\n 'B/lambda' : Item(status=' ', wc_rev=8),\n 'B/F' : Item(status=' ', wc_rev=8),\n 'C' : Item(status=' ', wc_rev=8),\n 'D' : Item(status=' ', wc_rev=8),\n 'D/G' : Item(status=' ', wc_rev=8),\n 'D/G/pi' : Item(status=' ', wc_rev=8),\n 'D/G/rho' : Item(status='M ', wc_rev=8),\n 'D/G/tau' : Item(status=' ', wc_rev=8),\n 'D/gamma' : Item(status=' ', wc_rev=8),\n 'D/H' : Item(status=' ', wc_rev=8),\n 'D/H/chi' : Item(status='M ', wc_rev=8),\n 'D/H/psi' : Item(status='M ', wc_rev=8),\n 'D/H/omega' : Item(status='M ', wc_rev=8),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-8'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"New content\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n wc_status.tweak('A_COPY',\n 'A_COPY/B/E/beta',\n 'A_COPY/D/G/rho',\n 'A_COPY/D/H/chi',\n 'A_COPY/D/H/psi',\n 'A_COPY/D/H/omega',\n wc_rev=9)\n expected_output = wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D/G/rho' : Item(verb='Sending'),\n 'A_COPY/D/H/chi' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # Again update the working copy to allow full inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [],\n 'up', wc_dir)\n wc_status.tweak(wc_rev=9)\n\n # Merge all available revisions from A_COPY to A_COPY_2. The mergeinfo on\n # A_COPY_2 should reflect both the merge of revisions 2-9 from A_COPY *and*\n # revisions 7-8 from A. 
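  # (Aside, illustration only, not executed by this test: after the merge
  # below the filtered result could be confirmed by hand with something
  # like
  #
  #   >svn propget svn:mergeinfo A_COPY_2
  #   /A:7-8
  #   /A_COPY:2-9
  #
  # which is the same value asserted via expected_disk further down.)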
Reivisions 2-6 from A should not be part of the\n # explicit mergeinfo on A_COPY_2 as they are already part of its natural\n # history.\n expected_output = wc.State(A_COPY_2_path, {\n '' : Item(status=' U'),\n 'D/H/chi' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_2_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_2_path, {\n })\n expected_status = wc.State(A_COPY_2_path, {\n '' : Item(status=' M', wc_rev=9),\n 'B' : Item(status=' ', wc_rev=9),\n 'mu' : Item(status=' ', wc_rev=9),\n 'B/E' : Item(status=' ', wc_rev=9),\n 'B/E/alpha' : Item(status=' ', wc_rev=9),\n 'B/E/beta' : Item(status=' ', wc_rev=9),\n 'B/lambda' : Item(status=' ', wc_rev=9),\n 'B/F' : Item(status=' ', wc_rev=9),\n 'C' : Item(status=' ', wc_rev=9),\n 'D' : Item(status=' ', wc_rev=9),\n 'D/G' : Item(status=' ', wc_rev=9),\n 'D/G/pi' : Item(status=' ', wc_rev=9),\n 'D/G/rho' : Item(status=' ', wc_rev=9),\n 'D/G/tau' : Item(status=' ', wc_rev=9),\n 'D/gamma' : Item(status=' ', wc_rev=9),\n 'D/H' : Item(status=' ', wc_rev=9),\n 'D/H/chi' : Item(status='M ', wc_rev=9),\n 'D/H/psi' : Item(status=' ', wc_rev=9),\n 'D/H/omega' : Item(status=' ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:7-8\\n/A_COPY:2-9'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"New content\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_2_path, None, None,\n sbox.repo_url + '/A_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(3067)\ndef subtree_gets_changes_even_if_ultimately_deleted(sbox):\n \"subtree gets changes even if ultimately deleted\"\n\n # merge_tests.py 101 'merge tries to delete a file of identical content'\n # demonstrates how a file can be deleted by a merge if the file is identical\n # to the file deleted in the merge source. If the file differs then it\n # should be 'skipped' as a tree-conflict. But suppose the file has\n # mergeinfo such that the requested merge should bring the file into a state\n # identical to the deleted source *before* attempting to delete it. Then the\n # file should get those changes first and then be deleted rather than skipped.\n #\n # This problem, as discussed here,\n # http://subversion.tigris.org/servlets/ReadMsg?listName=dev&msgNo=141533,\n # is only nominally a tree conflict issue. 
More accurately this is yet\n # another issue #3067 problem, in that the merge target has a subtree which\n # doesn't exist in part of the requested merge range.\n\n # r1: Create a greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n psi_path = sbox.ospath('A/D/H/psi')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n\n # r2 - r6: Copy A to A_COPY and then make some text changes under A.\n set_up_branch(sbox)\n\n # r7: Make an additional text mod to A/D/H/psi.\n svntest.main.file_write(psi_path, \"Even newer content\")\n sbox.simple_commit(message='mod psi')\n\n # r8: Delete A/D/H/psi.\n svntest.actions.run_and_verify_svn(None, [],\n 'delete', psi_path)\n sbox.simple_commit(message='delete psi')\n\n # Update WC before merging so mergeinfo elision and inheritance\n # occur smoothly.\n svntest.main.run_svn(None, 'up', wc_dir)\n\n # r9: Merge r3,7 from A/D/H to A_COPY/D/H, then reverse merge r7 from\n # A/D/H/psi to A_COPY/D/H/psi.\n expected_output = wc.State(H_COPY_path, {\n 'psi' : Item(status='G ', prev_status='U '), # Touched twice\n })\n expected_mergeinfo_output = wc.State(H_COPY_path, {\n '' : Item(status=' G', prev_status=' U'),\n })\n expected_elision_output = wc.State(H_COPY_path, {\n })\n expected_status = wc.State(H_COPY_path, {\n '' : Item(status=' M', wc_rev=8),\n 'psi' : Item(status='M ', wc_rev=8),\n 'omega' : Item(status=' ', wc_rev=8),\n 'chi' : Item(status=' ', wc_rev=8),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:3,7'}),\n 'psi' : Item(\"Even newer content\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(H_COPY_path, { })\n\n svntest.actions.run_and_verify_merge(H_COPY_path, None, None,\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, False,\n '-c3,7', H_COPY_path)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[-7]],\n ['G ' + psi_COPY_path + '\\n',\n ' G ' + psi_COPY_path + '\\n',]),\n [], 'merge', '-c-7', sbox.repo_url + '/A/D/H/psi@7', psi_COPY_path)\n sbox.simple_commit(message='merge -c3,7 from A/D/H,' \\\n 'reverse merge -c-7 from A/D/H/psi')\n\n # Merge all available revisions from A/D/H to A_COPY/D/H. 
This merge\n # ultimately tries to delete A_COPY/D/H/psi, but first it should merge\n # r7 to A_COPY/D/H/psi, since that is one of the available revisions.\n # Then when merging the deletion of A_COPY/D/H/psi in r8 the file will\n # be identical to the deleted source A/D/H/psi and the deletion will\n # succeed.\n #\n # Update WC before merging so mergeinfo elision and inheritance\n # occur smoothly.\n svntest.main.run_svn(None, 'up', wc_dir)\n expected_output = wc.State(H_COPY_path, {\n 'omega' : Item(status='U '),\n 'psi' : Item(status='D ', prev_status='U '),\n })\n expected_mergeinfo_output = wc.State(H_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(H_COPY_path, {\n })\n expected_status = wc.State(H_COPY_path, {\n '' : Item(status=' M', wc_rev=9),\n 'psi' : Item(status='D ', wc_rev=9),\n 'omega' : Item(status='M ', wc_rev=9),\n 'chi' : Item(status=' ', wc_rev=9),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:2-9'}),\n 'omega' : Item(\"New content\"),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n })\n expected_skip = wc.State(H_COPY_path, { })\n\n svntest.actions.run_and_verify_merge(H_COPY_path, None, None,\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status, expected_skip,\n [], True, False)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef no_self_referential_filtering_on_added_path(sbox):\n \"no self referential filtering on added path\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n C_COPY_path = sbox.ospath('A_COPY/C')\n A_path = sbox.ospath('A')\n C_path = sbox.ospath('A/C')\n A_COPY_2_path = sbox.ospath('A_COPY_2')\n\n # r1-r7: Setup a 'trunk' and two 'branches'.\n wc_disk, wc_status = set_up_branch(sbox, False, 2)\n\n # r8: Make a prop change on A_COPY/C.\n svntest.actions.run_and_verify_svn([\"property 'propname' set on '\" +\n C_COPY_path + \"'\\n\"], [],\n 'ps', 'propname', 'propval',\n C_COPY_path)\n expected_output = svntest.wc.State(wc_dir,\n {'A_COPY/C' : Item(verb='Sending')})\n wc_status.tweak('A_COPY/C', wc_rev=8)\n wc_disk.tweak(\"A_COPY/C\",\n props={'propname' : 'propval'})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # r9: Merge r8 from A_COPY to A.\n #\n # Update first to avoid an out of date error.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [], 'up',\n wc_dir)\n wc_status.tweak(wc_rev=8)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[8]],\n [' U ' + C_path + '\\n',\n ' U ' + A_path + '\\n',]),\n [], 'merge', '-c8', sbox.repo_url + '/A_COPY', A_path)\n expected_output = svntest.wc.State(wc_dir,\n {'A' : Item(verb='Sending'),\n 'A/C' : Item(verb='Sending')})\n wc_status.tweak('A', 'A/C', wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n wc_disk.tweak(\"A/C\",\n props={'propname' : 'propval'})\n wc_disk.tweak(\"A\",\n props={SVN_PROP_MERGEINFO : '/A_COPY:8'})\n\n # r10: Move A/C to A/C_MOVED.\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 10.\\n'],\n [], 'move',\n sbox.repo_url + '/A/C',\n sbox.repo_url + '/A/C_MOVED',\n '-m', 'Copy A/C to A/C_MOVED')\n svntest.actions.run_and_verify_svn(None, [], 'up',\n wc_dir)\n\n # Now try to merge all available revisions from A to A_COPY_2.\n # This should try to add the directory A_COPY_2/C_MOVED which has\n # explicit 
mergeinfo. This should not break self-referential mergeinfo\n # filtering logic...in fact there is no reason to even attempt such\n # filtering since the file is *new*.\n\n expected_output = wc.State(A_COPY_2_path, {\n '' : Item(status=' U'),\n 'B/E/beta' : Item(status='U '),\n 'D/G/rho' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n 'D/H/omega' : Item(status='U '),\n 'C' : Item(status='D '),\n 'C_MOVED' : Item(status='A '),\n })\n # Why is C_MOVED notified as ' G' rather than ' U'? C_MOVED was\n # added by the merge and there is only a single editor drive, so\n # how can any prop changes be merged to it? The answer is that\n # the merge code does some quiet housekeeping, merging C_MOVED's\n # inherited mergeinfo into its incoming mergeinfo, see\n # https://issues.apache.org/jira/browse/SVN-4309\n # This test is not covering issue #4309 so we let the current\n # behavior pass.\n expected_mergeinfo_output = wc.State(A_COPY_2_path, {\n '' : Item(status=' G'),\n 'C_MOVED' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_2_path, {\n })\n expected_A_COPY_2_status = wc.State(A_COPY_2_path, {\n '' : Item(status=' M', wc_rev=10),\n 'B' : Item(status=' ', wc_rev=10),\n 'mu' : Item(status=' ', wc_rev=10),\n 'B/E' : Item(status=' ', wc_rev=10),\n 'B/E/alpha' : Item(status=' ', wc_rev=10),\n 'B/E/beta' : Item(status='M ', wc_rev=10),\n 'B/lambda' : Item(status=' ', wc_rev=10),\n 'B/F' : Item(status=' ', wc_rev=10),\n 'C' : Item(status='D ', wc_rev=10),\n 'C_MOVED' : Item(status='A ', wc_rev='-', copied='+'),\n 'D' : Item(status=' ', wc_rev=10),\n 'D/G' : Item(status=' ', wc_rev=10),\n 'D/G/pi' : Item(status=' ', wc_rev=10),\n 'D/G/rho' : Item(status='M ', wc_rev=10),\n 'D/G/tau' : Item(status=' ', wc_rev=10),\n 'D/gamma' : Item(status=' ', wc_rev=10),\n 'D/H' : Item(status=' ', wc_rev=10),\n 'D/H/chi' : Item(status=' ', wc_rev=10),\n 'D/H/psi' : Item(status='M ', wc_rev=10),\n 'D/H/omega' : Item(status='M ', wc_rev=10),\n })\n expected_A_COPY_2_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3-10\\n/A_COPY:8'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n # What's up with the mergeinfo\n 'C_MOVED' : Item(props={SVN_PROP_MERGEINFO : '/A/C_MOVED:10\\n' +\n '/A_COPY/C:8\\n' +\n '/A_COPY/C_MOVED:8',\n 'propname' : 'propval'}),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_A_COPY_2_skip = wc.State(A_COPY_2_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_2_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_COPY_2_disk,\n expected_A_COPY_2_status,\n expected_A_COPY_2_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Test for issue #3324\n# https://issues.apache.org/jira/browse/SVN-3324\n@Issue(3324)\n@SkipUnless(server_has_mergeinfo)\ndef merge_range_prior_to_rename_source_existence(sbox):\n \"merge prior to rename src existence still dels src\"\n\n # Replicate a merge bug 
found while synching up a feature branch on the\n # Subversion repository with trunk. See r874121 of\n # http://svn.apache.org/repos/asf/subversion/branches/ignore-mergeinfo, in which\n # a move was merged to the target, but the delete half of the move\n # didn't occur.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n nu_path = sbox.ospath('A/D/H/nu')\n nu_moved_path = sbox.ospath('A/D/H/nu_moved')\n A_path = sbox.ospath('A')\n alpha_path = sbox.ospath('A/B/E/alpha')\n A_COPY_path = sbox.ospath('A_COPY')\n A_COPY_2_path = sbox.ospath('A_COPY_2')\n B_COPY_path = sbox.ospath('A_COPY/B')\n B_COPY_2_path = sbox.ospath('A_COPY_2/B')\n alpha_COPY_path = sbox.ospath('A_COPY/B/E/alpha')\n beta_COPY_path = sbox.ospath('A_COPY/B/E/beta')\n gamma_COPY_path = sbox.ospath('A_COPY/D/gamma')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n nu_COPY_path = sbox.ospath('A_COPY/D/H/nu')\n\n # Setup our basic 'trunk' and 'branch':\n # r2 - Copy A to A_COPY\n # r3 - Copy A to A_COPY_2\n # r4 - Text change to A/D/H/psi\n # r5 - Text change to A/D/G/rho\n # r6 - Text change to A/B/E/beta\n # r7 - Text change to A/D/H/omega\n wc_disk, wc_status = set_up_branch(sbox, False, 2)\n\n # r8 - Text change to A/B/E/alpha\n svntest.main.file_write(alpha_path, \"New content\")\n wc_status.tweak('A/B/E/alpha', wc_rev=8)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Text change', wc_dir)\n\n # r9 - Add the file A/D/H/nu and make another change to A/B/E/alpha.\n svntest.main.file_write(alpha_path, \"Even newer content\")\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n expected_output = wc.State(wc_dir,\n {'A/D/H/nu' : Item(verb='Adding'),\n 'A/B/E/alpha' : Item(verb='Sending')})\n wc_status.add({'A/D/H/nu' : Item(status=' ', wc_rev=9)})\n wc_status.tweak('A/B/E/alpha', wc_rev=9)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r10 - Merge all available revisions (i.e. 
-r1:9) from A to A_COPY.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [], 'up',\n wc_dir)\n wc_status.tweak(wc_rev=9)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[2,9]],\n ['A ' + nu_COPY_path + '\\n',\n 'U ' + alpha_COPY_path + '\\n',\n 'U ' + beta_COPY_path + '\\n',\n 'U ' + rho_COPY_path + '\\n',\n 'U ' + omega_COPY_path + '\\n',\n 'U ' + psi_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',]),\n [], 'merge', sbox.repo_url + '/A', A_COPY_path)\n expected_output = wc.State(wc_dir,\n {'A_COPY' : Item(verb='Sending'),\n 'A_COPY/D/H/nu' : Item(verb='Adding'),\n 'A_COPY/B/E/alpha' : Item(verb='Sending'),\n 'A_COPY/B/E/beta' : Item(verb='Sending'),\n 'A_COPY/D/G/rho' : Item(verb='Sending'),\n 'A_COPY/D/H/omega' : Item(verb='Sending'),\n 'A_COPY/D/H/psi' : Item(verb='Sending')})\n wc_status.tweak('A_COPY',\n 'A_COPY/B/E/alpha',\n 'A_COPY/B/E/beta',\n 'A_COPY/D/G/rho',\n 'A_COPY/D/H/omega',\n 'A_COPY/D/H/psi',\n wc_rev=10)\n wc_status.add({'A_COPY/D/H/nu' : Item(status=' ', wc_rev=10)})\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r11 - Reverse merge -r9:1 from A/B to A_COPY/B\n svntest.actions.run_and_verify_svn(exp_noop_up_out(10), [], 'up',\n wc_dir)\n wc_status.tweak(wc_rev=10)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[9,2]], ['U ' + alpha_COPY_path + '\\n',\n 'U ' + beta_COPY_path + '\\n',\n ' G ' + B_COPY_path + '\\n',]),\n [], 'merge', sbox.repo_url + '/A/B', B_COPY_path, '-r9:1')\n expected_output = wc.State(wc_dir,\n {'A_COPY/B' : Item(verb='Sending'),\n 'A_COPY/B/E/alpha' : Item(verb='Sending'),\n 'A_COPY/B/E/beta' : Item(verb='Sending')})\n wc_status.tweak('A_COPY/B',\n 'A_COPY/B/E/alpha',\n 'A_COPY/B/E/beta',\n wc_rev=11)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n\n # r12 - Move A/D/H/nu to A/D/H/nu_moved\n svntest.actions.run_and_verify_svn([\"Committing transaction...\\n\",\n \"Committed revision 12.\\n\"], [],\n 'move', sbox.repo_url + '/A/D/H/nu',\n sbox.repo_url + '/A/D/H/nu_moved',\n '-m', 'Move nu to nu_moved')\n expected_output = svntest.verify.UnorderedOutput(\n [\"Updating '%s':\\n\" % (wc_dir),\n \"D \" + nu_path + \"\\n\",\n \"A \" + nu_moved_path + \"\\n\",\n \"Updated to revision 12.\\n\"],\n )\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'up', wc_dir)\n\n # Now merge -r7:12 from A to A_COPY.\n # A_COPY needs only -r10:12, which amounts to the rename of nu.\n # The subtree A_COPY/B needs the entire range -r7:12 because of\n # the reverse merge we performed in r11; the only operative change\n # here is the text mod to alpha made in r9.\n #\n # This merge previously failed because the delete half of the A_COPY/D/H/nu\n # to A_COPY/D/H/nu_moved move was reported in the notifications, but didn't\n # actually happen.\n expected_output = wc.State(A_COPY_path, {\n 'B/E/alpha' : Item(status='U '),\n 'D/H/nu' : Item(status='D '),\n 'D/H/nu_moved' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'B' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=12),\n 'B' : Item(status=' M', wc_rev=12),\n 'mu' : Item(status=' ', wc_rev=12),\n 'B/E' : Item(status=' ', wc_rev=12),\n 'B/E/alpha' : Item(status='M ', wc_rev=12),\n 'B/E/beta' : Item(status=' ', wc_rev=12),\n 'B/lambda' : Item(status=' ', wc_rev=12),\n 'B/F' : Item(status=' ', wc_rev=12),\n 'C' : Item(status=' ', 
wc_rev=12),\n 'D' : Item(status=' ', wc_rev=12),\n 'D/G' : Item(status=' ', wc_rev=12),\n 'D/G/pi' : Item(status=' ', wc_rev=12),\n 'D/G/rho' : Item(status=' ', wc_rev=12),\n 'D/G/tau' : Item(status=' ', wc_rev=12),\n 'D/gamma' : Item(status=' ', wc_rev=12),\n 'D/H' : Item(status=' ', wc_rev=12),\n 'D/H/nu' : Item(status='D ', wc_rev=12),\n 'D/H/nu_moved' : Item(status='A ', wc_rev='-', copied='+'),\n 'D/H/chi' : Item(status=' ', wc_rev=12),\n 'D/H/psi' : Item(status=' ', wc_rev=12),\n 'D/H/omega' : Item(status=' ', wc_rev=12),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-12'}),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B' : Item(props={SVN_PROP_MERGEINFO : '/A/B:8-12'}),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"Even newer content\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/nu_moved' : Item(\"This is the file 'nu'.\\n\"),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, {})\n svntest.actions.run_and_verify_merge(A_COPY_path, 7, 12,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Merge -r7:12 from A to A_COPY', wc_dir)\n\n # Now run a similar scenario as above on the second branch, but with\n # a reverse merge this time.\n #\n # r14 - Merge all available revisions from A/B to A_COPY_B and then merge\n # -r2:9 from A to A_COPY_2. 
Among other things, this adds A_COPY_2/D/H/nu\n # and leaves us with mergeinfo on the A_COPY_2 branch of:\n #\n # Properties on 'A_COPY_2':\n # svn:mergeinfo\n # /A:3-9\n # Properties on 'A_COPY_2\\B':\n # svn:mergeinfo\n # /A/B:3-13\n svntest.actions.run_and_verify_svn(exp_noop_up_out(13), [], 'up',\n wc_dir)\n svntest.actions.run_and_verify_svn(None, # Don't check stdout, we test this\n # type of merge to death elsewhere.\n [], 'merge', sbox.repo_url + '/A/B',\n B_COPY_2_path)\n svntest.actions.run_and_verify_svn(None,[], 'merge', '-r', '2:9',\n sbox.repo_url + '/A', A_COPY_2_path)\n svntest.actions.run_and_verify_svn(\n None, [], 'ci', '-m',\n 'Merge all from A/B to A_COPY_2/B\\nMerge -r2:9 from A to A_COPY_2',\n wc_dir)\n svntest.actions.run_and_verify_svn(exp_noop_up_out(14), [], 'up',\n wc_dir)\n\n # Now reverse merge -r13:7 from A to A_COPY_2.\n #\n # Recall:\n #\n # >svn log -r8:13 ^/A -v\n # ------------------------------------------------------------------------\n # r8 | jrandom | 2010-10-14 11:25:59 -0400 (Thu, 14 Oct 2010) | 1 line\n # Changed paths:\n # M /A/B/E/alpha\n #\n # Text change\n # ------------------------------------------------------------------------\n # r9 | jrandom | 2010-10-14 11:25:59 -0400 (Thu, 14 Oct 2010) | 1 line\n # Changed paths:\n # M /A/B/E/alpha\n # A /A/D/H/nu\n #\n # log msg\n # ------------------------------------------------------------------------\n # r12 | jrandom | 2010-10-14 11:26:01 -0400 (Thu, 14 Oct 2010) | 1 line\n # Changed paths:\n # D /A/D/H/nu\n # A /A/D/H/nu_moved (from /A/D/H/nu:11)\n #\n # Move nu to nu_moved\n # ------------------------------------------------------------------------\n #\n # We can only reverse merge changes from the explicit mergeinfo or\n # natural history of a target, but since all of these changes intersect with\n # the target's explicit mergeinfo (including subtrees), all should be\n # reverse merged, including the deletion of A_COPY/D/H/nu. 
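  # (Aside, illustration only: the revisions that are candidates for such a
  # reverse merge could be inspected by hand with, e.g.,
  #
  #   >svn mergeinfo --show-revs merged ^/A A_COPY_2
  #
  # i.e. roughly the revisions of ^/A that the client already considers
  # merged into A_COPY_2 and which are therefore reversible.)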
Like the forward\n # merge performed earlier, this test previously failed when A_COPY/D/H/nu\n # was reported as deleted, but still remained as a versioned item in the WC.\n expected_output = wc.State(A_COPY_2_path, {\n 'B/E/alpha' : Item(status='U '),\n 'D/H/nu' : Item(status='D '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_2_path, {\n '' : Item(status=' U'),\n 'B' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_2_path, {\n 'B' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_2_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' M'),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status='M '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/nu' : Item(status='D '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=14)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3-7'}),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B' : Item(),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, {})\n svntest.actions.run_and_verify_merge(A_COPY_2_path, 13, 7,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n#----------------------------------------------------------------------\ndef set_up_natural_history_gap(sbox):\n '''Starting with standard greek tree, do the following:\n r2 - A/D/H/psi\n r3 - A/D/G/rho\n r4 - A/B/E/beta\n r5 - A/D/H/omega\n r6 - Delete A\n r7 - \"Resurrect\" A, by copying A@2 to A\n r8 - Copy A to A_COPY\n r9 - Text mod to A/D/gamma\n Lastly it updates the WC to r9.\n All text mods set file contents to \"New content\".\n Return (expected_disk, expected_status).'''\n\n # r1: Create a standard greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # r2-5: Make some changes under 'A' (no branches yet).\n wc_disk, wc_status = set_up_branch(sbox, False, 0)\n\n # Some paths we'll care about.\n A_COPY_path = sbox.ospath('A_COPY')\n gamma_path = sbox.ospath('A/D/gamma')\n\n # r6: Delete 'A'\n exit_code, out, err = svntest.actions.run_and_verify_svn(\n [\"Committing transaction...\\n\",\n \"Committed revision 6.\\n\"], [],\n 'delete', sbox.repo_url + '/A', '-m', 'Delete A')\n\n # r7: Resurrect 'A' by copying 'A@2' to 'A'.\n exit_code, out, err = svntest.actions.run_and_verify_svn(\n [\"Committing transaction...\\n\",\n \"Committed revision 7.\\n\"], [],\n 'copy', sbox.repo_url + '/A@2', sbox.repo_url + '/A',\n '-m', 'Resurrect A from A@2')\n\n # r8: Branch the resurrected 'A' to 'A_COPY'.\n exit_code, out, err = 
svntest.actions.run_and_verify_svn(\n [\"Committing transaction...\\n\",\n \"Committed revision 8.\\n\"], [],\n 'copy', sbox.repo_url + '/A', sbox.repo_url + '/A_COPY',\n '-m', 'Copy A to A_COPY')\n\n # Update to bring all the repos side changes down.\n exit_code, out, err = svntest.actions.run_and_verify_svn(None, [],\n 'up', wc_dir)\n wc_status.add({\n \"A_COPY/B\" : Item(status=' '),\n \"A_COPY/B/lambda\" : Item(status=' '),\n \"A_COPY/B/E\" : Item(status=' '),\n \"A_COPY/B/E/alpha\" : Item(status=' '),\n \"A_COPY/B/E/beta\" : Item(status=' '),\n \"A_COPY/B/F\" : Item(status=' '),\n \"A_COPY/mu\" : Item(status=' '),\n \"A_COPY/C\" : Item(status=' '),\n \"A_COPY/D\" : Item(status=' '),\n \"A_COPY/D/gamma\" : Item(status=' '),\n \"A_COPY/D/G\" : Item(status=' '),\n \"A_COPY/D/G/pi\" : Item(status=' '),\n \"A_COPY/D/G/rho\" : Item(status=' '),\n \"A_COPY/D/G/tau\" : Item(status=' '),\n \"A_COPY/D/H\" : Item(status=' '),\n \"A_COPY/D/H/chi\" : Item(status=' '),\n \"A_COPY/D/H/omega\" : Item(status=' '),\n \"A_COPY/D/H/psi\" : Item(status=' '),\n \"A_COPY\" : Item(status=' ')})\n wc_status.tweak(wc_rev=8)\n\n # r9: Make a text change to 'A/D/gamma'.\n svntest.main.file_write(gamma_path, \"New content\")\n expected_output = wc.State(wc_dir, {'A/D/gamma' : Item(verb='Sending')})\n wc_status.tweak('A/D/gamma', wc_rev=9)\n\n # Update the WC to a uniform revision.\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n wc_status)\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [],\n 'up', wc_dir)\n return wc_disk, wc_status\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef dont_merge_gaps_in_history(sbox):\n \"mergeinfo aware merges ignore natural history gaps\"\n\n ## See http://svn.haxx.se/dev/archive-2008-11/0618.shtml ##\n\n wc_dir = sbox.wc_dir\n\n # Create a branch with gaps in its natural history.\n set_up_natural_history_gap(sbox)\n\n # Some paths we'll care about.\n A_COPY_path = sbox.ospath('A_COPY')\n\n # Now merge all available changes from 'A' to 'A_COPY'. The only\n # available revisions are r8 and r9. Only r9 effects the source/target\n # so this merge should change 'A/D/gamma' from r9. 
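  # (Aside, illustration only, not run by the test: from within the working
  # copy the mergeable revisions could be listed with something like
  #
  #   >svn mergeinfo --show-revs eligible ^/A A_COPY
  #
  # which, once this works as intended, should report only the
  # post-resurrection revisions, consistent with the '/A:8-9' mergeinfo the
  # merge below is expected to record.)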
The fact that 'A_COPY'\n # has 'broken' natural history, i.e.\n #\n # /A:2,7 <-- Recall 'A@7' was copied from 'A@2'.\n # /A_COPY:8-9\n #\n # should have no impact, but currently this fact is causing a failure:\n #\n # >svn merge %url127%/A merge_tests-127\\A_COPY\n # ..\\..\\..\\subversion\\libsvn_repos\\reporter.c:1162: (apr_err=160005)\n # svn: Target path '/A' does not exist.\n expected_output = wc.State(A_COPY_path, {\n 'D/gamma' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status='M '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=9)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:8-9'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"New content\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Test for issue #3432 'Merge can record mergeinfo from natural history\n# gaps'. 
See https://issues.apache.org/jira/browse/SVN-3432\n@Issue(3432)\n@SkipUnless(server_has_mergeinfo)\ndef handle_gaps_in_implicit_mergeinfo(sbox):\n \"correctly consider natural history gaps\"\n\n wc_dir = sbox.wc_dir\n\n # Create a branch with gaps in its natural history.\n #\n # r1--------r2--------r3--------r4--------r5--------r6\n # Add 'A' edit edit edit edit Delete A\n # psi rho beta omega\n # |\n # V\n # r7--------r9----------------->\n # Rez 'A' edit\n # | gamma\n # |\n # V\n # r8--------------------------->\n # Copy 'A@7' to\n # 'A_COPY'\n #\n expected_disk, expected_status = set_up_natural_history_gap(sbox)\n\n # Some paths we'll care about.\n A_COPY_path = sbox.ospath('A_COPY')\n\n # Merge r4 to 'A_COPY' from A@4, which is *not* part of A_COPY's history.\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status='M '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=9)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:4'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"), # From the merge of A@4\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"), # From A@2\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, 3, 4,\n sbox.repo_url + '/A@4', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Now reverse merge -r9:2 from 'A@HEAD' to 'A_COPY'. This should be\n # a no-op since the only operative change made on 'A@HEAD' between r2:9\n # is the text mod to 'A/D/gamma' made in r9, but since that was after\n # 'A_COPY' was copied from 'A 'and that change was never merged, we don't\n # try to reverse merge it.\n #\n # Also, the mergeinfo recorded by the previous merge, i.e. '/A:4', should\n # *not* be removed! 
A@4 is not on the same line of history as 'A@9'.\n expected_output = wc.State(A_COPY_path, {})\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' G'),\n })\n svntest.actions.run_and_verify_merge(A_COPY_path, 9, 2,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Now merge all available revisions from 'A' to 'A_COPY'.\n # The mergeinfo '/A:4' on 'A_COPY' should have no impact on this merge\n # since it refers to another line of history. Since 'A_COPY' was copied\n # from 'A@7' the only available revisions are r8 and r9.\n expected_output = wc.State(A_COPY_path, {\n 'D/gamma' : Item(status='U '),\n })\n expected_status.tweak('D/gamma', status='M ')\n expected_disk.tweak('D/gamma', contents='New content')\n expected_disk.tweak('', props={SVN_PROP_MERGEINFO : '/A:4,8-9'})\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Test for issue #3323 'Mergeinfo deleted by a merge should disappear'\n@Issue(3323)\n@SkipUnless(server_has_mergeinfo)\ndef mergeinfo_deleted_by_a_merge_should_disappear(sbox):\n \"mergeinfo deleted by a merge should disappear\"\n\n\n # r1: Create a greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n D_COPY_path = sbox.ospath('A_COPY/D')\n A_COPY_path = sbox.ospath('A_COPY')\n A_COPY_2_path = sbox.ospath('A_COPY_2')\n\n # r2 - r6: Copy A to A_COPY and then make some text changes under A.\n wc_disk, wc_status = set_up_branch(sbox)\n\n # r7: Merge all available revisions from A/D to A_COPY/D, this creates\n # mergeinfo on A_COPY/D.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, # Don't check stdout, we test this\n # type of merge to death elsewhere.\n [], 'merge', sbox.repo_url + '/A/D',\n D_COPY_path)\n svntest.actions.run_and_verify_svn(\n None, [], 'ci', '-m',\n 'Merge all available revisions from A/D to A_COPY/D', wc_dir)\n\n # r8: Copy A_COPY to A_COPY_2, this carries the mergeinf on A_COPY/D\n # to A_COPY_2/D.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None,[],\n 'copy', A_COPY_path, A_COPY_2_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Copy A_COPY to A_COPY_2', wc_dir)\n\n # r9: Propdel the mergeinfo on A_COPY/D.\n svntest.actions.run_and_verify_svn(None,[],\n 'pd', SVN_PROP_MERGEINFO, D_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Propdel the mergeinfo on A_COPY/D',\n wc_dir)\n\n # r10: Merge r5 from A to A_COPY_2 so the latter gets some explicit\n # mergeinfo.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'merge', '-c5',\n sbox.repo_url + '/A', A_COPY_2_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Merge r5 from A to A_COPY_2', wc_dir)\n\n # Now merge r9 from A_COPY to A_COPY_2. 
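  # (Aside, illustration only: after this merge a plain
  #
  #   >svn proplist A_COPY_2/D
  #
  # should list no svn:mergeinfo at all; the property is expected to be
  # removed outright rather than left behind empty, which is why the
  # expected_disk entry for 'D' below carries no props.)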
Since the merge itself cleanly\n # removes all explicit mergeinfo from A_COPY_2/D, we should not set any\n # mergeinfo on that subtree describing the merge.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(A_COPY_2_path, {\n 'D' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(A_COPY_2_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_2_path, {\n })\n expected_status = wc.State(A_COPY_2_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' M'),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=10)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:5\\n/A_COPY:9'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_2_path, '8', '9',\n sbox.repo_url + '/A_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# File merge optimization caused segfault during noop file merge\n# when multiple ranges are eligible for merge, see\n# http://svn.haxx.se/dev/archive-2009-05/0363.shtml\n@SkipUnless(server_has_mergeinfo)\ndef noop_file_merge(sbox):\n \"noop file merge does not segfault\"\n\n # r1: Create a greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n beta_COPY_path = sbox.ospath('A_COPY/B/E/beta')\n chi_COPY_path = sbox.ospath('A_COPY/D/H/chi')\n\n # r2 - r6: Copy A to A_COPY and then make some text changes under A.\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Merge r5 from A to A_COPY and commit as r7. 
This will split the\n # eligible ranges to be merged to A_COPY/D/H/chi into two discrete\n # sets: r1-4 and r5-HEAD\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5]],\n ['U ' + beta_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',]),\n [], 'merge', '-c5', sbox.repo_url + '/A', A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'commit', '-m',\n 'Merge r5 from A to A_COPY',\n wc_dir)\n\n # Update working copy to allow full inheritance and elision.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(7), [],\n 'up', wc_dir)\n\n # Merge all available revisions from A/D/H/chi to A_COPY/D/H/chi.\n # There are no operative changes in the source, so this should\n # not produce any output other than mergeinfo updates on\n # A_COPY/D/H/chi. This is where the segfault occurred.\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A/D/H/chi',\n chi_COPY_path)\n svntest.actions.run_and_verify_svn([' M ' + chi_COPY_path + '\\n'],\n [], 'st', chi_COPY_path)\n svntest.actions.run_and_verify_svn(['/A/D/H/chi:2-7\\n'],\n [], 'pg', SVN_PROP_MERGEINFO,\n chi_COPY_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(2690)\ndef copy_then_replace_via_merge(sbox):\n \"copy then replace via merge\"\n # Testing issue #2690 with deleted/added/replaced files and subdirs.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n j = os.path.join\n\n A = j(wc_dir, 'A')\n AJ = j(wc_dir, 'A', 'J')\n AJK = j(AJ, 'K')\n AJL = j(AJ, 'L')\n AJM = j(AJ, 'M')\n AJ_sigma = j(AJ, 'sigma')\n AJ_theta = j(AJ, 'theta')\n AJ_omega = j(AJ, 'omega')\n AJK_zeta = j(AJK, 'zeta')\n AJL_zeta = j(AJL, 'zeta')\n AJM_zeta = j(AJM, 'zeta')\n branch = j(wc_dir, 'branch')\n branch_J = j(wc_dir, 'branch', 'J')\n url_A = sbox.repo_url + '/A'\n url_branch = sbox.repo_url + '/branch'\n\n # Create a branch.\n main.run_svn(None, 'cp', url_A, url_branch, '-m', 'create branch') # r2\n\n # Create a tree J in A.\n os.makedirs(AJK)\n os.makedirs(AJL)\n main.file_append(AJ_sigma, 'new text')\n main.file_append(AJ_theta, 'new text')\n main.file_append(AJK_zeta, 'new text')\n main.file_append(AJL_zeta, 'new text')\n main.run_svn(None, 'add', AJ)\n sbox.simple_commit(message='create tree J') # r3\n main.run_svn(None, 'up', wc_dir)\n\n # Copy J to the branch via merge\n main.run_svn(None, 'merge', url_A, branch)\n sbox.simple_commit(message='merge to branch') # r4\n main.run_svn(None, 'up', wc_dir)\n\n # In A, replace J with a slightly different tree\n main.run_svn(None, 'rm', AJ)\n sbox.simple_commit(message='rm AJ') # r5\n main.run_svn(None, 'up', wc_dir)\n\n os.makedirs(AJL)\n os.makedirs(AJM)\n main.file_append(AJ_theta, 'really new text')\n main.file_append(AJ_omega, 'really new text')\n main.file_append(AJL_zeta, 'really new text')\n main.file_append(AJM_zeta, 'really new text')\n main.run_svn(None, 'add', AJ)\n sbox.simple_commit(message='create tree J again') # r6\n main.run_svn(None, 'up', wc_dir)\n\n # Run merge to replace /branch/J in one swell foop.\n main.run_svn(None, 'merge', url_A, branch)\n\n # Check status:\n # sigma and K are deleted (not copied!)\n # theta and L are replaced (deleted then copied-here)\n # omega and M are copied-here\n expected_status = wc.State(branch_J, {\n '' : Item(status='R ', copied='+', wc_rev='-'),\n 'sigma' : Item(status='D ', wc_rev=6),\n 'K' : Item(status='D ', wc_rev=6),\n 'K/zeta' : Item(status='D ', wc_rev=6),\n 'theta' : Item(status=' ', copied='+', wc_rev='-'),\n 'L' : Item(status=' ', copied='+', 
wc_rev='-'),\n 'L/zeta' : Item(status=' ', copied='+', wc_rev='-'),\n 'omega' : Item(status=' ', copied='+', wc_rev='-'),\n 'M' : Item(status=' ', copied='+', wc_rev='-'),\n 'M/zeta' : Item(status=' ', copied='+', wc_rev='-'),\n })\n actions.run_and_verify_status(branch_J, expected_status)\n\n # Update and commit, just to make sure the WC isn't busted.\n main.run_svn(None, 'up', branch_J)\n expected_output = wc.State(branch_J, {\n '' : Item(verb='Replacing'),\n })\n expected_status = wc.State(branch_J, {\n '' : Item(status=' ', wc_rev=7),\n 'theta' : Item(status=' ', wc_rev=7),\n 'L' : Item(status=' ', wc_rev=7),\n 'L/zeta' : Item(status=' ', wc_rev=7),\n 'omega' : Item(status=' ', wc_rev=7),\n 'M' : Item(status=' ', wc_rev=7),\n 'M/zeta' : Item(status=' ', wc_rev=7),\n })\n actions.run_and_verify_commit(branch_J,\n expected_output,\n expected_status)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef record_only_merge(sbox):\n \"record only merge applies mergeinfo diffs\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n nu_path = sbox.ospath('A/C/nu')\n A_COPY_path = sbox.ospath('A_COPY')\n A2_path = sbox.ospath('A2')\n Z_path = sbox.ospath('A/B/Z')\n Z_COPY_path = sbox.ospath('A_COPY/B/Z')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n nu_COPY_path = sbox.ospath('A_COPY/C/nu')\n\n # r7 - Copy the branch A_COPY@2 to A2 and update the WC.\n svntest.actions.run_and_verify_svn(None, [],\n 'copy', A_COPY_path, A2_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'commit', '-m', 'Branch the branch',\n wc_dir)\n # r8 - Add A/C/nu and A/B/Z.\n # Add a new file with mergeinfo in the foreign repos.\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', Z_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'commit', '-m', 'Add subtrees',\n wc_dir)\n\n # r9 - Edit A/C/nu and add a random property on A/B/Z.\n svntest.main.file_write(nu_path, \"New content.\\n\")\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', 'propname', 'propval', Z_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'commit', '-m', 'Subtree changes',\n wc_dir)\n\n # r10 - Merge r8 from A to A_COPY.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(9), [], 'up',\n wc_dir)\n svntest.actions.run_and_verify_svn(expected_merge_output(\n [[8]],\n ['A ' + Z_COPY_path + '\\n',\n 'A ' + nu_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',]),\n [], 'merge', '-c8',\n sbox.repo_url + '/A',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'commit', '-m', 'Root merge of r8',\n wc_dir)\n\n # r11 - Do several subtree merges:\n #\n # r4 from A/D/G/rho to A_COPY/D/G/rho\n # r6 from A/D/H to A_COPY/D/H\n # r9 from A/C/nu to A_COPY/C/nu\n # r9 from A/B/Z to A_COPY/B/Z\n svntest.actions.run_and_verify_svn(expected_merge_output(\n [[4]],\n ['U ' + rho_COPY_path + '\\n',\n ' U ' + rho_COPY_path + '\\n',]),\n [], 'merge', '-c4',\n sbox.repo_url + '/A/D/G/rho',\n rho_COPY_path)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[6]],\n ['U ' + omega_COPY_path + '\\n',\n ' U ' + H_COPY_path + '\\n',]),\n [], 'merge', '-c6', sbox.repo_url + '/A/D/H', H_COPY_path)\n svntest.actions.run_and_verify_svn(expected_merge_output(\n [[9]],\n ['U ' + nu_COPY_path + 
'\\n',\n ' G ' + nu_COPY_path + '\\n',]),\n [], 'merge', '-c9',\n sbox.repo_url + '/A/C/nu',\n nu_COPY_path)\n svntest.actions.run_and_verify_svn(expected_merge_output(\n [[9]],\n [' U ' + Z_COPY_path + '\\n',\n ' G ' + Z_COPY_path + '\\n']),\n [], 'merge', '-c9',\n sbox.repo_url + '/A/B/Z',\n Z_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'commit', '-m', 'Several subtree merges',\n wc_dir)\n\n svntest.actions.run_and_verify_svn(exp_noop_up_out(11), [], 'up',\n wc_dir)\n\n # Now do a --record-only merge of r10 and r11 from A_COPY to A2.\n #\n # We only expect svn:mergeinfo changes to be applied to existing paths:\n #\n # From r10 the mergeinfo '/A:r8' is recorded on A_COPY.\n #\n # From r11 the mergeinfo of '/A/D/G/rho:r4' is recorded on A_COPY/D/G/rho\n # and the mergeinfo of '/A/D/H:r6' is recorded on A_COPY/D/H. Rev 8 should\n # also be recorded on both subtrees because explicit mergeinfo must be\n # complete.\n #\n # The mergeinfo describing the merge source itself, '/A_COPY:10-11' should\n # also be recorded on the root and the two subtrees.\n #\n # The mergeinfo changes from r10 to A_COPY/C/nu and A_COPY/B/Z cannot be\n # applied because the corresponding paths don't exist under A2; this should\n # not cause any problems.\n expected_output = wc.State(A2_path, {\n '' : Item(status=' U'),\n 'D/G/rho' : Item(status=' U'),\n 'D/H' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(A2_path, {\n '' : Item(status=' G'),\n 'D/H' : Item(status=' G'),\n 'D/G/rho' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A2_path, {\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:8\\n/A_COPY:10-11'}),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B' : Item(),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(props={SVN_PROP_MERGEINFO :\n '/A/D/H:6,8\\n/A_COPY/D/H:10-11'}),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\",\n props={SVN_PROP_MERGEINFO :\n '/A/D/G/rho:4,8\\n/A_COPY/D/G/rho:10-11'}),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n })\n expected_status = wc.State(A2_path, {\n '' : Item(status=' M'),\n 'mu' : Item(status=' '),\n 'B' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' M'),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' M'),\n 'D/G/tau' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=11)\n expected_skip = wc.State('', { })\n svntest.actions.run_and_verify_merge(A2_path, '9', '11',\n sbox.repo_url + '/A_COPY', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--record-only', 
A2_path)\n\n#----------------------------------------------------------------------\n# Test for issue #3514 'svn merge --accept [ base | theirs-full ]\n# doesn't work'\n@Issue(3514)\ndef merge_automatic_conflict_resolution(sbox):\n \"automatic conflict resolutions work with merge\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n\n # r7 - Make a change on A_COPY that will conflict with r3 on A\n svntest.main.file_write(psi_COPY_path, \"BASE.\\n\")\n svntest.actions.run_and_verify_svn(None, [],\n 'commit', '-m', 'log msg', wc_dir)\n\n # Set up our base expectations, we'll tweak accordingly for each option.\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'B' : Item(status=' ', wc_rev=2),\n 'mu' : Item(status=' ', wc_rev=2),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha' : Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status=' ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' ', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status=' ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' ', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status=' ', wc_rev=7),\n 'D/H/omega' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n\n # Test --accept postpone\n expected_output = wc.State(A_COPY_path, {'D/H/psi' : Item(status='C ')})\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_disk.tweak('D/H/psi', contents=\"<<<<<<< .working\\n\"\n \"BASE.\\n\"\n \"||||||| .merge-left.r2\\n\"\n \"This is the file 'psi'.\\n\"\n \"=======\\n\"\n \"New content>>>>>>> .merge-right.r3\\n\")\n expected_status.tweak('D/H/psi', status='C ')\n psi_conflict_support_files = [\"psi\\.working\",\n \"psi\\.merge-right\\.r3\",\n \"psi\\.merge-left\\.r2\"]\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--accept', 'postpone',\n '--allow-mixed-revisions',\n A_COPY_path,\n extra_files=\n list(psi_conflict_support_files))\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', wc_dir)\n\n # Test --accept mine-conflict and mine-full\n ### TODO: Also test that the output has 
a 'Resolved' line for this path.\n expected_output = wc.State(A_COPY_path, {'D/H/psi' : Item(status='C ')})\n expected_disk.tweak('D/H/psi', contents=\"BASE.\\n\")\n expected_status.tweak('D/H/psi', status=' ')\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--accept', 'mine-conflict',\n '--allow-mixed-revisions',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', wc_dir)\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--accept', 'mine-full',\n '--allow-mixed-revisions',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', wc_dir)\n\n # Test --accept theirs-conflict and theirs-full\n ### TODO: Also test that the output has a 'Resolved' line for this path.\n expected_output = wc.State(A_COPY_path, {'D/H/psi' : Item(status='C ')})\n expected_disk.tweak('D/H/psi', contents=\"New content\")\n expected_status.tweak('D/H/psi', status='M ')\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--accept', 'theirs-conflict',\n '--allow-mixed-revisions',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', wc_dir)\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--accept', 'theirs-full',\n '--allow-mixed-revisions',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', wc_dir)\n # Test --accept base\n ### TODO: Also test that the output has a 'Resolved' line for this path.\n expected_output = wc.State(A_COPY_path, {'D/H/psi' : Item(status='C ')})\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_disk.tweak('D/H/psi', contents=\"This is the file 'psi'.\\n\")\n expected_status.tweak('D/H/psi', status='M ')\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--accept', 'base',\n '--allow-mixed-revisions',\n A_COPY_path)\n\n#----------------------------------------------------------------------\n# Test for issue #3440 'Skipped paths get incorrect override mergeinfo\n# during merge'.\n@SkipUnless(server_has_mergeinfo)\n@Issue(3440)\ndef skipped_files_get_correct_mergeinfo(sbox):\n \"skipped files get correct mergeinfo set\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n psi_path = sbox.ospath('A/D/H/psi')\n\n # Setup our basic 'trunk' and 'branch':\n # r2 - Copy A to A_COPY\n # r3 - Text change to A/D/H/psi\n # r4 - Text change to A/D/G/rho\n # r5 - Text change to A/B/E/beta\n # r6 - Text change to A/D/H/omega\n wc_disk, wc_status = 
set_up_branch(sbox, False, 1)\n\n # r7 Make another text change to A/D/H/psi\n svntest.main.file_write(psi_path, \"Even newer content\")\n expected_output = wc.State(wc_dir, {'A/D/H/psi' : Item(verb='Sending')})\n svntest.main.run_svn(None, 'commit', '-m', 'another change to A/D/H/psi',\n wc_dir)\n\n # Merge r3 from A to A_COPY, this will create explicit mergeinfo of\n # '/A:3' on A_COPY. Commit this merge as r8.\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[3]],\n ['U ' + psi_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',]),\n [], 'merge', '-c3', sbox.repo_url + '/A', A_COPY_path)\n svntest.main.run_svn(None, 'commit', '-m', 'initial merge', wc_dir)\n\n # Update WC to uniform revision and then set the depth on A_COPY/D/H to\n # empty. Then merge all available revisions from A to A_COPY.\n # A_COPY/D/H/psi and A_COPY/D/H/omega are not present due to their\n # parent's depth and should be reported as skipped. A_COPY/D/H should\n # get explicit mergeinfo set on it reflecting what it previously inherited\n # from A_COPY after the first merge, i.e. '/A/D/H:3', plus non-inheritable\n # mergeinfo describing what was done during this merge,\n # i.e. '/A/D/H:2*,4-8*'.\n #\n # Issue #3440 occurred when empty mergeinfo was set on A_COPY/D/H, making\n # it appear that r3 was never merged.\n svntest.actions.run_and_verify_svn(exp_noop_up_out(8), [],\n 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [],\n 'up', '--set-depth=empty', H_COPY_path)\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status='M '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status='M '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' M'),\n })\n expected_status.tweak(wc_rev=8)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-8'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:2*,3,4-8*'}),\n })\n expected_skip = wc.State(\n A_COPY_path,\n {'D/H/psi' : Item(verb='Skipped missing target'),\n 'D/H/omega' : Item(verb='Skipped missing target')})\n expected_output = wc.State(A_COPY_path,\n {'B/E/beta' : Item(status='U '),\n 'D/G/rho' : Item(status='U ')})\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H' : Item(status=' G'), # ' G' because override mergeinfo gets set\n # on this, the root of a 'missing' subtree.\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n#----------------------------------------------------------------------\n# Test for issue #3115 'Case 
only renames resulting from merges don't\n# work or break the WC on case-insensitive file systems'.\n@Issue(3115)\ndef committed_case_only_move_and_revert(sbox):\n \"committed case only move causes revert to fail\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, True)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n\n # r3: A case-only file rename on the server\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 3.\\n'],\n [], 'move',\n sbox.repo_url + '/A/mu',\n sbox.repo_url + '/A/MU',\n '-m', 'Move A/mu to A/MU')\n\n # Now merge that rename into the WC\n expected_output = wc.State(A_COPY_path, {\n 'mu' : Item(status='D '),\n 'MU' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M', wc_rev=2),\n 'B' : Item(status=' ', wc_rev=2),\n 'mu' : Item(status='D ', wc_rev=2),\n 'MU' : Item(status='A ', wc_rev='-', copied='+'),\n 'B/E' : Item(status=' ', wc_rev=2),\n 'B/E/alpha' : Item(status=' ', wc_rev=2),\n 'B/E/beta' : Item(status=' ', wc_rev=2),\n 'B/lambda' : Item(status=' ', wc_rev=2),\n 'B/F' : Item(status=' ', wc_rev=2),\n 'C' : Item(status=' ', wc_rev=2),\n 'D' : Item(status=' ', wc_rev=2),\n 'D/G' : Item(status=' ', wc_rev=2),\n 'D/G/pi' : Item(status=' ', wc_rev=2),\n 'D/G/rho' : Item(status=' ', wc_rev=2),\n 'D/G/tau' : Item(status=' ', wc_rev=2),\n 'D/gamma' : Item(status=' ', wc_rev=2),\n 'D/H' : Item(status=' ', wc_rev=2),\n 'D/H/chi' : Item(status=' ', wc_rev=2),\n 'D/H/psi' : Item(status=' ', wc_rev=2),\n 'D/H/omega' : Item(status=' ', wc_rev=2),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3'}),\n 'B' : Item(),\n 'MU' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, '2', '3',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False)\n\n # Commit the merge\n expected_output = svntest.wc.State(wc_dir, {\n 'A_COPY' : Item(verb='Sending'),\n 'A_COPY/mu' : Item(verb='Deleting'),\n 'A_COPY/MU' : Item(verb='Adding'),\n })\n wc_status.tweak('A_COPY', wc_rev=4)\n wc_status.remove('A_COPY/mu')\n wc_status.add({'A_COPY/MU': Item(status=' ', wc_rev=4)})\n\n svntest.actions.run_and_verify_commit(wc_dir, expected_output, wc_status)\n\n # In issue #3115 the WC gets corrupted and any subsequent revert\n # attempts fail with this error:\n # svn.exe revert -R \"svn-test-work\\working_copies\\merge_tests-139\"\n # ..\\..\\..\\subversion\\svn\\revert-cmd.c:81: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_client\\revert.c:167: (apr_err=2)\n # 
..\\..\\..\\subversion\\libsvn_client\\revert.c:103: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\adm_ops.c:2232: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\adm_ops.c:2232: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\adm_ops.c:2232: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\adm_ops.c:2176: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\adm_ops.c:2053: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\adm_ops.c:1869: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\workqueue.c:520: (apr_err=2)\n # ..\\..\\..\\subversion\\libsvn_wc\\workqueue.c:490: (apr_err=2)\n # svn: Error restoring text for 'C:\\SVN\\src-trunk\\Debug\\subversion\\tests\n # \\cmdline\\svn-test-work\\working_copies\\merge_tests-139\\A_COPY\\MU'\n svntest.actions.run_and_verify_svn([], [], 'revert', '-R', wc_dir)\n\n # r5: A case-only directory rename on the server\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 5.\\n'],\n [], 'move',\n sbox.repo_url + '/A/C',\n sbox.repo_url + '/A/c',\n '-m', 'Move A/C to A/c')\n expected_output = wc.State(A_COPY_path, {\n 'C' : Item(status='D '),\n 'c' : Item(status='A '),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_disk.tweak('', props={SVN_PROP_MERGEINFO : '/A:3,5'})\n expected_disk.add({'c' : Item()})\n expected_disk.remove('C')\n expected_status.tweak('MU', status=' ', wc_rev=4, copied=None)\n expected_status.remove('mu')\n expected_status.tweak('C', status='D ')\n expected_status.tweak('', wc_rev=4)\n expected_status.add({'c' : Item(status='A ', copied='+', wc_rev='-')})\n # This merge succeeds. It used to leave a strange state, added with\n # history but missing:\n #\n # M merge_tests-139\\A_COPY\n # ! + merge_tests-139\\A_COPY\\c\n # R + merge_tests-139\\A_COPY\\C\n svntest.actions.run_and_verify_merge(A_COPY_path, '4', '5',\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False,\n '--allow-mixed-revisions', A_COPY_path)\n\n#----------------------------------------------------------------------\n# This is a test for issue #3221 'Unable to merge into working copy of\n# deleted branch'.\n@SkipUnless(server_has_mergeinfo)\n@Issue(3221)\ndef merge_into_wc_for_deleted_branch(sbox):\n \"merge into WC of deleted branch should work\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Copy 'A' to 'A_COPY' then make some changes under 'A'\n wc_disk, wc_status = set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n gamma_path = sbox.ospath('A/D/gamma')\n\n # r7 - Delete the branch on the repository, obviously it still\n # exists in our WC.\n svntest.actions.run_and_verify_svn(None, [],\n 'delete', sbox.repo_url + '/A_COPY',\n '-m', 'Delete A_COPY directly in repos')\n\n # r8 - Make another change under 'A'.\n svntest.main.file_write(gamma_path, \"Content added after A_COPY deleted\")\n expected_output = wc.State(wc_dir, {'A/D/gamma' : Item(verb='Sending')})\n svntest.main.run_svn(None, 'commit',\n '-m', 'Change made on A after A_COPY was deleted',\n wc_dir)\n\n # Now merge all available revisions from A to A_COPY:\n expected_output = wc.State(A_COPY_path, {\n 'B/E/beta' : Item(status='U '),\n 'D/G/rho' : Item(status='U '),\n 'D/H/omega' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n 'D/gamma' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n 
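  # (Note: the expected_mergeinfo_output above covers only the notification
  # for the merge target's own mergeinfo; the expected_elision_output below
  # is left empty because no subtree mergeinfo is expected to become
  # redundant and elide during this merge.)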
expected_elision_output = wc.State(A_COPY_path, {\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status='M '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status='M '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status='M '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status='M '),\n 'D/H/omega' : Item(status='M '),\n })\n expected_status.tweak(wc_rev=2)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-8'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"Content added after A_COPY deleted\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n # Issue #3221: Previously this merge failed with:\n # ..\\..\\..\\subversion\\svn\\util.c:900: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:9383: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:8029: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:7577: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:4132: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:3312: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_client\\ra.c:659: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_repos\\rev_hunt.c:696: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_repos\\rev_hunt.c:539: (apr_err=160013)\n # ..\\..\\..\\subversion\\libsvn_fs_fs\\tree.c:2818: (apr_err=160013)\n # svn: File not found: revision 8, path '/A_COPY'\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\ndef foreign_repos_del_and_props(sbox):\n \"merge del and ps variants from a foreign repos\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc2_dir = sbox.add_wc_path('wc2')\n\n (r2_path, r2_url) = sbox.add_repo_path('fgn')\n svntest.main.create_repos(r2_path)\n\n svntest.actions.run_and_verify_svn(None, [], 'checkout',\n r2_url, wc2_dir)\n\n svntest.actions.run_and_verify_svn(None, [], 'propset',\n 'svn:eol-style', 'native',\n sbox.ospath('iota'))\n\n svntest.actions.run_and_verify_svn(None, [], 'cp',\n sbox.ospath('A/D'),\n sbox.ospath('D'))\n\n svntest.actions.run_and_verify_svn(None, [], 'rm',\n sbox.ospath('A/D'),\n sbox.ospath('D/G'))\n\n new_file = sbox.ospath('new-file')\n svntest.main.file_write(new_file, 'new-file')\n svntest.actions.run_and_verify_svn(None, [], 'add', new_file)\n\n svntest.actions.run_and_verify_svn(None, [], 'propset',\n 'svn:eol-style', 'native', new_file)\n\n 
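  # The eol-style propset on iota, the copy of A/D to D, the deletions, and
  # the added file above are all committed as a single revision below; the
  # foreign-repos merges that follow (-r0:1 and -r1:2) then replay those
  # revisions into the second working copy.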
svntest.actions.run_and_verify_svn(None, [], 'commit', wc_dir,\n '-m', 'changed')\n\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url, wc2_dir,\n '-r', '0:1')\n\n expected_status = svntest.actions.get_virginal_state(wc2_dir, 0)\n expected_status.tweak(status='A ')\n expected_status.add(\n {\n '' : Item(status=' ', wc_rev='0'),\n })\n svntest.actions.run_and_verify_status(wc2_dir, expected_status)\n\n expected_status = svntest.actions.get_virginal_state(wc2_dir, 1)\n\n svntest.actions.run_and_verify_svn(None, [], 'commit', wc2_dir,\n '-m', 'Merged r1')\n\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url, wc2_dir,\n '-r', '1:2', '--allow-mixed-revisions')\n\n expected_status.tweak('A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/tau', 'A/D/G/pi',\n 'A/D/gamma', 'A/D/H', 'A/D/H/psi', 'A/D/H/omega',\n 'A/D/H/chi', status='D ')\n expected_status.tweak(wc_rev='1')\n expected_status.tweak('', wc_rev='0')\n expected_status.tweak('iota', status=' M')\n\n expected_status.add(\n {\n 'new-file' : Item(status='A ', wc_rev='0'),\n 'D' : Item(status='A ', wc_rev='0'),\n 'D/H' : Item(status='A ', wc_rev='0'),\n 'D/H/omega' : Item(status='A ', wc_rev='0'),\n 'D/H/psi' : Item(status='A ', wc_rev='0'),\n 'D/H/chi' : Item(status='A ', wc_rev='0'),\n 'D/gamma' : Item(status='A ', wc_rev='0'),\n })\n\n svntest.actions.run_and_verify_status(wc2_dir, expected_status)\n\n expected_output = [\"Properties on '%s':\\n\" % (os.path.join(wc2_dir, 'iota')),\n \" svn:eol-style\\n\",\n \"Properties on '%s':\\n\" % (os.path.join(wc2_dir, 'new-file')),\n \" svn:eol-style\\n\" ]\n svntest.actions.run_and_verify_svn(expected_output, [], 'proplist',\n os.path.join(wc2_dir, 'iota'),\n os.path.join(wc2_dir, 'new-file'))\n\n#----------------------------------------------------------------------\n# Test for issue #3642 'immediate depth merges don't create proper subtree\n# mergeinfo'. See https://issues.apache.org/jira/browse/SVN-3642\n@Issue(3642)\ndef immediate_depth_merge_creates_minimal_subtree_mergeinfo(sbox):\n \"no spurious mergeinfo from immediate depth merges\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n B_path = sbox.ospath('A/B')\n B_COPY_path = sbox.ospath('A_COPY/B')\n\n\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Merge -c5 from A/B to A_COPY/B at --depth immediates.\n # This should create only the minimum subtree mergeinfo\n # required to describe the merge. This means that A_COPY/B/E gets\n # non-inheritable mergeinfo for r5, because a full depth merge would\n # affect that subtree. 
The other child of the merge target, A_COPY/B/F\n # would never be affected by r5, so it doesn't need any explicit\n # mergeinfo.\n expected_output = wc.State(B_COPY_path, {})\n expected_mergeinfo_output = wc.State(B_COPY_path, {\n '' : Item(status=' U'),\n 'E' : Item(status=' U'), # A_COPY/B/E would be affected by r5 if the\n # merge was at infinite depth, so it needs\n # non-inheritable override mergeinfo.\n #'F' : Item(status=' U'), No override mergeinfo, r5 is\n # inoperative on this child.\n })\n expected_elision_output = wc.State(B_COPY_path, {\n })\n expected_status = wc.State(B_COPY_path, {\n '' : Item(status=' M'),\n 'F' : Item(status=' '),\n 'E' : Item(status=' M'),\n 'E/alpha' : Item(status=' '),\n 'E/beta' : Item(status=' '),\n 'lambda' : Item(status=' '),\n\n })\n expected_status.tweak(wc_rev=6)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:5'}),\n 'E' : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:5*'}),\n 'E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F' : Item(),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\")\n })\n expected_skip = wc.State(B_COPY_path, { })\n svntest.actions.run_and_verify_merge(B_COPY_path, '4', '5',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--depth', 'immediates',\n B_COPY_path)\n\n#----------------------------------------------------------------------\n# Test for issue #3646 'cyclic --record-only merges create self-referential\n# mergeinfo'\n@SkipUnless(server_has_mergeinfo)\n@Issue(3646)\ndef record_only_merge_creates_self_referential_mergeinfo(sbox):\n \"merge creates self referential mergeinfo\"\n\n # Given a copy of trunk@M to branch, committed in r(M+1), if we\n # --record-only merge the branch back to trunk with no revisions\n # specified, then trunk gets self-referential mergeinfo recorded\n # reflecting its entire natural history.\n\n # Setup a standard greek tree in r1.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n mu_path = sbox.ospath('A/mu')\n A_path = sbox.ospath('A')\n A_branch_path = sbox.ospath('A-branch')\n\n # Make a change to A/mu in r2.\n svntest.main.file_write(mu_path, \"Trunk edit\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m', 'trunk edit',\n wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n # Copy A to A-branch in r3\n svntest.actions.run_and_verify_svn(None, [],\n 'copy', A_path, A_branch_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci',\n '-m', 'Branch A to A-branch', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Merge A-branch back to A. 
This should record the mergeinfo '/A-branch:3'\n # on A.\n expected_output = wc.State(A_path, {})\n expected_mergeinfo_output = wc.State(A_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_path, {})\n expected_A_status = wc.State(A_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_A_status.tweak(wc_rev=3)\n expected_A_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A-branch:3'}),\n 'B' : Item(),\n 'mu' : Item(\"Trunk edit\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_A_skip = wc.State(A_path, {})\n svntest.actions.run_and_verify_merge(A_path, None, None,\n sbox.repo_url + '/A-branch', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_A_disk,\n expected_A_status,\n expected_A_skip,\n [], True, True,\n '--record-only', A_path)\n\n#----------------------------------------------------------------------\n# Test for issue #3657 'dav update report handler in skelta mode can cause\n# spurious conflicts'.\n@Issue(3657)\ndef dav_skelta_mode_causes_spurious_conflicts(sbox):\n \"dav skelta mode can cause spurious conflicts\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n mu_path = sbox.ospath('A/mu')\n A_path = sbox.ospath('A')\n C_path = sbox.ospath('A/C')\n A_branch_path = sbox.ospath('A-branch')\n C_branch_path = sbox.ospath('A-branch/C')\n\n # r2 - Set some initial properties:\n #\n # 'dir-prop'='value1' on A/C.\n # 'svn:eol-style'='native' on A/mu.\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', 'dir-prop', 'initial-val',\n C_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', 'svn:eol-style', 'native',\n mu_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'Set some properties',\n wc_dir)\n\n # r3 - Branch 'A' to 'A-branch':\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [],\n 'copy', A_path, A_branch_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'Create a branch of A',\n wc_dir)\n\n # r4 - Make a text mod to 'A/mu' and add new props to 'A/mu' and 'A/C':\n svntest.main.file_write(mu_path, \"The new mu!\\n\")\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', 'prop-name', 'prop-val', mu_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', 'another-dir-prop', 'initial-val',\n C_path)\n svntest.actions.run_and_verify_svn(None, [], 
'ci', '-m',\n 'Edit a file and make some prop changes',\n wc_dir)\n\n # r5 - Modify the sole property on 'A-branch/C':\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', 'dir-prop', 'branch-val',\n C_branch_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'prop mod on branch', wc_dir)\n\n # Now merge r4 from 'A' to 'A-branch'.\n #\n # Previously this failed over ra_neon and ra_serf on Windows:\n #\n # >svn merge ^^/A A-branch -c4\n # Conflict discovered in 'C:/SVN/src-trunk/Debug/subversion/tests/cmdline\n # /svn-test-work/working_copies/merge_tests-110/A-branch/mu'.\n # Select: (p) postpone, (df) diff-full, (e) edit,\n # (mc) mine-conflict, (tc) theirs-conflict,\n # (s) show all options: p\n # --- Merging r4 into 'A-branch':\n # CU A-branch\\mu\n # Conflict for property 'another-dir-prop' discovered on 'C:/SVN/src-trunk\n # /Debug/subversion/tests/cmdline/svn-test-work/working_copies/\n # merge_tests-110/A-branch/C'.\n # Select: (p) postpone,\n # (mf) mine-full, (tf) theirs-full,\n # (s) show all options: p\n # C A-branch\\C\n # --- Recording mergeinfo for merge of r4 into 'A-branch':\n # U A-branch\n # Summary of conflicts:\n # Text conflicts: 1\n # Property conflicts: 1\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(A_branch_path, {\n 'mu' : Item(status='UU'),\n 'C' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(A_branch_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_branch_path, {})\n expected_status = wc.State(A_branch_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status='MM'),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' M'),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=5)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO :\n '/A:4'}),\n 'B' : Item(),\n 'mu' : Item(\"The new mu!\\n\",\n props={'prop-name' : 'prop-val',\n 'svn:eol-style' : 'native'}),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(props={'dir-prop' : 'branch-val',\n 'another-dir-prop' : 'initial-val'}),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_branch_path, {})\n svntest.actions.run_and_verify_merge(A_branch_path, 3, 4,\n sbox.repo_url + '/A',\n None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n\n#----------------------------------------------------------------------\ndef merge_into_locally_added_file(sbox):\n \"merge into locally added file\"\n\n sbox.build()\n 
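  # Scenario sketch: 'A/D/G/new' starts life as an unversioned copy of 'pi',
  # is scheduled for addition, and then receives the r2 change to 'pi' via a
  # file merge; the merge should apply the text change and record
  # '/A/D/G/pi:2' mergeinfo on the locally added file.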
wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n pi_path = sbox.ospath(\"A/D/G/pi\")\n new_path = sbox.ospath(\"A/D/G/new\")\n\n shutil.copy(pi_path, new_path)\n svntest.main.file_append(pi_path, \"foo\\n\")\n sbox.simple_commit() # r2\n\n sbox.simple_add('A/D/G/new')\n\n expected_output = wc.State(wc_dir, {\n 'A/D/G/new' : Item(status='G '),\n })\n expected_mergeinfo_output = wc.State(wc_dir, {\n 'A/D/G/new' : Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_dir, {})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({ 'A/D/G/new' : Item(status='A ', wc_rev=0)})\n expected_status.tweak('A/D/G/pi', wc_rev=2)\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak('A/D/G/pi', contents=\"This is the file 'pi'.\\nfoo\\n\")\n expected_disk.add({'A/D/G/new' : Item(\"This is the file 'pi'.\\nfoo\\n\",\n props={SVN_PROP_MERGEINFO : '/A/D/G/pi:2'})})\n expected_skip = wc.State(wc_dir, {})\n\n svntest.actions.run_and_verify_merge(wc_dir, '1', '2',\n sbox.repo_url + '/A/D/G/pi', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n new_path)\n sbox.simple_commit()\n\n#----------------------------------------------------------------------\ndef merge_into_locally_added_directory(sbox):\n \"merge into locally added directory\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n G_path = sbox.ospath(\"A/D/G\")\n pi_path = sbox.ospath(\"A/D/G/pi\")\n new_dir_path = sbox.ospath(\"A/D/new_dir\")\n\n svntest.main.file_append_binary(pi_path, \"foo\\n\")\n sbox.simple_commit() # r2\n\n os.mkdir(new_dir_path)\n svntest.main.file_append_binary(os.path.join(new_dir_path, 'pi'),\n \"This is the file 'pi'.\\n\")\n svntest.main.file_append_binary(os.path.join(new_dir_path, 'rho'),\n \"This is the file 'rho'.\\n\")\n svntest.main.file_append_binary(os.path.join(new_dir_path, 'tau'),\n \"This is the file 'tau'.\\n\")\n sbox.simple_add('A/D/new_dir')\n\n expected_output = wc.State(wc_dir, {\n 'A/D/new_dir/pi' : Item(status='G '),\n })\n expected_mergeinfo_output = wc.State(wc_dir, {\n 'A/D/new_dir' : Item(status=' U'),\n })\n expected_elision_output = wc.State(wc_dir, {})\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({ 'A/D/new_dir' : Item(status='A ', wc_rev=0)})\n expected_status.add({ 'A/D/new_dir/pi' : Item(status='A ', wc_rev=0)})\n expected_status.add({ 'A/D/new_dir/rho' : Item(status='A ', wc_rev=0)})\n expected_status.add({ 'A/D/new_dir/tau' : Item(status='A ', wc_rev=0)})\n expected_status.tweak('A/D/G/pi', wc_rev=2)\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak('A/D/G/pi', contents=\"This is the file 'pi'.\\nfoo\\n\")\n expected_disk.add({'A/D/new_dir' :\n Item(props={SVN_PROP_MERGEINFO : '/A/D/G:2'})})\n expected_disk.add({'A/D/new_dir/pi' :\n Item(contents=\"This is the file 'pi'.\\nfoo\\n\")})\n expected_disk.add({'A/D/new_dir/rho' :\n Item(contents=\"This is the file 'rho'.\\n\")})\n expected_disk.add({'A/D/new_dir/tau' :\n Item(contents=\"This is the file 'tau'.\\n\")})\n expected_skip = wc.State(wc_dir, {})\n\n svntest.actions.run_and_verify_merge(wc_dir, '1', '2',\n sbox.repo_url + '/A/D/G', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n new_dir_path)\n 
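  # Committing here should turn the locally added directory, the files merged
  # into it, and the '/A/D/G:2' mergeinfo recorded by the merge into ordinary
  # versioned items.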
sbox.simple_commit()\n\n#----------------------------------------------------------------------\n# Test for issue #2915 'Handle mergeinfo for subtrees missing due to removal\n# by non-svn command'\n@SkipUnless(server_has_mergeinfo)\n@Issue(2915)\ndef merge_with_os_deleted_subtrees(sbox):\n \"merge tracking fails if target missing subtrees\"\n\n # r1: Create a greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # r2 - r6: Copy A to A_COPY and then make some text changes under A.\n set_up_branch(sbox)\n\n # Some paths we'll care about\n A_COPY_path = sbox.ospath('A_COPY')\n C_COPY_path = sbox.ospath('A_COPY/C')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n mu_COPY_path = sbox.ospath('A_COPY/mu')\n G_COPY_path = sbox.ospath('A_COPY/D/G')\n\n # Remove several subtrees from disk.\n svntest.main.safe_rmtree(C_COPY_path)\n svntest.main.safe_rmtree(G_COPY_path)\n os.remove(psi_COPY_path)\n os.remove(mu_COPY_path)\n\n # Be sure the regex paths are properly escaped on Windows, see the\n # note about \"The Backslash Plague\" in expected_merge_output().\n if sys.platform == 'win32':\n re_sep = '\\\\\\\\'\n else:\n re_sep = os.sep\n\n # Common part of the expected error message for all cases we will test.\n err_re = \"svn: E195016: Merge tracking not allowed with missing subtrees; \" + \\\n \"try restoring these items first:\" + \\\n \"|(\\n)\" + \\\n \"|\" + svntest.main.stack_trace_regexp\n\n # Case 1: Infinite depth merge into infinite depth WC target.\n # Every missing subtree under the target should be reported as missing.\n missing = \"|(.*A_COPY\" + re_sep + \"mu\\n)\" + \\\n \"|(.*A_COPY\" + re_sep + \"D\" + re_sep + \"G\\n)\" + \\\n \"|(.*A_COPY\" + re_sep + \"C\\n)\" + \\\n \"|(.*A_COPY\" + re_sep + \"D\" + re_sep + \"H\" + re_sep + \"psi\\n)\"\n exit_code, out, err = svntest.actions.run_and_verify_svn(\n [], svntest.verify.AnyOutput,\n 'merge', sbox.repo_url + '/A', A_COPY_path)\n svntest.verify.verify_outputs(\"Merge failed but not in the way expected\",\n err, None, err_re + missing, None,\n True) # Match *all* lines of stderr\n\n # Case 2: Immediates depth merge into infinite depth WC target.\n # Only the two immediate children of the merge target should be reported\n # as missing.\n missing = \"|(.*A_COPY\" + re_sep + \"mu\\n)\" + \\\n \"|(.*A_COPY\" + re_sep + \"C\\n)\"\n exit_code, out, err = svntest.actions.run_and_verify_svn(\n [], svntest.verify.AnyOutput,\n 'merge', sbox.repo_url + '/A', A_COPY_path, '--depth=immediates')\n svntest.verify.verify_outputs(\"Merge failed but not in the way expected\",\n err, None, err_re + missing, None, True)\n\n # Case 3: Files depth merge into infinite depth WC target.\n # Only the single file child of the merge target should be reported\n # as missing.\n missing = \"|(.*A_COPY\" + re_sep + \"mu\\n)\"\n exit_code, out, err = svntest.actions.run_and_verify_svn(\n [], svntest.verify.AnyOutput,\n 'merge', sbox.repo_url + '/A', A_COPY_path, '--depth=files')\n svntest.verify.verify_outputs(\"Merge failed but not in the way expected\",\n err, None, err_re + missing, None, True)\n\n # Case 4: Empty depth merge into infinite depth WC target.\n # Only the...oh, wait, the target is present and that is as deep\n # as the merge goes, so this merge should succeed!\n svntest.actions.run_and_verify_svn(\n svntest.verify.AnyOutput, [], 'merge', sbox.repo_url + '/A',\n A_COPY_path, '--depth=empty')\n\n#----------------------------------------------------------------------\n# Test for issue #3668 'inheritance can result in self-referential\n# mergeinfo' and 
issue #3669 'inheritance can result in mergeinfo\n# describing nonexistent sources'\n@Issue(3668,3669)\n@XFail()\ndef no_self_referential_or_nonexistent_inherited_mergeinfo(sbox):\n \"don't inherit bogus mergeinfo\"\n\n # r1: Create a greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # r2 - r6: Copy A to A_COPY and then make some text changes under A.\n set_up_branch(sbox, nbr_of_branches=1)\n\n # Some paths we'll care about\n nu_path = sbox.ospath('A/C/nu')\n nu_COPY_path = sbox.ospath('A_COPY/C/nu')\n J_path = sbox.ospath('A/D/J')\n J_COPY_path = sbox.ospath('A_COPY/D/J')\n zeta_path = sbox.ospath('A/D/J/zeta')\n A_COPY_path = sbox.ospath('A_COPY')\n\n # r7 - Add the file A/C/nu\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Add file', wc_dir)\n\n # r8 - Sync merge A to A_COPY\n svntest.actions.run_and_verify_svn(\n svntest.verify.AnyOutput, [], 'merge', sbox.repo_url + '/A',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Sync A_COPY with A', wc_dir)\n\n # r9 - Add the subtree A/D/J\n # A/D/J/zeta\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', J_path)\n svntest.main.file_write(zeta_path, \"This is the file 'zeta'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', zeta_path)\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Add subtree', wc_dir)\n\n # Update the WC in preparation for merges.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # r10 - Sync merge A to A_COPY\n svntest.actions.run_and_verify_svn(\n svntest.verify.AnyOutput, [], 'merge', sbox.repo_url + '/A',\n A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Sync A_COPY with A', wc_dir)\n\n # r11 - Text changes to A/C/nu and A/D/J/zeta.\n svntest.main.file_write(nu_path, \"This is the EDITED file 'nu'.\\n\")\n svntest.main.file_write(zeta_path, \"This is the EDITED file 'zeta'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Edit added files', wc_dir)\n\n # Update the WC in preparation for merges.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # This test is marked as XFail because the following two merges\n # create mergeinfo with both non-existent path-revs and self-referential\n # mergeinfo.\n #\n # Merge all available revisions from A/C/nu to A_COPY/C/nu.\n # The target has no explicit mergeinfo of its own but inherits mergeinfo\n # from A_COPY. A_COPY has the mergeinfo '/A:2-9' so the naive mergeinfo\n # A_COPY/C/nu inherits is '/A/C/nu:2-9'. However, '/A/C/nu:2-6' don't\n # actually exist (issue #3669) and '/A/C/nu:7-8' is self-referential\n # (issue #3668). 
Neither of these should be present in the resulting\n # mergeinfo for A_COPY/C/nu, only '/A/C/nu:8-11'\n expected_output = wc.State(nu_COPY_path, {\n '' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(nu_COPY_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(nu_COPY_path, {\n })\n expected_status = wc.State(nu_COPY_path, {\n '' : Item(status='MM', wc_rev=11),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/C/nu:8-11'}),\n })\n expected_skip = wc.State(nu_COPY_path, { })\n svntest.actions.run_and_verify_merge(nu_COPY_path, None, None,\n sbox.repo_url + '/A/C/nu', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n # Merge all available revisions from A/D/J to A_COPY/D/J. Like the\n # previous merge, the target should not have any non-existent ('/A/D/J:2-8')\n # or self-referential mergeinfo ('/A/D/J:9') recorded on it post-merge.\n expected_output = wc.State(J_COPY_path, {\n 'zeta' : Item(status='U '),\n })\n expected_mergeinfo_output = wc.State(J_COPY_path, {\n '' : Item(status=' G'),\n })\n expected_elision_output = wc.State(J_COPY_path, {\n })\n expected_status = wc.State(J_COPY_path, {\n '' : Item(status=' M', wc_rev=11),\n 'zeta' : Item(status='M ', wc_rev=11),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/D/J:10-11'}),\n 'zeta' : Item(\"This is the EDITED file 'zeta'.\\n\")\n })\n expected_skip = wc.State(J_COPY_path, { })\n svntest.actions.run_and_verify_merge(J_COPY_path, None, None,\n sbox.repo_url + '/A/D/J', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# Test for issue #3756 'subtree merge can inherit invalid working mergeinfo',\n# issue #3668 'inheritance can result in self-referential mergeinfo', and\n# issue #3669 'inheritance can result in mergeinfo describing nonexistent\n# sources'.\n@XFail()\n@Issue(3756,3668,3669)\ndef subtree_merges_inherit_invalid_working_mergeinfo(sbox):\n \"don't inherit bogus working mergeinfo\"\n\n # r1: Create a greek tree.\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # r2 - r6: Copy A to A_COPY and then make some text changes under A.\n set_up_branch(sbox, nbr_of_branches=1)\n\n # Some paths we'll care about\n nu_path = sbox.ospath('A/C/nu')\n nu_COPY_path = sbox.ospath('A_COPY/C/nu')\n A_COPY_path = sbox.ospath('A_COPY')\n\n # r7 - Add the file A/C/nu\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Add file', wc_dir)\n\n # r8 Merge c7 from A to A_COPY.\n svntest.actions.run_and_verify_svn(\n svntest.verify.AnyOutput, [], 'merge', sbox.repo_url + '/A',\n A_COPY_path, '-c7')\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Merge subtree file addition',\n wc_dir)\n\n # r9 - A text change to A/C/nu.\n svntest.main.file_write(nu_path, \"This is the EDITED file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'commit',\n '-m', 'Edit added file', wc_dir)\n\n # Update the WC in preparation for merges.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Now do two merges. The first, r3 to the root of the branch A_COPY.\n # This creates working mergeinfo '/A:3,7' on A_COPY. 
Then do a subtree
  # file merge of r9 from A/C/nu to A_COPY/C/nu. Since the target has no
  # explicit mergeinfo, the mergeinfo set to record the merge of r9 should
  # include the mergeinfo inherited from A_COPY. *But* that raw inherited
  # mergeinfo, '/A/C/nu:3,7' is wholly invalid: '/A/C/nu:3' simply doesn't
  # exist in the repository and '/A/C/nu:7' is self-referential. So the
  # resulting mergeinfo on 'A_COPY/C/nu' should be only '/A/C/nu:9'.
  #
  # Currently this test is marked as XFail because the resulting mergeinfo is
  # '/A/C/nu:3,7,9' and thus includes a non-existent path-rev.
  svntest.actions.run_and_verify_svn(
    svntest.verify.AnyOutput, [], 'merge', sbox.repo_url + '/A',
    A_COPY_path, '-c3')
  svntest.actions.run_and_verify_svn(
    svntest.verify.AnyOutput, [], 'merge', sbox.repo_url + '/A/C/nu',
    nu_COPY_path, '-c9')
  svntest.actions.run_and_verify_svn(
    '/A/C/nu:9', [], 'pg', SVN_PROP_MERGEINFO, nu_COPY_path)


#----------------------------------------------------------------------
# Test for issue #3686 'executable flag not correctly set on merge'
# See https://issues.apache.org/jira/browse/SVN-3686
@Issue(3686)
@SkipUnless(server_has_mergeinfo)
@SkipUnless(svntest.main.is_posix_os)
def merge_change_to_file_with_executable(sbox):
  "executable flag is maintained during binary merge"

  # Scenario: When merging a change to a binary file with the 'svn:executable'
  # property set, the file is not marked as 'executable'. After commit, the
  # executable bit is set correctly.
  sbox.build()
  wc_dir = sbox.wc_dir
  trunk_url = sbox.repo_url + '/A/B/E'

  alpha_path = sbox.ospath('A/B/E/alpha')
  beta_path = sbox.ospath('A/B/E/beta')

  # Force one of the files to be a binary type
  svntest.actions.run_and_verify_svn2(None,
                                      binary_mime_type_on_text_file_warning, 0,
                                      'propset', 'svn:mime-type',
                                      'application/octet-stream',
                                      alpha_path)

  # Set the 'svn:executable' property on both files
  svntest.actions.run_and_verify_svn(None, [],
                                     'propset', 'svn:executable', 'ON',
                                     beta_path)

  svntest.actions.run_and_verify_svn(None, [],
                                     'propset', 'svn:executable', 'ON',
                                     alpha_path)

  # Verify the executable bit has been set before committing
  if not os.access(alpha_path, os.X_OK):
    raise svntest.Failure("alpha not marked as executable before commit")
  if not os.access(beta_path, os.X_OK):
    raise svntest.Failure("beta is not marked as executable before commit")

  # Commit change (r2)
  sbox.simple_commit()

  # Verify the executable bit has remained after committing
  if not os.access(alpha_path, os.X_OK):
    raise svntest.Failure("alpha not marked as executable after commit")
  if not os.access(beta_path, os.X_OK):
    raise svntest.Failure("beta is not marked as executable after commit")

  # Create the branch
  svntest.actions.run_and_verify_svn(None, [], 'cp',
                                     trunk_url,
                                     sbox.repo_url + '/branch',
                                     '-m', "Creating the Branch")

  # Modify the files + commit (r3)
  svntest.main.file_append(alpha_path, 'appended alpha text')
  svntest.main.file_append(beta_path, 'appended beta text')
  sbox.simple_commit()

  # Re-root the WC at the branch
  svntest.main.safe_rmtree(wc_dir)
  svntest.actions.run_and_verify_svn(None, [], 'checkout',
                                     sbox.repo_url + '/branch', wc_dir)

  # Recalculate the paths
  alpha_path = sbox.ospath('alpha')
  beta_path = sbox.ospath('beta')

  expected_output = wc.State(wc_dir, {
    'beta' : Item(status='U '),
    'alpha' : Item(status='U '),
    })
  expected_mergeinfo_output = wc.State(wc_dir, {
    ''
: Item(status=' U')\n })\n expected_elision_output = wc.State(wc_dir, {\n })\n expected_disk = wc.State('', {\n '.' : Item(props={'svn:mergeinfo':'/A/B/E:3-4'}),\n 'alpha' : Item(contents=\"This is the file 'alpha'.\\nappended alpha text\",\n props={'svn:executable':'*',\n 'svn:mime-type':'application/octet-stream'}),\n 'beta' : Item(contents=\"This is the file 'beta'.\\nappended beta text\",\n props={\"svn:executable\" : '*'}),\n })\n expected_status = wc.State(wc_dir, {\n '' : Item(status=' M', wc_rev='4'),\n 'alpha' : Item(status='M ', wc_rev='4'),\n 'beta' : Item(status='M ', wc_rev='4'),\n })\n expected_skip = wc.State(wc_dir, { })\n\n # Merge the changes across\n svntest.actions.run_and_verify_merge(wc_dir, None, None,\n trunk_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True)\n\n\n # Verify the executable bit has been set\n if not os.access(alpha_path, os.X_OK):\n raise svntest.Failure(\"alpha is not marked as executable after merge\")\n if not os.access(beta_path, os.X_OK):\n raise svntest.Failure(\"beta is not marked as executable after merge\")\n\n # Commit (r4)\n sbox.simple_commit()\n\n # Verify the executable bit has been set\n if not os.access(alpha_path, os.X_OK):\n raise svntest.Failure(\"alpha is not marked as executable after commit\")\n if not os.access(beta_path, os.X_OK):\n raise svntest.Failure(\"beta is not marked as executable after commit\")\n\ndef dry_run_merge_conflicting_binary(sbox):\n \"dry run shouldn't resolve conflicts\"\n\n # This test-case is to showcase the regression caused by\n # r1075802. Here is the link to the relevant discussion:\n # http://svn.haxx.se/dev/archive-2011-03/0145.shtml\n\n sbox.build()\n wc_dir = sbox.wc_dir\n # Add a binary file to the project\n theta_contents = open(os.path.join(sys.path[0], \"theta.bin\"), 'rb').read()\n # Write PNG file data into 'A/theta'.\n theta_path = sbox.ospath('A/theta')\n svntest.main.file_write(theta_path, theta_contents, 'wb')\n\n svntest.main.run_svn(None, 'add', theta_path)\n\n # Commit the new binary file, creating revision 2.\n expected_output = svntest.wc.State(wc_dir, {\n 'A/theta' : Item(verb='Adding (bin)'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/theta' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Make the \"other\" working copy\n other_wc = sbox.add_wc_path('other')\n svntest.actions.duplicate_dir(wc_dir, other_wc)\n\n # Change the binary file in first working copy, commit revision 3.\n svntest.main.file_append(theta_path, \"some extra junk\")\n expected_output = wc.State(wc_dir, {\n 'A/theta' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/theta' : Item(status=' ', wc_rev=3),\n })\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # In second working copy, append different content to the binary\n # and attempt to 'svn merge -r 2:3'.\n # We should see a conflict during the merge.\n other_theta_path = os.path.join(other_wc, 'A', 'theta')\n svntest.main.file_append(other_theta_path, \"some other junk\")\n expected_output = wc.State(other_wc, {\n 'A/theta' : Item(status='C '),\n })\n expected_mergeinfo_output = wc.State(other_wc, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(other_wc, {\n })\n expected_disk = 
svntest.main.greek_state.copy()\n expected_disk.add({\n '' : Item(props={SVN_PROP_MERGEINFO : '/:3'}),\n 'A/theta' : Item(theta_contents + b\"some other junk\",\n props={'svn:mime-type' : 'application/octet-stream'}),\n })\n\n # verify content of base(left) file\n expected_disk.add({\n 'A/theta.merge-left.r2' :\n Item(contents = theta_contents )\n })\n # verify content of theirs(right) file\n expected_disk.add({\n 'A/theta.merge-right.r3' :\n Item(contents= theta_contents + b\"some extra junk\")\n })\n\n expected_status = svntest.actions.get_virginal_state(other_wc, 1)\n expected_status.add({\n '' : Item(status=' M', wc_rev=1),\n 'A/theta' : Item(status='C ', wc_rev=2),\n })\n expected_skip = wc.State('', { })\n\n svntest.actions.run_and_verify_merge(other_wc, '2', '3',\n sbox.repo_url, None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--allow-mixed-revisions',\n other_wc)\n\n#----------------------------------------------------------------------\n@Issue(3857)\ndef foreign_repos_prop_conflict(sbox):\n \"prop conflict from foreign repos merge\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Create a second repository and working copy with the original\n # greek tree.\n repo_dir = sbox.repo_dir\n other_repo_dir, other_repo_url = sbox.add_repo_path(\"other\")\n other_wc_dir = sbox.add_wc_path(\"other\")\n svntest.main.copy_repos(repo_dir, other_repo_dir, 1, 1)\n svntest.actions.run_and_verify_svn(None, [], 'co', other_repo_url,\n other_wc_dir)\n\n # Add properties in the first repos and commit.\n sbox.simple_propset('red', 'rojo', 'A/D/G')\n sbox.simple_propset('yellow', 'amarillo', 'A/D/G')\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'spenglish', wc_dir)\n\n # Tweak properties in the first repos and commit.\n sbox.simple_propset('red', 'rosso', 'A/D/G')\n sbox.simple_propset('yellow', 'giallo', 'A/D/G')\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'engtalian', wc_dir)\n\n # Now, merge the propchange to the *second* working copy.\n expected_output = [' C %s\\n' % (os.path.join(other_wc_dir,\n \"A\", \"D\", \"G\"))]\n expected_output = expected_merge_output([[3]], expected_output, True,\n prop_conflicts=1)\n svntest.actions.run_and_verify_svn(expected_output,\n [], 'merge', '-c3',\n sbox.repo_url,\n other_wc_dir)\n\n#----------------------------------------------------------------------\n# Test for issue #3975 'adds with explicit mergeinfo don't get mergeinfo\n# describing merge which added them'\n@Issue(3975)\n@SkipUnless(server_has_mergeinfo)\ndef merge_adds_subtree_with_mergeinfo(sbox):\n \"merge adds subtree with mergeinfo\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, False, 2)\n\n A_path = sbox.ospath('A')\n nu_path = sbox.ospath('A/C/nu')\n nu_COPY_path = sbox.ospath('A_COPY/C/nu')\n A_COPY2_path = sbox.ospath('A_COPY_2')\n\n # r8 - Add the file A_COPY/C/nu.\n svntest.main.file_write(nu_COPY_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Add a file on the A_COPY branch',\n wc_dir)\n\n # r9 - Cherry pick r8 from A_COPY to A.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A_COPY',\n A_path, '-c8')\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Merge r8 
from A_COPY to A', wc_dir)\n\n # r10 - Make a modification to A_COPY/C/nu\n svntest.main.file_append(nu_COPY_path,\n \"More work on the A_COPY branch.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Some work on the A_COPY branch', wc_dir)\n\n # r9 - Cherry pick r10 from A_COPY/C/nu to A/C/nu. Make some\n # changes to A/C/nu before committing the merge.\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A_COPY/C/nu',\n nu_path, '-c10')\n svntest.main.file_append(nu_path, \"A faux conflict resolution.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Merge r8 from A_COPY to A', wc_dir)\n\n # Sync merge A to A_COPY_2\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(A_COPY2_path, {\n 'B/E/beta' : Item(status='U '),\n 'C/nu' : Item(status='A '),\n 'D/G/rho' : Item(status='U '),\n 'D/H/omega' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n '' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(A_COPY2_path, {\n '' : Item(status=' G'),\n 'C/nu' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY2_path, {\n })\n expected_status = wc.State(A_COPY2_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status='M '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'C/nu' : Item(status='A ', copied='+'),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status='M '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status='M '),\n 'D/H/omega' : Item(status='M '),\n })\n expected_status.tweak(wc_rev=11)\n expected_status.tweak('C/nu', wc_rev='-')\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3-11\\n/A_COPY:8'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n # C/nu will pick up the mergeinfo A_COPY/C/nu:8 which is self-referential.\n # This is issue #3668 'inheritance can result in self-referential\n # mergeinfo', but we'll allow it in this test since issue #3668 is\n # tested elsewhere and is not the point of *this* test.\n 'C/nu' : Item(\"This is the file 'nu'.\\n\" \\\n \"More work on the A_COPY branch.\\n\" \\\n \"A faux conflict resolution.\\n\",\n props={SVN_PROP_MERGEINFO :\n '/A/C/nu:9-11\\n/A_COPY/C/nu:8,10'}),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State('.', { })\n svntest.actions.run_and_verify_merge(A_COPY2_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# A test for issue #3978 'reverse merge which adds subtree 
fails'.\n@Issue(3978,4057)\n@SkipUnless(server_has_mergeinfo)\ndef reverse_merge_adds_subtree(sbox):\n \"reverse merge adds subtree\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n A_path = sbox.ospath('A')\n chi_path = sbox.ospath('A/D/H/chi')\n A_COPY_path = sbox.ospath('A_COPY')\n H_COPY_path = sbox.ospath('A_COPY/D/H')\n\n # r7 - Delete A\\D\\H\\chi\n svntest.actions.run_and_verify_svn(None, [], 'delete', chi_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Delete a file', wc_dir)\n\n # r8 - Merge r7 from A to A_COPY\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A',\n A_COPY_path, '-c7')\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Cherry-pick r7 from A to A_COPY', wc_dir)\n\n # r9 - File depth sync merge from A/D/H to A_COPY/D/H/\n # This shallow merge does not create non-inheritable mergeinfo because of\n # the issue #4057 fix; all subtrees affected by the diff are present, so\n # non-inheritable mergeinfo is not required.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A/D/H',\n H_COPY_path, '--depth', 'files')\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Cherry-pick r7 from A to A_COPY', wc_dir)\n\n # Reverse merge r7 from A to A_COPY\n #\n # Prior to the issue #3978 fix this merge failed with an assertion:\n #\n # >svn merge ^/A A_COPY -c-7\n # --- Reverse-merging r7 into 'A_COPY\\D\\H':\n # A A_COPY\\D\\H\\chi\n # --- Recording mergeinfo for reverse merge of r7 into 'A_COPY':\n # U A_COPY\n # --- Recording mergeinfo for reverse merge of r7 into 'A_COPY\\D\\H':\n # U A_COPY\\D\\H\n # ..\\..\\..\\subversion\\svn\\util.c:913: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10990: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10944: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10944: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10914: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:8928: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:7850: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_client\\mergeinfo.c:120: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_wc\\props.c:2472: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_wc\\props.c:2247: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_wc\\props.c:2576: (apr_err=200020)\n # ..\\..\\..\\subversion\\libsvn_subr\\mergeinfo.c:705: (apr_err=200020)\n # svn: E200020: Could not parse mergeinfo string '-7'\n # ..\\..\\..\\subversion\\libsvn_subr\\mergeinfo.c:688: (apr_err=200022)\n # ..\\..\\..\\subversion\\libsvn_subr\\mergeinfo.c:607: (apr_err=200022)\n # ..\\..\\..\\subversion\\libsvn_subr\\mergeinfo.c:504: (apr_err=200022)\n # ..\\..\\..\\subversion\\libsvn_subr\\kitchensink.c:57: (apr_err=200022)\n # svn: E200022: Negative revision number found parsing '-7'\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(A_COPY_path, {\n 'D/H/chi' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'D/H' : Item(status=' U'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/E' : Item(status=' '),\n 
'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' M'),\n 'D/H/chi' : Item(status='A ', copied='+'),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=9)\n expected_status.tweak('D/H/chi', wc_rev='-')\n expected_disk = wc.State('', {\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(props={SVN_PROP_MERGEINFO : '/A/D/H:2-6,8'}),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State('.', { })\n svntest.actions.run_and_verify_merge(A_COPY_path, 7, 6,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# A test for issue #3989 'merge which deletes file with native eol-style\n# raises spurious tree conflict'.\n@Issue(3989)\n@SkipUnless(server_has_mergeinfo)\ndef merged_deletion_causes_tree_conflict(sbox):\n \"merged deletion causes spurious tree conflict\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n psi_path = sbox.ospath('A/D/H/psi')\n H_branch_path = sbox.ospath('branch/D/H')\n\n # r2 - Set svn:eol-style native on A/D/H/psi\n svntest.actions.run_and_verify_svn(None, [], 'ps', 'svn:eol-style',\n 'native', psi_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Set eol-style native on a path',\n wc_dir)\n\n # r3 - Branch ^/A to ^/branch\n svntest.actions.run_and_verify_svn(None, [], 'copy',\n sbox.repo_url + '/A',\n sbox.repo_url + '/branch',\n '-m', 'Copy ^/A to ^/branch')\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # r4 - Delete A/D/H/psi\n svntest.actions.run_and_verify_svn(None, [], 'delete', psi_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'Delete a a path with native eol-style',\n wc_dir)\n\n # Sync merge ^/A/D/H to branch/D/H.\n #\n # branch/D/H/psi is, ignoring differences caused by svn:eol-style, identical\n # to ^/A/D/H/psi when the latter was deleted, so the deletion should merge\n # cleanly.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(H_branch_path, {\n 'psi' : Item(status='D '),\n })\n expected_mergeinfo_output = wc.State(H_branch_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = wc.State(H_branch_path, {})\n expected_status = wc.State(H_branch_path, {\n '' : Item(status=' M'),\n 'chi' : Item(status=' '),\n 'psi' : Item(status='D '),\n 'omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=4)\n expected_disk = wc.State('', {\n '' : 
Item(props={SVN_PROP_MERGEINFO : '/A/D/H:3-4'}),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State('.', { })\n svntest.actions.run_and_verify_merge(H_branch_path, None, None,\n sbox.repo_url + '/A/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n# A test for issue #3976 'record-only merges which add new subtree mergeinfo\n# don't record mergeinfo describing merge'.\n@Issue(3976)\n@SkipUnless(server_has_mergeinfo)\ndef record_only_merge_adds_new_subtree_mergeinfo(sbox):\n \"record only merge adds new subtree mergeinfo\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox)\n\n psi_path = sbox.ospath('A/D/H/psi')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n H_COPY2_path = sbox.ospath('A_COPY_2/D/H')\n\n # r7 - Copy ^/A_COPY to ^/A_COPY_2\n svntest.actions.run_and_verify_svn(None, [],\n 'copy', '-m', 'copy A_COPY to A_COPY_2',\n sbox.repo_url + '/A_COPY',\n sbox.repo_url + '/A_COPY_2')\n\n # r8 - Set a property on A/D/H/psi. It doesn't matter what property\n # we use, just as long as we have a change that can be merged independently\n # of the text change to A/D/H/psi in r3.\n svntest.main.run_svn(None, 'propset', 'svn:eol-style', 'native', psi_path)\n svntest.main.run_svn(None, 'commit', '-m', 'set svn:eol-style', wc_dir)\n\n # r9 - Merge r3 from ^/A/D/H/psi to A_COPY/D/H/psi.\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A/D/H/psi',\n psi_COPY_path, '-c3')\n svntest.main.run_svn(None, 'commit', '-m', 'Subtree merge', wc_dir)\n\n # r10 - Merge r8 from ^/A/D/H/psi to A_COPY/D/H/psi.\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A/D/H/psi',\n psi_COPY_path, '-c8')\n svntest.main.run_svn(None, 'commit', '-m', 'Subtree merge', wc_dir)\n\n # Merge r10 from ^/A_COPY/D/H to A_COPY_2/D/H. 
This should leave\n # A_COPY_2/D/H/psi with three new property additions:\n #\n # 1) The 'svn:eol-style=native' from r10 via r8.\n #\n # 2) The mergeinfo '/A/D/H/psi:8' from r10.\n #\n # 3) The mergeinfo '/A_COPY/D/H/psi:10' describing the merge itself.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(H_COPY2_path, {\n 'psi' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(H_COPY2_path, {\n '' : Item(status=' U'),\n 'psi' : Item(status=' G'),\n })\n expected_elision_output = wc.State(H_COPY2_path, {})\n expected_status = wc.State(H_COPY2_path, {\n '' : Item(status=' M'),\n 'chi' : Item(status=' '),\n 'psi' : Item(status=' M'),\n 'omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=10)\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A_COPY/D/H:10'}),\n 'psi' : Item(\"This is the file 'psi'.\\n\",\n props={SVN_PROP_MERGEINFO :\n '/A/D/H/psi:8\\n/A_COPY/D/H/psi:10',\n 'svn:eol-style' : 'native'}),\n 'chi' : Item(\"This is the file 'chi'.\\n\"),\n 'omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State('.', { })\n svntest.actions.run_and_verify_merge(H_COPY2_path, 9, 10,\n sbox.repo_url + '/A_COPY/D/H', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n\n#----------------------------------------------------------------------\n# Test for issue #4056 \"don't record non-inheritable mergeinfo if missing\n# subtrees are not touched by the full-depth diff\".\n@Issue(4056)\n@SkipUnless(server_has_mergeinfo)\ndef unnecessary_noninheritable_mergeinfo_missing_subtrees(sbox):\n \"missing subtrees untouched by infinite depth merge\"\n\n B_branch_path = sbox.ospath('branch/B')\n\n # Setup a simple branch to which\n expected_output, expected_mergeinfo_output, expected_elision_output, \\\n expected_status, expected_disk, expected_skip = \\\n noninheritable_mergeinfo_test_set_up(sbox)\n\n # Create a shallow merge target; set depth of branch/B to files.\n svntest.main.run_svn(None, 'up', '--set-depth=files', B_branch_path)\n expected_status.remove('E', 'E/alpha', 'E/beta', 'F')\n expected_disk.remove('E', 'E/alpha', 'E/beta', 'F')\n\n # Merge r3 from ^/A/B to branch/B\n #\n # Merge is smart enough to realize that despite the shallow merge target,\n # the diff can only affect branch/B/lambda, which is still present, so there\n # is no need to record non-inheritable mergeinfo on the target\n # or any subtree mergeinfo whatsoever:\n #\n # >svn pg svn:mergeinfo -vR\n # Properties on 'branch\\B':\n # svn:mergeinfo\n # /A/B:3 <-- Nothing was skipped, so doesn't need\n # to be non-inheritable.\n svntest.actions.run_and_verify_merge(B_branch_path,\n '2', '3',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n B_branch_path)\n\n#----------------------------------------------------------------------\n# Test for issue #4057 \"don't record non-inheritable mergeinfo in shallow\n# merge if entire diff is within requested depth\".\n@Issue(4057)\n@SkipUnless(server_has_mergeinfo)\ndef unnecessary_noninheritable_mergeinfo_shallow_merge(sbox):\n \"shallow merge reaches all necessary subtrees\"\n\n B_branch_path = sbox.ospath('branch/B')\n E_path = sbox.ospath('A/B/E')\n\n # Setup a simple branch to which\n expected_output, expected_mergeinfo_output, expected_elision_output, 
\\\n expected_status, expected_disk, expected_skip = \\\n noninheritable_mergeinfo_test_set_up(sbox)\n\n # Merge r3 from ^/A/B to branch/B at operational depth=files\n #\n # Previously this failed because merge wasn't smart enough to\n # realize that despite being a shallow merge, the diff can\n # only affect branch/B/lambda, which is within the specified\n # depth, so there is no need to record non-inheritable mergeinfo\n # or subtree mergeinfo:\n #\n # >svn pg svn:mergeinfo -vR\n # Properties on 'branch\\B':\n # svn:mergeinfo\n # /A/B:3* <-- Should be inheritable\n # Properties on 'branch\\B\\lambda':\n # svn:mergeinfo\n # /A/B/lambda:3 <-- Not necessary\n expected_skip = wc.State(B_branch_path, {})\n svntest.actions.run_and_verify_merge(B_branch_path, '2', '3',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--depth', 'files', B_branch_path)\n\n # Revert the merge and then make a prop change to A/B/E in r4.\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '--recursive', sbox.wc_dir)\n svntest.actions.run_and_verify_svn([\"property 'prop:name' set on '\" +\n E_path + \"'\\n\"], [], 'ps',\n 'prop:name', 'propval', E_path)\n svntest.actions.run_and_verify_svn(None, [],\n 'ci', '-m', 'A new property on a dir',\n sbox.wc_dir)\n svntest.actions.run_and_verify_svn(None, [],\n 'up', sbox.wc_dir)\n\n # Merge r4 from ^/A/B to branch/B at operational depth=immediates\n #\n # Previously this failed because the mergetracking logic didn't realize\n # that despite being a shallow merge, the diff only affected branch/B/E,\n # which was within the specified depth, so there was no need to record\n # non-inheritable mergeinfo or subtree mergeinfo:\n #\n # >svn pg svn:mergeinfo -vR\n # Properties on 'branch\\B':\n # svn:mergeinfo\n # /A/B:4* <-- Should be inheritable\n # Properties on 'branch\\B\\E':\n # svn:mergeinfo\n # /A/B/E:4 <-- Not necessary\n expected_output = wc.State(B_branch_path, {\n 'E' : Item(status=' U'),\n })\n expected_mergeinfo_output = wc.State(B_branch_path, {\n '' : Item(status=' U'),\n 'E' : Item(status=' U'),\n })\n expected_elision_output = wc.State(B_branch_path, {\n 'E' : Item(status=' U'),\n })\n expected_status = wc.State(B_branch_path, {\n '' : Item(status=' M'),\n 'lambda' : Item(status=' '),\n 'E' : Item(status=' M'),\n 'E/alpha' : Item(status=' '),\n 'E/beta' : Item(status=' '),\n 'F' : Item(status=' '),\n })\n expected_status.tweak(wc_rev='4')\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:4'}),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'E' : Item(props={'prop:name' : 'propval'}),\n 'E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'F' : Item(),\n })\n svntest.actions.run_and_verify_merge(B_branch_path, '3', '4',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, True,\n '--depth', 'immediates', B_branch_path)\n\n#----------------------------------------------------------------------\n# Test for issue #4132, \"merge of replaced source asserts\".\n# The original use-case is the following merges, which both asserted:\n# svn merge -cr1295005 ^/subversion/trunk@1295000 ../src\n# svn merge -cr1295004 ^/subversion/trunk/@r1295004 ../src\n@Issue(4132)\ndef svnmucc_abuse_1(sbox):\n \"svnmucc: merge a replacement\"\n\n 
sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n ## Using A/ as our trunk, since one cannot replace the root.\n\n ## r2: open a branch\n sbox.simple_repo_copy('A', 'A_COPY')\n\n ## r3: padding (to make the revnums-mod-10 match)\n sbox.simple_repo_copy('iota', 'padding')\n\n ## r4: trunk: accidental change\n sbox.simple_append('A/mu', 'accidental change')\n sbox.simple_commit()\n\n ## r5: fail to revert it\n svntest.actions.run_and_verify_svnmucc(None, [],\n '-m', 'r5',\n '-U', sbox.repo_url,\n 'rm', 'A',\n 'cp', 'HEAD', 'A', 'A')\n\n ## r6: really revert it\n svntest.actions.run_and_verify_svnmucc(None, [],\n '-m', 'r6',\n '-U', sbox.repo_url,\n 'rm', 'A',\n 'cp', '3', 'A', 'A')\n\n ## Attempt to merge that.\n # This used to assert:\n # --- Recording mergeinfo for merge of r5 into \\\n # 'svn-test-work/working_copies/merge_tests-125/A_COPY':\n # subversion/libsvn_subr/mergeinfo.c:1172: (apr_err=235000)\n # svn: E235000: In file 'subversion/libsvn_subr/mergeinfo.c' \\\n # line 1172: assertion failed (IS_VALID_FORWARD_RANGE(first))\n #\n # Then, prior to the fix asserted this way:\n #\n # >svn merge -c5 ^/A@r5 A_COPY\n # subversion\\libsvn_client\\merge.c:4871: (apr_err=235000)\n # svn: E235000: In file 'subversion\\libsvn_client\\merge.c'\n # line 4871: assertion failed (*gap_start < *gap_end)\n sbox.simple_update()\n svntest.main.run_svn(None, 'merge', '-c', 'r5', '^/A@r5',\n sbox.ospath('A_COPY'))\n\n#----------------------------------------------------------------------\n# Test for issue #4138 'replacement in merge source not notified correctly'.\n@SkipUnless(server_has_mergeinfo)\n@Issue(4138)\ndef merge_source_with_replacement(sbox):\n \"replacement in merge source not notified correctly\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about.\n A_path = sbox.ospath('A')\n omega_path = sbox.ospath('A/D/H/omega')\n A_COPY_path = sbox.ospath('A_COPY')\n beta_COPY_path = sbox.ospath('A_COPY/B/E/beta')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n\n # branch A@1 to A_COPY in r2, then make a few edits under A in r3-6:\n wc_disk, wc_status = set_up_branch(sbox)\n\n # r7 Delete A, replace it with A@5, effectively reverting the change\n # made to A/D/H/omega in r6:\n svntest.main.run_svn(None, 'up', wc_dir)\n svntest.main.run_svn(None, 'del', A_path)\n svntest.main.run_svn(None, 'copy', sbox.repo_url + '/A@5', A_path)\n sbox.simple_commit(message='Replace A with older version of itself')\n\n # r8: Make an edit to A/D/H/omega:\n svntest.main.file_write(omega_path, \"New content for 'omega'.\\n\")\n sbox.simple_commit(message='file edit')\n\n # Update and sync merge ^/A to A_COPY.\n #\n # text text text text text\n # edit edit edit edit edit\n # psi rho beta omega omega\n # A@r1---r3----r4----r5----r6---X r7---r8--------->\n # | | ^ |\n # | v | |\n # | +---replacement---+ |\n # copy |\n # | sync-merge\n # | |\n # v v\n # r2---A_COPY----------------------------------------->\n svntest.main.run_svn(None, 'up', wc_dir)\n # This test previously failed because the merge notifications make it look\n # like r6 from ^/A was merged and recorded:\n #\n # >svn merge ^^/A A_COPY\n # --- Merging r2 through r5 into 'A_COPY':\n # U A_COPY\\B\\E\\beta\n # U A_COPY\\D\\G\\rho\n # U A_COPY\\D\\H\\psi\n # --- Recording mergeinfo for merge of r2 through r5 into 'A_COPY':\n # U A_COPY\n # --- Merging r6 through r8 into 
'A_COPY':\n # U A_COPY\\D\\H\\omega\n # --- Recording mergeinfo for merge of r6 through r8 into 'A_COPY':\n # G A_COPY\n expected_output = expected_merge_output(\n [[2,5],[7,8]],\n ['U ' + beta_COPY_path + '\\n',\n 'U ' + rho_COPY_path + '\\n',\n 'U ' + omega_COPY_path + '\\n',\n 'U ' + psi_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',\n ' G ' + A_COPY_path + '\\n',])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', sbox.repo_url + '/A',\n A_COPY_path)\n\n # Misleading notifications are one thing, incorrect mergeinfo is quite\n # another.\n svntest.actions.run_and_verify_svn([A_COPY_path + ' - /A:2-5,7-8\\n'],\n [], 'pg', SVN_PROP_MERGEINFO,\n '-R', A_COPY_path)\n\n # Commit the above merge and then reverse merge it. Again r6 is not\n # being merged and should not be part of the notifications.\n sbox.simple_commit()\n sbox.simple_update()\n expected_output = expected_merge_output(\n [[5,2],[8,7]],\n ['U ' + beta_COPY_path + '\\n',\n 'U ' + rho_COPY_path + '\\n',\n 'U ' + omega_COPY_path + '\\n',\n 'U ' + psi_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',\n ' G ' + A_COPY_path + '\\n',],\n elides=True)\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', sbox.repo_url + '/A',\n A_COPY_path, '-r8:1')\n\n#----------------------------------------------------------------------\n# Test for issue #4144 'Reverse merge with replace in source applies\n# diffs in forward order'.\n@SkipUnless(server_has_mergeinfo)\n@Issue(4144)\ndef reverse_merge_with_rename(sbox):\n \"reverse merge applies revs in reverse order\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about.\n A_path = sbox.ospath('A')\n omega_path = sbox.ospath('trunk/D/H/omega')\n A_COPY_path = sbox.ospath('A_COPY')\n beta_COPY_path = sbox.ospath('A_COPY/B/E/beta')\n psi_COPY_path = sbox.ospath('A_COPY/D/H/psi')\n rho_COPY_path = sbox.ospath('A_COPY/D/G/rho')\n omega_COPY_path = sbox.ospath('A_COPY/D/H/omega')\n\n # branch A@1 to A_COPY in r2, then make a few edits under A in r3-6:\n wc_disk, wc_status = set_up_branch(sbox)\n\n # r7 - Rename ^/A to ^/trunk.\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 7.\\n'],\n [], 'move',\n sbox.repo_url + '/A',\n sbox.repo_url + '/trunk',\n '-m', \"Rename 'A' to 'trunk'\")\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # r8 - Make and edit to trunk/D/H/omega (which was also edited in r6).\n svntest.main.file_write(omega_path, \"Edit 'omega' on trunk.\\n\")\n sbox.simple_commit(message='Another omega edit')\n\n # r9 - Sync merge ^/trunk to A_COPY.\n svntest.actions.run_and_verify_svn(None, # Don't check stdout, we test this\n # type of merge to death elsewhere.\n [], 'merge', sbox.repo_url + '/trunk',\n A_COPY_path)\n sbox.simple_commit(message='Sync A_COPY with ^/trunk')\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Reverse merge -r9:1 from ^/trunk to A_COPY. 
This should return\n # A_COPY to the same state it had prior to the sync merge in r2.\n #\n # This currently fails because the Subversion tries to reverse merge\n # -r6:1 first, then -r8:6, causing a spurious conflict on omega:\n #\n # >svn merge ^/trunk A_COPY -r9:1 --accept=postpone\n # --- Reverse-merging r6 through r2 into 'A_COPY':\n # U A_COPY\\B\\E\\beta\n # U A_COPY\\D\\G\\rho\n # C A_COPY\\D\\H\\omega\n # U A_COPY\\D\\H\\psi\n # --- Recording mergeinfo for reverse merge of r6 through r2 into 'A_COPY':\n # U A_COPY\n # Summary of conflicts:\n # Text conflicts: 1\n # ..\\..\\..\\subversion\\svn\\util.c:913: (apr_err=155015)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10848: (apr_err=155015)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10812: (apr_err=155015)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:8984: (apr_err=155015)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:4728: (apr_err=155015)\n # svn: E155015: One or more conflicts were produced while merging r6:1\n # into 'C:\\SVN\\src-trunk-4\\Debug\\subversion\\tests\\cmdline\\svn-test-work\n # \\working_copies\\merge_tests-127\\A_COPY' -- resolve all conflicts and\n # rerun the merge to apply the remaining unmerged revisions\n expected_output = expected_merge_output(\n [[8,7],[6,2]],\n ['U ' + beta_COPY_path + '\\n',\n 'U ' + rho_COPY_path + '\\n',\n 'U ' + omega_COPY_path + '\\n',\n 'G ' + omega_COPY_path + '\\n',\n 'U ' + psi_COPY_path + '\\n',\n ' U ' + A_COPY_path + '\\n',\n ' G ' + A_COPY_path + '\\n',], elides=True)\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', sbox.repo_url + '/trunk',\n A_COPY_path, '-r9:1')\n\n#----------------------------------------------------------------------\n# Test for issue #4166 'multiple merge editor drives which add then\n# delete a subtree fail'.\n@SkipUnless(server_has_mergeinfo)\n@Issue(4166)\ndef merge_adds_then_deletes_subtree(sbox):\n \"merge adds then deletes subtree\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about.\n A_path = sbox.ospath('A')\n nu_path = sbox.ospath('A/C/nu')\n C_branch_path = sbox.ospath('branch/C')\n nu_branch_path = sbox.ospath('branch/C/nu')\n\n # Make a branch.\n svntest.actions.run_and_verify_svn(None, [], 'copy',\n sbox.repo_url + '/A',\n sbox.repo_url + '/branch',\n '-m', 'Make a branch.')\n\n # On the branch parent: Add a file in r3 and then delete it in r4.\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', wc_dir,\n '-m', 'Add a file')\n svntest.actions.run_and_verify_svn(None, [], 'delete', nu_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', wc_dir,\n '-m', 'Delete a file')\n\n # Merge r3 and r4 from ^/A/C to branch/C as part of one merge\n # command, but as separate editor drives, i.e. 'c3,4 vs. 
-r2:4.\n # These should be equivalent but the former was failing with:\n #\n # >svn merge ^/A/C branch\\C -c3,4\n # --- Merging r3 into 'branch\\C':\n # A branch\\C\\nu\n # --- Recording mergeinfo for merge of r3 into 'branch\\C':\n # U branch\\C\n # --- Merging r4 into 'branch\\C':\n # D branch\\C\\nu\n # --- Recording mergeinfo for merge of r4 into 'branch\\C':\n # G branch\\C\n # ..\\..\\..\\subversion\\svn\\util.c:913: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10873: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:10837: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:8994: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_client\\merge.c:7923: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_client\\mergeinfo.c:257: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_client\\mergeinfo.c:97: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_wc\\props.c:2003: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_wc\\props.c:2024: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_wc\\wc_db.c:11473: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_wc\\wc_db.c:7247: (apr_err=155010)\n # ..\\..\\..\\subversion\\libsvn_wc\\wc_db.c:7232: (apr_err=155010)\n # svn: E155010: The node 'C:\\SVN\\src-trunk\\Debug\\subversion\\tests\n # \\cmdline\\svn-test-work\\working_copies\\merge_tests-128\\branch\\C\\nu'\n # was not found.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[3],[4]],\n ['A ' + nu_branch_path + '\\n',\n 'D ' + nu_branch_path + '\\n',\n ' U ' + C_branch_path + '\\n',\n ' G ' + C_branch_path + '\\n',]),\n [], 'merge', '-c3,4', sbox.repo_url + '/A/C', C_branch_path)\n\n#----------------------------------------------------------------------\n# Test for issue #4169 'added subtrees with non-inheritable mergeinfo\n# cause spurious subtree mergeinfo'.\n@SkipUnless(server_has_mergeinfo)\n@Issue(4169)\ndef merge_with_added_subtrees_with_mergeinfo(sbox):\n \"merge with added subtrees with mergeinfo\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about.\n A_path = sbox.ospath('A')\n Y_path = sbox.ospath('A/C/X/Y')\n Z_path = sbox.ospath('A/C/X/Y/Z')\n nu_path = sbox.ospath('A/C/X/Y/Z/nu')\n A_COPY_path = sbox.ospath('A_COPY')\n Y_COPY_path = sbox.ospath('A_COPY/C/X/Y')\n W_COPY_path = sbox.ospath('A_COPY/C/X/Y/Z/W')\n A_COPY2_path = sbox.ospath('A_COPY_2')\n\n # Make two branches of ^/A and then make a few edits under A in r4-7:\n wc_disk, wc_status = set_up_branch(sbox, nbr_of_branches=2)\n\n # r8 - Add a subtree under A.\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', '--parents',\n Z_path)\n svntest.main.file_write(nu_path, \"This is the file 'nu'.\\n\")\n svntest.actions.run_and_verify_svn(None, [], 'add', nu_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', wc_dir,\n '-m', 'Add a subtree on our \"trunk\"')\n\n # r9 - Sync ^/A to the first branch A_COPY.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A', A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', wc_dir,\n '-m', 'Sync ^/A to ^/A_COPY')\n\n # r10 - Make some edits on the first branch.\n svntest.actions.run_and_verify_svn(None, [], 'ps', 'branch-prop-foo',\n 'bar', Y_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'mkdir', W_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', 
wc_dir,\n '-m', 'Make some edits on \"branch 1\"')\n\n # r11 - Cherry-pick r10 on the first branch back to A, but\n # do so at depth=empty so non-inheritable mergeinfo is created.\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c10', '--depth=empty',\n sbox.repo_url + '/A_COPY/C/X/Y', Y_path)\n svntest.actions.run_and_verify_svn(\n None, [], 'ci', wc_dir,\n '-m', 'Depth empty subtree cherry pick from \"branch 1\" to \"trunk\"')\n\n # Sync ^/A to the second branch A_COPY_2.\n #\n # Previously this failed because spurious mergeinfo was created on\n # A_COPY_2/C/X/Y/Z:\n #\n # >svn merge ^^/A A_COPY_2\n # --- Merging r3 through r11 into 'A_COPY_2':\n # U A_COPY_2\\B\\E\\beta\n # A A_COPY_2\\C\\X\n # A A_COPY_2\\C\\X\\Y\n # A A_COPY_2\\C\\X\\Y\\Z\n # A A_COPY_2\\C\\X\\Y\\Z\\nu\n # U A_COPY_2\\D\\G\\rho\n # U A_COPY_2\\D\\H\\omega\n # U A_COPY_2\\D\\H\\psi\n # --- Recording mergeinfo for merge of r3 through r11 into 'A_COPY_2':\n # U A_COPY_2\n # --- Recording mergeinfo for merge of r3 through r11 into 'A_COPY_2\\C\\X\\Y':\n # G A_COPY_2\\C\\X\\Y\n # vvvvvvvvvvvvvvvvvvvv\n # U A_COPY_2\\C\\X\\Y\\Z\n # ^^^^^^^^^^^^^^^^^^^^\n #\n # >svn pl -vR A_COPY_2\n # Properties on 'A_COPY_2':\n # svn:mergeinfo\n # /A:3-11\n # Properties on 'A_COPY_2\\C\\X\\Y':\n # branch-prop-foo\n # bar\n # svn:mergeinfo\n # /A/C/X/Y:8-11\n # /A_COPY/C/X/Y:10*\n # vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n # Properties on 'A_COPY_2\\C\\X\\Y\\Z':\n # svn:mergeinfo\n # /A/C/X/Y/Z:8-11\n # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n expected_output = wc.State(A_COPY2_path, {\n 'B/E/beta' : Item(status='U '),\n 'D/G/rho' : Item(status='U '),\n 'D/H/omega' : Item(status='U '),\n 'D/H/psi' : Item(status='U '),\n 'C/X' : Item(status='A '),\n 'C/X/Y' : Item(status='A '),\n 'C/X/Y/Z' : Item(status='A '),\n 'C/X/Y/Z/nu' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY2_path, {\n '' : Item(status=' U'),\n 'C/X/Y' : Item(status=' U'), # Added with explicit mergeinfo\n })\n expected_elision_output = wc.State(A_COPY2_path, {\n })\n expected_status = wc.State(A_COPY2_path, {\n '' : Item(status=' M', wc_rev=11),\n 'B' : Item(status=' ', wc_rev=11),\n 'mu' : Item(status=' ', wc_rev=11),\n 'B/E' : Item(status=' ', wc_rev=11),\n 'B/E/alpha' : Item(status=' ', wc_rev=11),\n 'B/E/beta' : Item(status='M ', wc_rev=11),\n 'B/lambda' : Item(status=' ', wc_rev=11),\n 'B/F' : Item(status=' ', wc_rev=11),\n 'C' : Item(status=' ', wc_rev=11),\n 'C/X' : Item(status='A ', wc_rev='-', copied='+'),\n 'C/X/Y' : Item(status=' M', wc_rev='-', copied='+'),\n 'C/X/Y/Z' : Item(status=' ', wc_rev='-', copied='+'),\n 'C/X/Y/Z/nu' : Item(status=' ', wc_rev='-', copied='+'),\n 'D' : Item(status=' ', wc_rev=11),\n 'D/G' : Item(status=' ', wc_rev=11),\n 'D/G/pi' : Item(status=' ', wc_rev=11),\n 'D/G/rho' : Item(status='M ', wc_rev=11),\n 'D/G/tau' : Item(status=' ', wc_rev=11),\n 'D/gamma' : Item(status=' ', wc_rev=11),\n 'D/H' : Item(status=' ', wc_rev=11),\n 'D/H/chi' : Item(status=' ', wc_rev=11),\n 'D/H/psi' : Item(status='M ', wc_rev=11),\n 'D/H/omega' : Item(status='M ', wc_rev=11),\n })\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:3-11'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"New content\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' 
: Item(),\n 'C/X' : Item(),\n 'C/X/Y' : Item(props={\n SVN_PROP_MERGEINFO : '/A/C/X/Y:8-11\\n/A_COPY/C/X/Y:10*',\n 'branch-prop-foo' : 'bar'}),\n 'C/X/Y/Z' : Item(),\n 'C/X/Y/Z/nu' : Item(\"This is the file 'nu'.\\n\"),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"New content\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"New content\"),\n 'D/H/omega' : Item(\"New content\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY2_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n check_props=True)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef merge_with_externals_with_mergeinfo(sbox):\n \"merge with externals with mergeinfo\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about.\n A_path = sbox.ospath('A')\n A_COPY_path = sbox.ospath('A_COPY')\n file_external_path = sbox.ospath('A/file-external')\n mu_COPY_path = sbox.ospath('A_COPY/mu')\n mu_path = sbox.ospath('A/mu')\n\n # Make a branch of ^/A and then make a few edits under A in r3-6:\n wc_disk, wc_status = set_up_branch(sbox)\n\n svntest.main.file_write(mu_COPY_path, \"branch edit\")\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'file edit on the branch', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n\n # Create a file external under 'A' and set some bogus mergeinfo\n # on it (the fact that this mergeinfo is bogus has no bearing on\n # this test).\n svntest.actions.run_and_verify_svn(None, [], 'propset',\n 'svn:externals',\n '^/iota file-external', A_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'set file external', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'up', wc_dir)\n svntest.actions.run_and_verify_svn(None, [], 'ps', SVN_PROP_MERGEINFO,\n \"/bogus-mergeinfo:5\", file_external_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'set mergeinfo on file external',\n file_external_path)\n\n # Sync merge ^/A to A_COPY and then reintegrate A_COPY back to A.\n svntest.actions.run_and_verify_svn(None, [], 'merge',\n sbox.repo_url + '/A', A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', '-m',\n 'sync merge', wc_dir)\n # This was segfaulting, see\n # http://svn.haxx.se/dev/archive-2012-10/0364.shtml\n svntest.actions.run_and_verify_svn(\n expected_merge_output(None,\n ['U ' + mu_path + '\\n',\n ' U ' + A_path + '\\n'],\n two_url=True),\n [], 'merge', '--reintegrate', sbox.repo_url + '/A_COPY',\n A_path)\n\n#----------------------------------------------------------------------\n# Test merging 'binary' files with keyword expansion enabled.\n# Tests issue #4221 'Trivial merge of a binary file with svn:keywords\n# raises a conflict', among other cases.\n@SkipUnless(server_has_mergeinfo)\n@Issue(4221)\ndef merge_binary_file_with_keywords(sbox):\n \"merge binary file with keywords\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n # Some binary files, and some binary files that will become text files.\n # 'mod_src' means a content change on the branch (the merge source);\n # 'mod_tgt' means a content change on the 
original (the merge target);\n # 'to_txt' means svn:mime-type removed on the branch (the merge source).\n file_mod_both = 'A/B/E/alpha'\n file_mod_src = 'A/D/G/pi'\n file_mod_tgt = 'A/D/G/rho'\n file_mod_none = 'A/D/G/tau'\n file_mod_both_to_txt = 'A/B/E/beta'\n file_mod_src_to_txt = 'A/D/H/chi'\n file_mod_tgt_to_txt = 'A/D/H/psi'\n file_mod_none_to_txt = 'A/D/H/omega'\n files_bin = [ file_mod_both, file_mod_src, file_mod_tgt, file_mod_none ]\n files_txt = [ file_mod_both_to_txt, file_mod_src_to_txt,\n file_mod_tgt_to_txt, file_mod_none_to_txt ]\n files = files_bin + files_txt\n\n # make some 'binary' files with keyword expansion enabled\n for f in files:\n svntest.main.file_append(sbox.ospath(f), \"With $Revision: $ keyword.\\n\")\n svntest.main.run_svn(binary_mime_type_on_text_file_warning,\n 'propset', 'svn:mime-type',\n 'application/octet-stream', sbox.ospath(f))\n sbox.simple_propset('svn:keywords', 'Revision', f)\n sbox.simple_commit()\n\n # branch the files\n sbox.simple_repo_copy('A', 'A2')\n sbox.simple_update()\n\n # Modify the branched (source) and/or original (target) versions. Remove\n # the svn:mime-type from the 'to_txt' files on the branch.\n # The original bug in issue #4221 gave a conflict if we modified either\n # version or neither (using a single-file merge test case).\n for f in [ file_mod_both, file_mod_both_to_txt,\n file_mod_src, file_mod_src_to_txt ]:\n f_branch = 'A2' + f[1:]\n svntest.main.file_append(sbox.ospath(f_branch), \"Incoming mod.\\n\")\n for f in [ file_mod_both, file_mod_both_to_txt,\n file_mod_tgt, file_mod_tgt_to_txt ]:\n svntest.main.file_append(sbox.ospath(f), \"Mod on merge target.\\n\")\n for f in files_txt:\n f_branch = 'A2' + f[1:]\n sbox.simple_propdel('svn:mime-type', f_branch)\n sbox.simple_commit()\n sbox.simple_update()\n\n # merge back\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[3,4]],\n ['C ' + sbox.ospath(file_mod_both) + '\\n',\n 'U ' + sbox.ospath(file_mod_src) + '\\n',\n #' ' + sbox.ospath(file_mod_tgt) + '\\n',\n #' ' + sbox.ospath(file_mod_none) + '\\n',\n 'CU ' + sbox.ospath(file_mod_both_to_txt) + '\\n',\n 'UU ' + sbox.ospath(file_mod_src_to_txt) + '\\n',\n ' U ' + sbox.ospath(file_mod_tgt_to_txt) + '\\n',\n ' U ' + sbox.ospath(file_mod_none_to_txt) + '\\n',\n ' U A\\n'],\n text_conflicts=2),\n [], 'merge', '^/A2', 'A')\n\n#----------------------------------------------------------------------\n# Test for issue #4155 'Merge conflict text of expanded keyword incorrect\n# when svn:keyword property value removed'. 
Failed in 1.7.0 through 1.7.8.\n@SkipUnless(server_has_mergeinfo)\n@Issue(4155)\ndef merge_conflict_when_keywords_removed(sbox):\n \"merge conflict when keywords removed\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n # make a file with keyword expansion enabled\n svntest.main.file_write('A/keyfile', \"$Date$ $Revision$\\n\")\n sbox.simple_add('A/keyfile')\n sbox.simple_propset('svn:keywords', 'Date Revision', 'A/keyfile')\n sbox.simple_commit()\n sbox.simple_update()\n\n # branch the file\n sbox.simple_repo_copy('A', 'A2')\n sbox.simple_update()\n\n #\n svntest.main.file_append('A/keyfile', \" some changes\\n\")\n sbox.simple_commit()\n\n # sync merge\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[3,4]],\n ['U '+ sbox.ospath('A2/keyfile') + '\\n',\n ' U A2\\n']),\n [], 'merge', '^/A', 'A2')\n sbox.simple_commit()\n sbox.simple_update()\n\n # modify the original version: disable those KW & enable 'Id'\n sbox.simple_propset('svn:keywords', 'Id', 'A/keyfile')\n svntest.main.file_append('A/keyfile', \"$Id$\\n\")\n sbox.simple_commit()\n\n # sync merge again\n svntest.actions.run_and_verify_svn(\n expected_merge_output([[5,6]],\n ['UU ' + sbox.ospath('A2/keyfile') + '\\n',\n ' U A2\\n']),\n [], 'merge', '--accept=postpone', '^/A', 'A2')\n\n@SkipUnless(server_has_mergeinfo)\n@Issue(4139, 3274, 3503)\ndef merge_target_selection(sbox):\n \"merge target selection handling\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n # r2\n sbox.simple_mkdir('dir')\n sbox.simple_add_text('\\1\\2\\3\\4\\5', 'dir/binary-file')\n sbox.simple_add_text('abcde', 'dir/text-file')\n sbox.simple_commit()\n\n # r3\n sbox.simple_copy('dir', 'branch')\n sbox.simple_commit()\n\n # r4\n svntest.main.file_write(sbox.ospath('dir/binary-file'),\n '\\9\\8\\7\\6\\5\\4\\3\\2\\1')\n sbox.simple_commit()\n\n sbox.simple_update()\n\n os.chdir(sbox.ospath('branch'))\n\n # Merge the directory (no target)\n expected_output = [\n '--- Merging r4 into \\'.\\':\\n',\n 'U binary-file\\n',\n '--- Recording mergeinfo for merge of r4 into \\'.\\':\\n',\n ' U .\\n',\n ]\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '^/dir', '-c', '4')\n\n svntest.main.run_svn(None, 'revert', '-R', '.')\n\n # Merge the file (no target)\n expected_output = [\n '--- Merging r4 into \\'binary-file\\':\\n',\n 'U binary-file\\n',\n '--- Recording mergeinfo for merge of r4 into \\'binary-file\\':\\n',\n ' U binary-file\\n',\n ]\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '^/dir/binary-file', '-c', '4')\n\n svntest.main.run_svn(None, 'revert', '-R', '.')\n\n # Merge the directory (explicit target)\n expected_output = [\n '--- Merging r4 into \\'.\\':\\n',\n 'U binary-file\\n',\n '--- Recording mergeinfo for merge of r4 into \\'.\\':\\n',\n ' U .\\n',\n ]\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '^/dir', '-c', '4', '.')\n\n svntest.main.run_svn(None, 'revert', '-R', '.')\n\n # Merge the file (explicit target)\n expected_output = [\n '--- Merging r4 into \\'binary-file\\':\\n',\n 'U binary-file\\n',\n '--- Recording mergeinfo for merge of r4 into \\'binary-file\\':\\n',\n ' U binary-file\\n',\n ]\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '^/dir/binary-file', '-c', '4', 'binary-file')\n\n svntest.main.run_svn(None, 'revert', '-R', '.')\n\n # Merge the file (wrong target)\n expected_output = [\n '--- Merging r4 into \\'.\\':\\n',\n ' C .\\n',\n '--- Recording mergeinfo for merge of r4 into \\'.\\':\\n',\n ' U .\\n',\n ] 
+ svntest.main.summary_of_conflicts(tree_conflicts=1)\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '^/dir/binary-file',\n '-c', '4', '.', '--accept', 'postpone')\n\n svntest.main.run_svn(None, 'revert', '-R', '.')\n\n # Merge the dir (wrong target)\n expected_output = [\n '--- Merging r4 into \\'binary-file\\':\\n',\n ' C %s\\n' % os.path.join('binary-file'),\n '--- Recording mergeinfo for merge of r4 into \\'binary-file\\':\\n',\n ' U binary-file\\n',\n ] + svntest.main.summary_of_conflicts(tree_conflicts=1)\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'merge', '^/dir', '-c', '4', 'binary-file',\n '--accept', 'postpone')\n\n@SkipUnless(server_has_mergeinfo)\n@Issue(3405) # seems to be the wrong issue number\ndef merge_properties_on_adds(sbox):\n \"merged directory properties are added\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n sbox.simple_copy('A/D/G', 'G')\n\n sbox.simple_mkdir('A/D/G/M')\n sbox.simple_mkdir('A/D/G/M/N')\n sbox.simple_add_text('QQ', 'A/D/G/file', 'A/D/G/M/file')\n sbox.simple_propset('key', 'value',\n 'A/D/G/M', 'A/D/G/file', 'A/D/G/M/N', 'A/D/G/M/file')\n sbox.simple_commit()\n sbox.simple_update()\n\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '^/A/D/G', sbox.ospath('G'))\n\n expected_output = svntest.verify.UnorderedOutput([\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G'),\n ' svn:mergeinfo\\n',\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/M'),\n ' key\\n',\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/file'),\n ' key\\n',\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/M/N'),\n ' key\\n',\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/M/file'),\n ' key\\n',\n ])\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'proplist', '-R', sbox.ospath('G'))\n\n expected_output = svntest.verify.UnorderedOutput([\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/M'),\n ' key\\n',\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/file'),\n ' key\\n',\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/M/N'),\n ' key\\n',\n 'Properties on \\'%s\\':\\n' % sbox.ospath('G/M/file'),\n ' key\\n',\n ])\n\n # I merged the tree, which should include history but only the files have\n # the properties stored in PRISTINE. All directories have the properties\n # as local changes in ACTUAL.\n svntest.actions.run_and_verify_svn(expected_output, [],\n 'proplist', '-R', sbox.ospath('G'),\n '-r', 'BASE')\n\n # Note that this is not a regression. 
# This has been the case since 1.0.
  # ### We just made status, update and merge handle this without users
  # ### knowing about this limitation.

  # ### My guess is that the base merge support on svn_wc_merge_props()
  # ### was originally designed to resolve this problem, but I can't
  # ### find a released version where this was actually implemented.

  # For fun, also check the status: 'svn status' suppresses the M from AM.

  # G = sbox.ospath('G')
  #
  # expected_status = wc.State('G', {
  #   ''       : Item(status=' M', wc_rev='2'),
  #   'pi'     : Item(status='  ', wc_rev='2'),
  #   'tau'    : Item(status='  ', wc_rev='2'),
  #   'file'   : Item(status='A ', copied='+', wc_rev='-'), # Copied, no changes
  #   'M'      : Item(status='A ', copied='+', wc_rev='-'), # Copied, changes
  #   'M/file' : Item(status='  ', copied='+', wc_rev='-'), # Copied, no changes
  #   'M/N'    : Item(status=' M', copied='+', wc_rev='-'), # Local changes
  #   'rho'    : Item(status='  ', wc_rev='2'),
  # })
  # svntest.actions.run_and_verify_status(G, expected_status)


# ======================================================================
# Functions for parsing mergeinfo

def parse_changes_list(changes_string):
  """Parse a string containing a list of revision numbers in the form
     of the '--change' command-line argument (e.g. '1,3,-5,7-10').
     Return a list of elements of the form [[1], [3], [-5], [7,10]].
  """
  rev_ranges = []
  for rr in changes_string.split(','):
    if '-' in rr[1:]:
      revs = rr.split('-')
      rev_ranges.append([int(revs[0]), int(revs[1])])
    else:
      rev_ranges.append([int(rr)])
  return rev_ranges

def parse_rev_args(arg_list):
  """Return a list of [rX:rY] or [rZ] elements representing ARG_LIST
     whose elements are strings in the form '-rX:Y' or '-cZ,X-Y,...'.
  """
  rev_ranges = []
  for arg in arg_list:
    kind = arg[:2]
    val = arg[2:]
    if kind == '-r':
      if ':' in val:
        # list() so the result can be indexed under Python 3 as well.
        revs = list(map(int, val.split(':')))
        if revs[0] < revs[1]:
          rev_ranges.append([revs[0] + 1, revs[1]])
        else:
          rev_ranges.append([revs[0], revs[1] + 1])
      else:
        rev_ranges.append([int(val)])
    elif kind == '-c':
      rev_ranges.extend(parse_changes_list(val))
    else:
      raise ValueError("revision arg '" + arg + "' in '" + str(arg_list) +
                       "' does not start with -r or -c")
  return rev_ranges

class RangeList(list):
  """Represents a list of revision ranges, as a list of one- or
     two-element lists, each of the form [X] meaning "--revision (X-1):X"
     or [X,Y] meaning "--revision (X-1):Y".
  """
  def __init__(self, arg):
    """Initialize from ARG, which is either a '--change'-style string
       (e.g. '1,3,-5,7-10') or a list of '-r...'/'-c...' argument strings.
    """
    self.as_given = arg
    if isinstance(arg, str):
      list.__init__(self, parse_changes_list(arg))
    elif isinstance(arg, list):
      list.__init__(self, parse_rev_args(arg))
    else:
      raise ValueError("RangeList needs a string or a list, not '"
                       + str(arg) + "'")

def expected_merge_output2(tgt_ospath,
                           recorded_ranges,
                           merged_ranges=None,
                           prop_conflicts=0,
                           prop_resolved=0):
  """Return an ExpectedOutput instance corresponding to the expected
     output of a merge into TGT_OSPATH, with one 'recording
     mergeinfo...' notification per specified revision range in
     RECORDED_RANGES and one 'merging...' notification per revision
     range in MERGED_RANGES.

     RECORDED_RANGES is a mergeinfo-string or a RangeList.

     MERGED_RANGES is a list of mergeinfo-strings or a list of
     RangeLists.
If None, it means [[r] for r in RECORDED_RANGES].\n \"\"\"\n # Convert RECORDED_RANGES to a RangeList.\n if isinstance(recorded_ranges, str):\n recorded_ranges = RangeList(recorded_ranges)\n # Convert MERGED_RANGES to a list of RangeLists.\n if merged_ranges is None:\n merged_ranges = [[r] for r in recorded_ranges]\n elif len(merged_ranges) > 0 and isinstance(merged_ranges[0], str):\n # List of mergeinfo-strings => list of rangelists\n merged_ranges = [RangeList(r) for r in merged_ranges]\n\n status_letters_re = (prop_conflicts or prop_resolved) and ' [UC]' or ' U'\n status_letters_mi = ' [UG]'\n lines = []\n for i, rr in enumerate(recorded_ranges):\n # Merging ...\n for sr in merged_ranges[i]:\n revstart = sr[0]\n revend = len(sr) > 1 and sr[1] or None\n lines += [svntest.main.merge_notify_line(revstart, revend,\n target=tgt_ospath)]\n lines += [status_letters_re + ' ' + re.escape(tgt_ospath) + '\\n']\n # Recording mergeinfo ...\n revstart = rr[0]\n revend = len(rr) > 1 and rr[1] or None\n lines += [svntest.main.mergeinfo_notify_line(revstart, revend,\n target=tgt_ospath)]\n lines += [status_letters_mi + ' ' + re.escape(tgt_ospath) + '\\n']\n\n # Summary of conflicts\n lines += svntest.main.summary_of_conflicts(prop_conflicts=prop_conflicts,\n prop_resolved=prop_resolved,\n as_regex=True)\n\n # The 'match_all=False' is because we also expect some\n # 'Resolved conflicted state of ...' lines.\n return RegexListOutput(lines, match_all=False)\n\ndef expected_out_and_err(tgt_ospath,\n recorded_ranges,\n merged_ranges=None,\n prop_conflicts=0,\n prop_resolved=0,\n expect_error=True):\n \"\"\"Return a tuple (expected_out, expected_err) giving the expected\n output and expected error output for a merge into TGT_OSPATH. See\n expected_merge_output2() for details of RECORDED_RANGES and\n MERGED_RANGES and PROP_CONFLICTS. EXPECT_ERROR should be true iff\n we expect the merge to abort with an error about conflicts being\n raised.\n \"\"\"\n expected_out = expected_merge_output2(tgt_ospath, recorded_ranges,\n merged_ranges,\n prop_conflicts, prop_resolved)\n if expect_error:\n expected_err = RegexListOutput([\n '^svn: E155015: .* conflicts were produced .* into$',\n \"^'.*\" + re.escape(tgt_ospath) + \"' --$\",\n '^resolve all conflicts .* remaining$',\n '^unmerged revisions$'],\n match_all=False)\n else:\n expected_err = []\n\n return expected_out, expected_err\n\ndef check_mergeinfo(expected_mergeinfo, tgt_ospath):\n \"\"\"Read the mergeinfo on TGT_OSPATH; verify that it matches\n EXPECTED_MERGEINFO (list of lines).\n \"\"\"\n svntest.actions.run_and_verify_svn(\n expected_mergeinfo, [], 'pg', SVN_PROP_MERGEINFO, tgt_ospath)\n\ndef simple_merge(src_path, tgt_ospath, rev_args):\n \"\"\"Merge from ^/SRC_PATH to TGT_OSPATH using revision arguments REV_ARGS\n (list of '-r...' or '-c...' 
strings); expect a single-target merge\n with no conflicts or errors.\n \"\"\"\n rev_ranges = RangeList(rev_args)\n\n expected_out = expected_merge_output(rev_ranges,\n [' U ' + tgt_ospath + '\\n',\n ' [UG] ' + tgt_ospath + '\\n'],\n target=tgt_ospath)\n src_url = '^/' + src_path\n svntest.actions.run_and_verify_svn(\n expected_out, [],\n 'merge', src_url, tgt_ospath, '--accept', 'postpone', *rev_args)\n\n@SkipUnless(server_has_mergeinfo)\n@Issue(4306)\n# Test for issue #4306 'multiple editor drive file merges record wrong\n# mergeinfo during conflicts'\ndef conflict_aborted_mergeinfo_described_partial_merge(sbox):\n \"conflicted split merge can be repeated\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n trunk = 'A'\n branch = 'A2'\n file = 'mu'\n dir = 'B'\n trunk_file = 'A/mu'\n trunk_dir = 'A/B'\n\n # r2: initial state\n for rev in range(4, 11):\n sbox.simple_propset('prop-' + str(rev), 'Old pval ' + str(rev),\n trunk_file, trunk_dir)\n sbox.simple_commit()\n\n # r3: branch\n sbox.simple_copy(trunk, branch)\n sbox.simple_commit()\n\n zero_rev = 3\n\n def edit_file_or_dir(path, rev, val):\n \"\"\"Make a local edit to the file at PATH.\"\"\"\n sbox.simple_propset('prop-' + str(rev), val + ' pval ' + str(rev), path)\n\n # r4 through r10: simple edits\n for rev in range(4, 11):\n edit_file_or_dir(trunk_file, rev, 'Edited')\n edit_file_or_dir(trunk_dir, rev, 'Edited')\n sbox.simple_commit()\n\n # r14: merge some changes to the branch so that later merges will be split\n svntest.actions.run_and_verify_svn(None, [], 'merge', '-c5,9',\n '^/' + trunk, sbox.ospath(branch),\n '--accept', 'theirs-conflict')\n sbox.simple_commit()\n sbox.simple_update()\n\n def revert_branch():\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R',\n sbox.ospath(branch))\n\n def try_merge(relpath, conflict_rev, rev_args,\n expected_out_err, expected_mi):\n \"\"\"Revert RELPATH in the branch; make a change that will conflict\n with CONFLICT_REV if not None; merge RELPATH in the trunk\n to RELPATH in the branch using revision arguments REV_ARGS (list of\n '-r...' or '-c...' strings).\n\n EXPECTED_OUT_ERR_MI is a tuple: (expected_out, expected_err,\n expected_mi). EXPECTED_OUT and EXPECTED_ERR are instances of\n ExpectedOutput.\n\n Expect to find mergeinfo EXPECTED_MI if not None. EXPECTED_MI is\n a single mergeinfo-string.\n \"\"\"\n src_path = trunk + '/' + relpath\n tgt_path = branch + '/' + relpath\n tgt_ospath = sbox.ospath(tgt_path)\n\n expected_out, expected_err = expected_out_err\n\n revert_branch()\n\n # Arrange for the merge to conflict at CONFLICT_REV.\n if conflict_rev:\n edit_file_or_dir(tgt_path, conflict_rev, 'Conflict')\n\n src_url = '^/' + src_path\n svntest.actions.run_and_verify_svn(\n expected_out, expected_err,\n 'merge', src_url, tgt_ospath, '--accept', 'postpone',\n *rev_args)\n\n if expected_mi is not None:\n expected_mergeinfo = ['/' + src_path + ':' + expected_mi + '\\n']\n check_mergeinfo(expected_mergeinfo, tgt_ospath)\n\n # In a mergeinfo-aware merge, each specified revision range is split\n # internally into sub-ranges, to avoid any already-merged revisions.\n #\n # From white-box inspection, we see there are code paths that treat\n # the last specified range and the last sub-range specially. 
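 (In this test the branch already has r5 and r9 merged, so a requested\n  # '-c4-6,8-10' splits into the single-revision sub-ranges r4, r6, r8 and r10.) 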
The\n # first specified range or sub-range is not treated specially in terms\n # of the code paths, although it might be in terms of data flow.\n #\n # We test merges that raise a conflict in the first and last sub-range\n # of the first and last specified range.\n\n for target in [file, dir]:\n\n tgt_ospath = sbox.ospath(branch + '/' + target)\n\n # First test: Merge \"everything\" to the branch.\n #\n # This merge is split into three sub-ranges: r3-4, r6-8, r10-head.\n # We have arranged that the merge will raise a conflict in the first\n # sub-range. Since we are postponing conflict resolution, the merge\n # should stop after the first sub-range, allowing us to resolve and\n # repeat the merge at which point the next sub-range(s) can be merged.\n # The mergeinfo on the target then should only reflect that the first\n # sub-range (r3-4) has been merged.\n #\n # Previously the merge failed after merging only r3-4 (as it should)\n # but mergeinfo for the whole range was recorded, preventing subsequent\n # repeat merges from applying the rest of the source changes.\n expect = expected_out_and_err(tgt_ospath,\n '3-4', ['3-4'],\n prop_conflicts=1)\n try_merge(target, 4, [], expect, '3-5,9')\n\n # Try a multiple-range merge that raises a conflict in the\n # first sub-range in the first specified range;\n expect = expected_out_and_err(tgt_ospath,\n '4', ['4'],\n prop_conflicts=1)\n try_merge(target, 4, ['-c4-6,8-10'], expect, '4-5,9')\n # last sub-range in the first specified range;\n expect = expected_out_and_err(tgt_ospath,\n '4-6', ['4,6'],\n prop_conflicts=1)\n try_merge(target, 6, ['-c4-6,8-10'], expect, '4-6,9')\n # first sub-range in the last specified range;\n expect = expected_out_and_err(tgt_ospath,\n '4-6,8', ['4,6', '8'],\n prop_conflicts=1)\n try_merge(target, 8, ['-c4-6,8-10'], expect, '4-6,8-9')\n # last sub-range in the last specified range.\n # (Expect no error, because 'svn merge' does not throw an error if\n # there is no more merging to do when a conflict occurs.)\n expect = expected_out_and_err(tgt_ospath,\n '4-6,8-10', ['4,6', '8,10'],\n prop_conflicts=1, expect_error=False)\n try_merge(target, 10, ['-c4-6,8-10'], expect, '4-6,8-10')\n\n # Try similar merges but involving ranges in reverse order.\n expect = expected_out_and_err(tgt_ospath,\n '8', ['8'],\n prop_conflicts=1)\n try_merge(target, 8, ['-c8-10,4-6'], expect, '5,8-9')\n expect = expected_out_and_err(tgt_ospath,\n '8-10', ['8,10'],\n prop_conflicts=1)\n try_merge(target, 10, ['-c8-10,4-6'], expect, '5,8-10')\n expect = expected_out_and_err(tgt_ospath,\n '8-10,4', ['8,10', '4'],\n prop_conflicts=1)\n try_merge(target, 4, ['-c8-10,4-6'], expect, '4-5,8-10')\n expect = expected_out_and_err(tgt_ospath,\n '8-10,4-6', ['8,10', '4,6'],\n prop_conflicts=1, expect_error=False)\n try_merge(target, 6, ['-c8-10,4-6'], expect, '4-6,8-10')\n\n # Try some reverse merges, with ranges in forward and reverse order.\n #\n # Reverse merges start with all source changes merged except 5 and 9.\n revert_branch()\n simple_merge(trunk + '/' + target, sbox.ospath(branch + '/' + target),\n ['-c-5,-9,4,6-8,10'])\n sbox.simple_commit()\n sbox.simple_update()\n\n expect = expected_out_and_err(tgt_ospath,\n '6-4,10-8', ['-6,-4', '-10,-8'],\n expect_error=False)\n try_merge(target, None, ['-r6:3', '-r10:7'], expect, '7')\n expect = expected_out_and_err(tgt_ospath,\n '-6', ['-6'],\n prop_conflicts=1)\n try_merge(target, 6, ['-r6:3', '-r10:7'], expect, '4,7-8,10')\n expect = expected_out_and_err(tgt_ospath,\n '6-4', ['-6,-4'],\n 
prop_conflicts=1)\n try_merge(target, 4, ['-r6:3', '-r10:7'], expect, '7-8,10')\n expect = expected_out_and_err(tgt_ospath,\n '6-4,-10', ['-6,-4', '-10'],\n prop_conflicts=1)\n try_merge(target, 10, ['-r6:3', '-r10:7'], expect, '7-8')\n expect = expected_out_and_err(tgt_ospath,\n '6-4,10-8', ['-6,-4', '-10,-8'],\n prop_conflicts=1, expect_error=False)\n try_merge(target, 8, ['-r6:3', '-r10:7'], expect, '7')\n\n@SkipUnless(server_has_mergeinfo)\n@Issue(4310)\n# Test for issue #4310 \"each editor drive gets its own notification\n# during 'svn merge'\"\ndef multiple_editor_drive_merge_notifications(sbox):\n \"each editor drive gets its own notification\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n iota_branch_path = sbox.ospath('iota-copy')\n C_branch_path = sbox.ospath('branch')\n\n # Branch a file and a directory:\n\n # r2\n sbox.simple_copy('iota', 'iota-copy')\n sbox.simple_commit()\n\n # r3\n sbox.simple_copy('A/C', 'branch')\n sbox.simple_commit()\n\n # r4-8 - Set five non-conflicting properties on the branch parents.\n for i in range(0,5):\n sbox.simple_propset('foo' + str(i) , 'bar', 'iota')\n sbox.simple_propset('foo' + str(i) , 'bar', 'A/C')\n sbox.simple_commit()\n\n # Cherry pick merge r5 and r6 to each branch and commit.\n svntest.actions.run_and_verify_svn(None, [], 'merge', '^/iota',\n '-c', '5,7', iota_branch_path)\n svntest.actions.run_and_verify_svn(None, [], 'merge', '^/A/C',\n '-c', '5,7', C_branch_path)\n sbox.simple_commit()\n\n # Now auto merge all eligible revisions to each branch.\n # First the directory target:\n #\n # TODO: We don't use run_and_verify_merge here because it has limitations\n # re checking the merge notification headers -- which need to be improved\n # at some point.\n svntest.actions.run_and_verify_svn(\n [\"--- Merging r2 through r4 into '\" + C_branch_path + \"':\\n\",\n \" U \" + C_branch_path + \"\\n\",\n \"--- Merging r6 into '\" + C_branch_path + \"':\\n\",\n \" U \" + C_branch_path + \"\\n\",\n \"--- Merging r8 through r9 into '\" + C_branch_path + \"':\\n\",\n \" U \" + C_branch_path + \"\\n\",\n \"--- Recording mergeinfo for merge of r2 through r9 into '\" +\n C_branch_path + \"':\\n\",\n \" U \" + C_branch_path + \"\\n\"],\n [], 'merge', sbox.repo_url + '/A/C', C_branch_path)\n\n # Then the file target:\n # Previously this failed because only the first range notification was\n # printed:\n #\n # >svn merge ^/iota iota-copy\n # --- Merging r2 through r4 into 'iota-copy':\n # U iota-copy\n # U iota-copy\n # U iota-copy\n # --- Recording mergeinfo for merge of r2 through r9 into 'iota-copy':\n # U iota-copy\n #\n # This is what we expect:\n #\n # --- Merging r2 through r4 into 'iota-copy':\n # U iota-copy\n # --- Merging r6 into 'iota-copy': <-- 2nd editor drive\n # U iota-copy\n # --- Merging r8 through r9 into 'iota-copy': <-- 3rd editor drive\n # U iota-copy\n # --- Recording mergeinfo for merge of r2 through r9 into 'iota-copy':\n # U iota-copy\n svntest.actions.run_and_verify_svn(\n [\"--- Merging r2 through r4 into '\" + iota_branch_path + \"':\\n\",\n \" U \" + iota_branch_path + \"\\n\",\n \"--- Merging r6 into '\" + iota_branch_path + \"':\\n\",\n \" U \" + iota_branch_path + \"\\n\",\n \"--- Merging r8 through r9 into '\" + iota_branch_path + \"':\\n\",\n \" U \" + iota_branch_path + \"\\n\",\n \"--- Recording mergeinfo for merge of r2 through r9 into '\" +\n iota_branch_path + \"':\\n\",\n \" U \" + iota_branch_path + \"\\n\"],\n [], 'merge', sbox.repo_url + '/iota', 
iota_branch_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\n@Issue(4317)\n# Test for issue #4317 \"redundant notifications in single editor drive merge\".\ndef single_editor_drive_merge_notifications(sbox):\n \"single editor drive merge notifications\"\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n wc_dir = sbox.wc_dir\n\n A_copy_path = sbox.ospath('A_COPY')\n D_copy_path = sbox.ospath('A_COPY/D')\n psi_copy_path = sbox.ospath('A_COPY/D/H/psi')\n omega_copy_path = sbox.ospath('A_COPY/D/H/omega')\n beta_copy_path = sbox.ospath('A_COPY/B/E/beta')\n\n # r2 - r6: Copy A to A_COPY and then make some text changes under A.\n set_up_branch(sbox)\n\n # r7 - Subtree merge\n svntest.actions.run_and_verify_svn(None, [], 'merge', '^/A/D',\n '-c4', D_copy_path)\n sbox.simple_commit()\n sbox.simple_update()\n\n # Previously this failed because of redundant merge notifications\n # for r4-7:\n #\n # >svn merge ^/A A_COPY\n # --- Merging r2 through r3 into 'A_COPY\\D':\n # U A_COPY\\D\\H\\psi\n # --- Merging r5 through r7 into 'A_COPY\\D':\n # U A_COPY\\D\\H\\omega\n # --- Merging r4 through r7 into 'A_COPY':\n # U A_COPY\\B\\E\\beta\n # --- Recording mergeinfo for merge of r2 through r7 into 'A_COPY':\n # U A_COPY\n # --- Recording mergeinfo for merge of r2 through r7 into 'A_COPY\\D':\n # U A_COPY\\D\n # --- Eliding mergeinfo from 'A_COPY\\D':\n # U A_COPY\\D\n #\n # The order of 'beta' and 'omega' can vary, so use UnorderedOutput. This\n # raises the possibility that the test could spuriously pass if the 'U'pdate\n # notifications aren't grouped with the correct headers, but that's not what\n # is being tested here.\n expected_output = svntest.verify.UnorderedOutput(\n [\"--- Merging r2 through r3 into '\" + A_copy_path + \"':\\n\",\n \"U \" + psi_copy_path + \"\\n\",\n \"--- Merging r4 through r7 into '\" + A_copy_path + \"':\\n\",\n \"U \" + omega_copy_path + \"\\n\",\n \"U \" + beta_copy_path + \"\\n\",\n \"--- Recording mergeinfo for merge of r2 through r7 into '\" +\n A_copy_path + \"':\\n\",\n \" U \" + A_copy_path + \"\\n\",\n \"--- Recording mergeinfo for merge of r2 through r7 into '\" +\n D_copy_path + \"':\\n\",\n \" U \" + D_copy_path + \"\\n\",\n \"--- Eliding mergeinfo from '\" + D_copy_path + \"':\\n\",\n \" U \" + D_copy_path + \"\\n\"])\n svntest.actions.run_and_verify_svn(expected_output, [], 'merge',\n sbox.repo_url + '/A', A_copy_path)\n\n # r8 and r9 - Commit and do reverse subtree merge.\n sbox.simple_commit()\n sbox.simple_update()\n svntest.actions.run_and_verify_svn(None, [], 'merge', '^/A/D',\n '-c-4', D_copy_path)\n sbox.simple_commit()\n\n # Now try a reverse merge. 
There should only be one notification for\n # r7-5:\n sbox.simple_update()\n expected_output = svntest.verify.UnorderedOutput(\n [\"--- Reverse-merging r7 through r5 into '\" + A_copy_path + \"':\\n\",\n \"U \" + beta_copy_path + \"\\n\",\n \"U \" + omega_copy_path + \"\\n\",\n \"--- Reverse-merging r4 through r3 into '\" + A_copy_path + \"':\\n\",\n \"U \" + psi_copy_path + \"\\n\",\n \"--- Recording mergeinfo for reverse merge of r7 through r3 into '\" +\n A_copy_path + \"':\\n\",\n \" U \" + A_copy_path + \"\\n\",\n \"--- Recording mergeinfo for reverse merge of r7 through r3 into '\" +\n D_copy_path + \"':\\n\",\n \" U \" + D_copy_path + \"\\n\",\n \"--- Eliding mergeinfo from '\" + D_copy_path + \"':\\n\",\n \" U \" + D_copy_path + \"\\n\"])\n svntest.actions.run_and_verify_svn(expected_output, [], 'merge',\n '-r9:2', sbox.repo_url + '/A',\n A_copy_path)\n\n@SkipUnless(server_has_mergeinfo)\n@Issue(4316) # 'Merge errors out after resolving conflicts'\n# Very similar to conflict_aborted_mergeinfo_described_partial_merge()\n# (test number 135), except here we tell the merge to resolve the\n# conflicts that are generated part way through a multi-revision-range\n# merge, and we expect it to continue with the rest of the merge.\ndef conflicted_split_merge_with_resolve(sbox):\n \"conflicted split merge with resolve\"\n\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n trunk = 'A'\n branch = 'A2'\n file = 'mu'\n dir = 'B'\n trunk_file = 'A/mu'\n trunk_dir = 'A/B'\n\n # r2: initial state\n for rev in range(4, 11):\n sbox.simple_propset('prop-' + str(rev), 'Old pval ' + str(rev),\n trunk_file, trunk_dir)\n sbox.simple_commit()\n\n # r3: branch\n sbox.simple_update()\n sbox.simple_copy(trunk, branch)\n sbox.simple_commit()\n\n zero_rev = 3\n\n def edit_file_or_dir(path, rev, val):\n \"\"\"Make a local edit to the file at PATH.\"\"\"\n sbox.simple_propset('prop-' + str(rev), val + ' pval ' + str(rev), path)\n\n # r4 through r10: simple edits\n for rev in range(4, 11):\n edit_file_or_dir(trunk_file, rev, 'Edited')\n edit_file_or_dir(trunk_dir, rev, 'Edited')\n sbox.simple_commit()\n\n # r14: merge some changes to the branch so that later merges will be split\n svntest.actions.run_and_verify_svn(None, [], 'merge', '-c5,9',\n '^/' + trunk, sbox.ospath(branch),\n '--accept', 'theirs-conflict')\n sbox.simple_commit()\n sbox.simple_update()\n\n def revert_branch():\n svntest.actions.run_and_verify_svn(None, [], 'revert', '-R',\n sbox.ospath(branch))\n\n def try_merge(relpath, conflict_rev, rev_args,\n expected_out_err, expected_mi):\n \"\"\"Revert RELPATH in the branch; make a change that will conflict\n with CONFLICT_REV if not None; merge RELPATH in the trunk\n to RELPATH in the branch using revision arguments REV_ARGS (list of\n '-r...' or '-c...' strings).\n\n EXPECTED_OUT_ERR_MI is a tuple: (expected_out, expected_err,\n expected_mi). EXPECTED_OUT and EXPECTED_ERR are instances of\n ExpectedOutput.\n\n Expect to find mergeinfo EXPECTED_MI if not None. 
EXPECTED_MI is\n a single mergeinfo-string.\n \"\"\"\n src_path = trunk + '/' + relpath\n tgt_path = branch + '/' + relpath\n tgt_ospath = sbox.ospath(tgt_path)\n\n expected_out, expected_err = expected_out_err\n\n revert_branch()\n\n # Arrange for the merge to conflict at CONFLICT_REV.\n if conflict_rev:\n edit_file_or_dir(tgt_path, conflict_rev, 'Conflict')\n\n src_url = '^/' + src_path + '@11'\n svntest.actions.run_and_verify_svn(\n expected_out, expected_err,\n 'merge', src_url, tgt_ospath, '--accept', 'mine-full',\n *rev_args)\n\n if expected_mi is not None:\n expected_mergeinfo = ['/' + src_path + ':' + expected_mi + '\\n']\n check_mergeinfo(expected_mergeinfo, tgt_ospath)\n\n # In a mergeinfo-aware merge, each specified revision range is split\n # internally into sub-ranges, to avoid any already-merged revisions.\n #\n # From white-box inspection, we see there are code paths that treat\n # the last specified range and the last sub-range specially. The\n # first specified range or sub-range is not treated specially in terms\n # of the code paths, although it might be in terms of data flow.\n #\n # We test merges that raise a conflict in the first and last sub-range\n # of the first and last specified range.\n\n for target in [file, dir]:\n\n tgt_ospath = sbox.ospath(branch + '/' + target)\n\n # First test: Merge \"everything\" to the branch.\n #\n # This merge is split into three sub-ranges: r3-4, r6-8, r10-head.\n # We have arranged that the merge will raise a conflict in the first\n # sub-range. Since we are postponing conflict resolution, the merge\n # should stop after the first sub-range, allowing us to resolve and\n # repeat the merge at which point the next sub-range(s) can be merged.\n # The mergeinfo on the target then should only reflect that the first\n # sub-range (r3-4) has been merged.\n expect = expected_out_and_err(tgt_ospath,\n '3-4,6-11',\n ['3-4', '6-8,10-11'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 4, [], expect, '3-11')\n\n # Try a multiple-range merge that raises a conflict in the\n # first sub-range in the first specified range;\n expect = expected_out_and_err(tgt_ospath,\n '4,6,8-10',\n ['4', '6', '8,10'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 4, ['-c4-6,8-10'], expect, '4-6,8-10')\n # last sub-range in the first specified range;\n expect = expected_out_and_err(tgt_ospath,\n '4-6,8-10', ['4,6', '8,10'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 6, ['-c4-6,8-10'], expect, '4-6,8-10')\n # first sub-range in the last specified range;\n expect = expected_out_and_err(tgt_ospath,\n '4-6,8,10',\n ['4,6', '8', '10'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 8, ['-c4-6,8-10'], expect, '4-6,8-10')\n # last sub-range in the last specified range.\n # (Expect no error, because 'svn merge' does not throw an error if\n # there is no more merging to do when a conflict occurs.)\n expect = expected_out_and_err(tgt_ospath,\n '4-6,8-10', ['4,6', '8,10'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 10, ['-c4-6,8-10'], expect, '4-6,8-10')\n\n # Try similar merges but involving ranges in reverse order.\n expect = expected_out_and_err(tgt_ospath,\n '8', ['8'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 8, ['-c8-10,4-6'], expect, '4-6,8-10')\n expect = expected_out_and_err(tgt_ospath,\n '8-10', ['8,10'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 10, ['-c8-10,4-6'], expect, '4-6,8-10')\n expect = expected_out_and_err(tgt_ospath,\n '8-10,4', ['8,10', '4'],\n 
prop_resolved=1, expect_error=False)\n try_merge(target, 4, ['-c8-10,4-6'], expect, '4-6,8-10')\n expect = expected_out_and_err(tgt_ospath,\n '8-10,4-6', ['8,10', '4,6'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 6, ['-c8-10,4-6'], expect, '4-6,8-10')\n\n # Try some reverse merges, with ranges in forward and reverse order.\n #\n # Reverse merges start with all source changes merged except 5 and 9.\n revert_branch()\n simple_merge(trunk + '/' + target, sbox.ospath(branch + '/' + target),\n ['-c-5,-9,4,6-8,10'])\n sbox.simple_commit()\n sbox.simple_update()\n\n expect = expected_out_and_err(tgt_ospath,\n '6-4,10-8', ['-6,-4', '-10,-8'],\n expect_error=False)\n try_merge(target, None, ['-r6:3', '-r10:7'], expect, '7')\n expect = expected_out_and_err(tgt_ospath,\n '-6,-4,10-8',\n ['-6', '-4', '-10,-8'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 6, ['-r6:3', '-r10:7'], expect, '7')\n expect = expected_out_and_err(tgt_ospath,\n '6-4,10-8', ['-6,-4', '-10,-8'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 4, ['-r6:3', '-r10:7'], expect, '7')\n expect = expected_out_and_err(tgt_ospath,\n '6-4,-10,-8',\n ['-6,-4', '-10', '-8'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 10, ['-r6:3', '-r10:7'], expect, '7')\n expect = expected_out_and_err(tgt_ospath,\n '6-4,10-8', ['-6,-4', '-10,-8'],\n prop_resolved=1, expect_error=False)\n try_merge(target, 8, ['-r6:3', '-r10:7'], expect, '7')\n\n#----------------------------------------------------------------------\n# Test for issue 4367 'merge to shallow WC, repeat merge to infinite\n# depth WC is broken'.\n@SkipUnless(server_has_mergeinfo)\n@Issues(4367)\ndef merge_to_empty_target_merge_to_infinite_target(sbox):\n \"repeat merge to infinite depth WC conflicts\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n wc_disk, wc_status = set_up_branch(sbox, branch_only=True)\n A_COPY_path = sbox.ospath('A_COPY')\n C_COPY_path = sbox.ospath('A_COPY/C')\n E_path = sbox.ospath('A/B/E')\n J_path = sbox.ospath('A/C/J')\n K_path = sbox.ospath('A/C/J/K')\n nu1_path = sbox.ospath('A/C/J/nu1')\n nu2_path = sbox.ospath('A/C/J/K/nu2')\n L_path = sbox.ospath('A/B/L')\n nu3_path = sbox.ospath('A/B/L/nu3')\n\n B1_path = sbox.ospath('A/B/B1')\n B1a_path = sbox.ospath('A/B/B1/B1a')\n test1_path = sbox.ospath('A/B/B1/test.txt')\n test2_path = sbox.ospath('A/B/B1/B1a/test.txt')\n\n C1_path = sbox.ospath('A/C/C1')\n test3_path = sbox.ospath('A/C/C1/test.txt')\n\n # r3 - Add some subtrees:\n # A /A/B/B1\n # A /A/B/B1/B1a\n # A /A/B/B1/B1a/test.txt\n # A /A/B/B1/test.txt\n svntest.main.run_svn(None, 'mkdir', B1_path)\n svntest.main.run_svn(None, 'mkdir', B1a_path)\n svntest.main.file_append(test1_path, \"New file.\\n\")\n svntest.main.file_append(test2_path, \"New file.\\n\")\n svntest.main.run_svn(None, 'add', test1_path, test2_path)\n sbox.simple_commit()\n\n # r4 - Add some another subtree.\n # A /A/C/C1\n # A /A/C/C1/test.txt\n svntest.main.run_svn(None, 'mkdir', C1_path)\n svntest.main.file_append(test3_path, \"New file.\\n\")\n svntest.main.run_svn(None, 'add', test3_path)\n sbox.simple_commit()\n\n # r5 - Delete part of the subtree added in r3.\n # D /A/B/B1/B1a\n svntest.main.run_svn(None, 'del', B1a_path)\n sbox.simple_commit()\n\n # r6 - Set depth of A_COPY to empty, merge all available revs from ^/A.\n svntest.actions.run_and_verify_svn(None, [], 'up',\n '--set-depth=empty', A_COPY_path)\n svntest.actions.run_and_verify_svn(None, [], 'up',\n '--set-depth=infinity', C_COPY_path)\n svntest.actions.run_and_verify_svn(None, 
[], 'merge', '^/A',\n A_COPY_path)\n sbox.simple_commit()\n\n # Update A_COPY back to depth infinity and retry the prior merge.\n svntest.actions.run_and_verify_svn(None, [], 'up',\n '--set-depth=infinity', A_COPY_path)\n\n expected_output = wc.State(A_COPY_path, {\n 'B/B1' : Item(status='A '),\n 'B/B1/test.txt' : Item(status='A '),\n 'B/B1/B1a' : Item(status='D ', prev_status='A '),\n 'B/B1/B1a/test.txt' : Item(status='A '),\n })\n expected_mergeinfo_output = wc.State(A_COPY_path, {\n '' : Item(status=' U'),\n 'B' : Item(status=' G'),\n })\n expected_elision_output = wc.State(A_COPY_path, {\n 'B' : Item(status=' U'),\n })\n expected_status = wc.State(A_COPY_path, {\n '' : Item(status=' M'),\n 'B' : Item(status=' '),\n 'mu' : Item(status=' '),\n 'B/B1' : Item(status='A ', copied='+'),\n 'B/B1/test.txt' : Item(status=' ', copied='+'),\n 'B/B1/B1a' : Item(status='D ', copied='+'),\n 'B/B1/B1a/test.txt' : Item(status='D ', copied='+'),\n 'B/E' : Item(status=' '),\n 'B/E/alpha' : Item(status=' '),\n 'B/E/beta' : Item(status=' '),\n 'B/lambda' : Item(status=' '),\n 'B/F' : Item(status=' '),\n 'C' : Item(status=' '),\n 'C/C1' : Item(status=' '),\n 'C/C1/test.txt' : Item(status=' '),\n 'D' : Item(status=' '),\n 'D/G' : Item(status=' '),\n 'D/G/pi' : Item(status=' '),\n 'D/G/rho' : Item(status=' '),\n 'D/G/tau' : Item(status=' '),\n 'D/gamma' : Item(status=' '),\n 'D/H' : Item(status=' '),\n 'D/H/chi' : Item(status=' '),\n 'D/H/psi' : Item(status=' '),\n 'D/H/omega' : Item(status=' '),\n })\n expected_status.tweak(wc_rev=6)\n expected_status.tweak('B/B1', 'B/B1/test.txt', 'B/B1/B1a',\n 'B/B1/B1a/test.txt', wc_rev='-')\n expected_disk = wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A:2-6'}),\n 'B' : Item(),\n 'mu' : Item(\"This is the file 'mu'.\\n\"),\n 'B/B1' : Item(),\n 'B/B1/test.txt' : Item(\"New file.\\n\"),\n 'B/E' : Item(),\n 'B/E/alpha' : Item(\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'C' : Item(props={SVN_PROP_MERGEINFO : '/A/C:2-5'}),\n 'C/C1' : Item(),\n 'C/C1/test.txt' : Item(\"New file.\\n\"),\n 'D' : Item(),\n 'D/G' : Item(),\n 'D/G/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'D/G/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'D/G/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'D/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'D/H' : Item(),\n 'D/H/chi' : Item(\"This is the file 'chi'.\\n\"),\n 'D/H/psi' : Item(\"This is the file 'psi'.\\n\"),\n 'D/H/omega' : Item(\"This is the file 'omega'.\\n\"),\n })\n expected_skip = wc.State(A_COPY_path, { })\n svntest.actions.run_and_verify_merge(A_COPY_path, None, None,\n sbox.repo_url + '/A', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_disk,\n expected_status,\n expected_skip,\n [], True, False)\n\n # Commit the merge.\n #sbox.simple_commit()\n\ndef conflict_naming(sbox):\n \"verify conflict file naming\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n sbox.simple_append('file.txt', 'This is the initial content\\n')\n sbox.simple_add('file.txt')\n sbox.simple_commit()\n\n sbox.simple_append('file.txt', 'This is the new content\\n', truncate=True)\n sbox.simple_commit()\n\n sbox.simple_append('file.txt', 'This is conflicting content\\n', truncate=True)\n\n # Update - no preserve ext\n expected_status = svntest.actions.get_virginal_state(wc_dir, 2)\n expected_disk = svntest.main.greek_state.copy()\n expected_output = svntest.wc.State(wc_dir, {\n 'file.txt' : 
Item(status='C ')\n })\n expected_status.add({\n 'file.txt' : Item(status='C ', wc_rev='2')\n })\n\n expected_disk.add({\n 'file.txt.r3' : Item(contents=\"This is the new content\\n\"),\n 'file.txt.r2' : Item(contents=\"This is the initial content\\n\"),\n 'file.txt' : Item(contents=\"<<<<<<< .mine\\n\" \\\n \"This is conflicting content\\n\" \\\n \"||||||| .r3\\n\" \\\n \"This is the new content\\n\" \\\n \"=======\\n\" \\\n \"This is the initial content\\n\" \\\n \">>>>>>> .r2\\n\"),\n 'file.txt.mine' : Item(contents=\"This is conflicting content\\n\"),\n })\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output, expected_disk,\n expected_status,\n [], False,\n wc_dir, '-r', '2')\n\n sbox.simple_revert('file.txt')\n sbox.simple_update('', revision=3)\n sbox.simple_append('file.txt', 'This is conflicting content\\n', truncate=True)\n\n # Update - preserve ext\n expected_status = svntest.actions.get_virginal_state(wc_dir, 2)\n expected_disk = svntest.main.greek_state.copy()\n expected_output = svntest.wc.State(wc_dir, {\n 'file.txt' : Item(status='C ')\n })\n expected_status.add({\n 'file.txt' : Item(status='C ', wc_rev='2')\n })\n\n expected_disk.add({\n 'file.txt.r3.txt' : Item(contents=\"This is the new content\\n\"),\n 'file.txt.r2.txt' : Item(contents=\"This is the initial content\\n\"),\n 'file.txt' : Item(contents=\"<<<<<<< .mine.txt\\n\" \\\n \"This is conflicting content\\n\" \\\n \"||||||| .r3.txt\\n\" \\\n \"This is the new content\\n\" \\\n \"=======\\n\" \\\n \"This is the initial content\\n\" \\\n \">>>>>>> .r2.txt\\n\"),\n 'file.txt.mine.txt' : Item(contents=\"This is conflicting content\\n\"),\n })\n svntest.actions.run_and_verify_update(\n wc_dir,\n expected_output, expected_disk, expected_status,\n [], False,\n wc_dir, '-r', '2',\n '--config-option',\n 'config:miscellany:preserved-conflict-file-exts=' +\n 'c txt h')\n\n sbox.simple_revert('file.txt')\n sbox.simple_update('', revision=3)\n sbox.simple_append('file.txt', 'This is conflicting content\\n', truncate=True)\n\n # Merge - no preserve ext\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_disk = svntest.main.greek_state.copy()\n expected_status.add({\n 'file.txt' : Item(status='C ', wc_rev='3')\n })\n expected_disk.add({\n 'file.txt.merge-left.r3' : Item(contents=\"This is the new content\\n\"),\n 'file.txt.merge-right.r2': Item(contents=\"This is the initial content\\n\"),\n 'file.txt' : Item(contents=\"<<<<<<< .working\\n\" \\\n \"This is conflicting content\\n\" \\\n \"||||||| .merge-left.r3\\n\" \\\n \"This is the new content\\n\" \\\n \"=======\\n\" \\\n \"This is the initial content\\n\" \\\n \">>>>>>> .merge-right.r2\\n\"),\n 'file.txt.working' : Item(contents=\"This is conflicting content\\n\"),\n })\n\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c-3', '^/', sbox.ospath(''))\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n svntest.actions.verify_disk(wc_dir, expected_disk)\n\n sbox.simple_revert('file.txt')\n sbox.simple_append('file.txt', 'This is conflicting content\\n', truncate=True)\n\n # Merge - preserve ext\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_disk = svntest.main.greek_state.copy()\n expected_status.add({\n 'file.txt' : Item(status='C ', wc_rev='3')\n })\n expected_disk.add({\n 'file.txt.merge-left.r3.txt' : Item(contents=\"This is the new content\\n\"),\n 'file.txt.merge-right.r2.txt': Item(contents=\"This is the initial content\\n\"),\n 'file.txt' : Item(contents=\"<<<<<<< 
.working.txt\\n\" \\\n \"This is conflicting content\\n\" \\\n \"||||||| .merge-left.r3.txt\\n\" \\\n \"This is the new content\\n\" \\\n \"=======\\n\" \\\n \"This is the initial content\\n\" \\\n \">>>>>>> .merge-right.r2.txt\\n\"),\n 'file.txt.working.txt' : Item(contents=\"This is conflicting content\\n\"),\n })\n\n svntest.actions.run_and_verify_svn(\n None, [],\n 'merge', '-c-3', '^/', sbox.ospath(''),\n '--config-option',\n 'config:miscellany:preserved-conflict-file-exts=' +\n 'c txt h')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n svntest.actions.verify_disk(wc_dir, expected_disk)\n\ndef merge_dir_delete_force(sbox):\n \"merge a directory delete with --force\"\n\n sbox.build()\n\n sbox.simple_rm('A/D/G')\n sbox.simple_commit() # r2\n\n sbox.simple_update(revision=1)\n\n # Just merging r2 on r1 succeeds\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c2', '^/', sbox.wc_dir,\n '--ignore-ancestry')\n\n # Bring working copy to r1 again\n svntest.actions.run_and_verify_svn(None, [],\n 'revert', '-R', sbox.wc_dir)\n\n # But when using --force this same merge caused a segfault in 1.8.0-1.8.8\n svntest.actions.run_and_verify_svn(None, [],\n 'merge', '-c2', '^/', sbox.wc_dir,\n '--ignore-ancestry', '--force')\n\n########################################################################\n# Run the tests\n\n\n# list all tests here, starting with None:\ntest_list = [ None,\n textual_merges_galore,\n add_with_history,\n simple_property_merges,\n merge_with_implicit_target_using_r,\n merge_with_implicit_target_using_c,\n merge_with_implicit_target_and_revs,\n merge_similar_unrelated_trees,\n merge_with_prev,\n merge_binary_file,\n merge_one_file_using_r,\n merge_one_file_using_c,\n merge_one_file_using_implicit_revs,\n merge_record_only,\n merge_in_new_file_and_diff,\n merge_skips_obstructions,\n merge_into_missing,\n dry_run_adds_file_with_prop,\n merge_binary_with_common_ancestry,\n merge_funny_chars_on_path,\n merge_keyword_expansions,\n merge_prop_change_to_deleted_target,\n merge_file_with_space_in_its_name,\n merge_dir_branches,\n safe_property_merge,\n property_merge_from_branch,\n property_merge_undo_redo,\n cherry_pick_text_conflict,\n merge_file_replace,\n merge_dir_replace,\n merge_dir_and_file_replace,\n merge_file_replace_to_mixed_rev_wc,\n merge_ignore_whitespace,\n merge_ignore_eolstyle,\n merge_conflict_markers_matching_eol,\n merge_eolstyle_handling,\n avoid_repeated_merge_using_inherited_merge_info,\n avoid_repeated_merge_on_subtree_with_merge_info,\n obey_reporter_api_semantics_while_doing_subtree_merges,\n mergeinfo_inheritance,\n mergeinfo_elision,\n mergeinfo_inheritance_and_discontinuous_ranges,\n merge_to_target_with_copied_children,\n merge_to_switched_path,\n merge_to_path_with_switched_children,\n merge_with_implicit_target_file,\n empty_mergeinfo,\n prop_add_to_child_with_mergeinfo,\n foreign_repos_does_not_update_mergeinfo,\n avoid_reflected_revs,\n update_loses_mergeinfo,\n merge_loses_mergeinfo,\n single_file_replace_style_merge_capability,\n merge_to_out_of_date_target,\n merge_with_depth_files,\n merge_away_subtrees_noninheritable_ranges,\n merge_to_sparse_directories,\n merge_old_and_new_revs_from_renamed_dir,\n merge_with_child_having_different_rev_ranges_to_merge,\n merge_old_and_new_revs_from_renamed_file,\n merge_with_auto_rev_range_detection,\n cherry_picking,\n propchange_of_subdir_raises_conflict,\n reverse_merge_prop_add_on_child,\n merge_target_with_non_inheritable_mergeinfo,\n self_reverse_merge,\n 
ignore_ancestry_and_mergeinfo,\n merge_from_renamed_branch_fails_while_avoiding_repeat_merge,\n merge_source_normalization_and_subtree_merges,\n new_subtrees_should_not_break_merge,\n dont_add_mergeinfo_from_own_history,\n merge_range_predates_history,\n foreign_repos,\n foreign_repos_uuid,\n foreign_repos_2_url,\n merge_added_subtree,\n merge_unknown_url,\n reverse_merge_away_all_mergeinfo,\n dont_merge_revs_into_subtree_that_predate_it,\n merge_chokes_on_renamed_subtrees,\n dont_explicitly_record_implicit_mergeinfo,\n merge_broken_link,\n subtree_merges_dont_intersect_with_targets,\n subtree_source_missing_in_requested_range,\n subtrees_with_empty_mergeinfo,\n commit_to_subtree_added_by_merge,\n del_identical_file,\n del_sched_add_hist_file,\n subtree_merges_dont_cause_spurious_conflicts,\n merge_target_and_subtrees_need_nonintersecting_ranges,\n merge_two_edits_to_same_prop,\n merge_an_eol_unification_and_set_svn_eol_style,\n merge_adds_mergeinfo_correctly,\n natural_history_filtering,\n subtree_gets_changes_even_if_ultimately_deleted,\n no_self_referential_filtering_on_added_path,\n merge_range_prior_to_rename_source_existence,\n dont_merge_gaps_in_history,\n mergeinfo_deleted_by_a_merge_should_disappear,\n noop_file_merge,\n handle_gaps_in_implicit_mergeinfo,\n copy_then_replace_via_merge,\n record_only_merge,\n merge_automatic_conflict_resolution,\n skipped_files_get_correct_mergeinfo,\n committed_case_only_move_and_revert,\n merge_into_wc_for_deleted_branch,\n foreign_repos_del_and_props,\n immediate_depth_merge_creates_minimal_subtree_mergeinfo,\n record_only_merge_creates_self_referential_mergeinfo,\n dav_skelta_mode_causes_spurious_conflicts,\n merge_into_locally_added_file,\n merge_into_locally_added_directory,\n merge_with_os_deleted_subtrees,\n no_self_referential_or_nonexistent_inherited_mergeinfo,\n subtree_merges_inherit_invalid_working_mergeinfo,\n merge_change_to_file_with_executable,\n dry_run_merge_conflicting_binary,\n foreign_repos_prop_conflict,\n merge_adds_subtree_with_mergeinfo,\n reverse_merge_adds_subtree,\n merged_deletion_causes_tree_conflict,\n record_only_merge_adds_new_subtree_mergeinfo,\n unnecessary_noninheritable_mergeinfo_missing_subtrees,\n unnecessary_noninheritable_mergeinfo_shallow_merge,\n svnmucc_abuse_1,\n merge_source_with_replacement,\n reverse_merge_with_rename,\n merge_adds_then_deletes_subtree,\n merge_with_added_subtrees_with_mergeinfo,\n merge_with_externals_with_mergeinfo,\n merge_binary_file_with_keywords,\n merge_conflict_when_keywords_removed,\n merge_target_selection,\n merge_properties_on_adds,\n conflict_aborted_mergeinfo_described_partial_merge,\n multiple_editor_drive_merge_notifications,\n single_editor_drive_merge_notifications,\n conflicted_split_merge_with_resolve,\n merge_to_empty_target_merge_to_infinite_target,\n conflict_naming,\n merge_dir_delete_force,\n ]\n\nif __name__ == '__main__':\n svntest.main.run_tests(test_list)\n # NOTREACHED\n\n\n### End of file.\n", "id": "2384887", "language": "Python", "matching_score": 6.958718776702881, "max_stars_count": 0, "path": "subversion/tests/cmdline/merge_tests.py" }, { "content": "#!/usr/bin/env python\n#\n# switch_tests.py: testing `svn switch'.\n#\n# Subversion is a tool for revision control.\n# See http://subversion.apache.org for more information.\n#\n# ====================================================================\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. 
See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n######################################################################\n\n# General modules\nimport shutil, re, os\n\n# Our testing module\nimport svntest\nfrom svntest import verify, actions, main, deeptrees\n\n# (abbreviation)\nSkip = svntest.testcase.Skip_deco\nSkipUnless = svntest.testcase.SkipUnless_deco\nXFail = svntest.testcase.XFail_deco\nIssues = svntest.testcase.Issues_deco\nIssue = svntest.testcase.Issue_deco\nWimp = svntest.testcase.Wimp_deco\nItem = svntest.wc.StateItem\n\nfrom svntest.main import SVN_PROP_MERGEINFO, server_has_mergeinfo\nfrom svntest.deeptrees import do_routine_switching, commit_routine_switching, \\\n get_routine_disk_state, get_routine_status_state\n\n######################################################################\n# Tests\n#\n\n#----------------------------------------------------------------------\n\ndef routine_switching(sbox):\n \"test some basic switching operations\"\n\n sbox.build(read_only = True)\n\n # Setup (and verify) some switched things\n do_routine_switching(sbox.wc_dir, sbox.repo_url, 1)\n\n\n#----------------------------------------------------------------------\n\ndef commit_switched_things(sbox):\n \"commits after some basic switching operations\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Setup some switched things (don't bother verifying)\n do_routine_switching(wc_dir, sbox.repo_url, 0)\n\n # Commit some stuff (and verify)\n commit_routine_switching(wc_dir, 1)\n\n\n#----------------------------------------------------------------------\n\ndef full_update(sbox):\n \"update wc that contains switched things\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Setup some switched things (don't bother verifying)\n do_routine_switching(wc_dir, sbox.repo_url, 0)\n\n # Copy wc_dir to a backup location\n wc_backup = sbox.add_wc_path('backup')\n svntest.actions.duplicate_dir(wc_dir, wc_backup)\n\n # Commit some stuff (don't bother verifying)\n commit_routine_switching(wc_backup, 0)\n\n # Some convenient path variables\n iota_path = sbox.ospath('iota')\n gamma_path = sbox.ospath('A/D/gamma')\n Bpi_path = sbox.ospath('A/B/pi')\n BZ_path = sbox.ospath('A/B/Z')\n Bzeta_path = sbox.ospath('A/B/Z/zeta')\n Gpi_path = sbox.ospath('A/D/G/pi')\n GZ_path = sbox.ospath('A/D/G/Z')\n Gzeta_path = sbox.ospath('A/D/G/Z/zeta')\n\n # Create expected output tree for an update of wc_backup.\n expected_output = svntest.wc.State(wc_dir, {\n 'iota' : Item(status='U '),\n 'A/D/gamma' : Item(status='U '),\n 'A/B/pi' : Item(status='U '),\n 'A/B/Z' : Item(status='A '),\n 'A/B/Z/zeta' : Item(status='A '),\n 'A/D/G/pi' : Item(status='U '),\n 'A/D/G/Z' : Item(status='A '),\n 'A/D/G/Z/zeta' : Item(status='A '),\n })\n\n # Create expected disk tree for the update\n expected_disk = get_routine_disk_state(wc_dir)\n expected_disk.tweak('iota', contents=\"This is the file 'gamma'.\\napple\")\n 
expected_disk.tweak('A/D/gamma', contents=\"This is the file 'gamma'.\\napple\")\n expected_disk.tweak('A/B/pi', contents=\"This is the file 'pi'.\\nmelon\")\n expected_disk.tweak('A/D/G/pi', contents=\"This is the file 'pi'.\\nmelon\")\n expected_disk.add({\n 'A/B/Z' : Item(),\n 'A/B/Z/zeta' : Item(contents=\"This is the file 'zeta'.\\n\"),\n 'A/D/G/Z' : Item(),\n 'A/D/G/Z/zeta' : Item(contents=\"This is the file 'zeta'.\\n\"),\n })\n\n # Create expected status tree for the update.\n expected_status = get_routine_status_state(wc_dir)\n expected_status.tweak(wc_rev=2)\n expected_status.add({\n 'A/D/G/Z' : Item(status=' ', wc_rev=2),\n 'A/D/G/Z/zeta' : Item(status=' ', wc_rev=2),\n 'A/B/Z' : Item(status=' ', wc_rev=2),\n 'A/B/Z/zeta' : Item(status=' ', wc_rev=2),\n })\n expected_status.tweak('iota', 'A/B', switched='S')\n\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n expected_disk,\n expected_status)\n\n#----------------------------------------------------------------------\n\ndef full_rev_update(sbox):\n \"reverse update wc that contains switched things\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Setup some switched things (don't bother verifying)\n do_routine_switching(wc_dir, sbox.repo_url, 0)\n\n # Commit some stuff (don't bother verifying)\n commit_routine_switching(wc_dir, 0)\n\n # Update to HEAD (tested elsewhere)\n svntest.main.run_svn(None, 'up', wc_dir)\n\n # Some convenient path variables\n iota_path = sbox.ospath('iota')\n gamma_path = sbox.ospath('A/D/gamma')\n Bpi_path = sbox.ospath('A/B/pi')\n BZ_path = sbox.ospath('A/B/Z')\n Gpi_path = sbox.ospath('A/D/G/pi')\n GZ_path = sbox.ospath('A/D/G/Z')\n\n # Now, reverse update, back to the pre-commit state.\n expected_output = svntest.wc.State(wc_dir, {\n 'iota' : Item(status='U '),\n 'A/D/gamma' : Item(status='U '),\n 'A/B/pi' : Item(status='U '),\n 'A/B/Z' : Item(status='D '),\n 'A/D/G/pi' : Item(status='U '),\n 'A/D/G/Z' : Item(status='D '),\n })\n\n # Create expected disk tree\n expected_disk = get_routine_disk_state(wc_dir)\n\n # Create expected status\n expected_status = get_routine_status_state(wc_dir)\n expected_status.tweak('iota', 'A/B', switched='S')\n\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n expected_disk,\n expected_status,\n [], True,\n '-r', '1', wc_dir)\n\n#----------------------------------------------------------------------\n\ndef update_switched_things(sbox):\n \"update switched wc things to HEAD\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Setup some switched things (don't bother verifying)\n do_routine_switching(wc_dir, sbox.repo_url, 0)\n\n # Copy wc_dir to a backup location\n wc_backup = sbox.add_wc_path('backup')\n svntest.actions.duplicate_dir(wc_dir, wc_backup)\n\n # Commit some stuff (don't bother verifying)\n commit_routine_switching(wc_backup, 0)\n\n # Some convenient path variables\n iota_path = sbox.ospath('iota')\n B_path = sbox.ospath('A/B')\n\n # Create expected output tree for an update of wc_backup.\n expected_output = svntest.wc.State(wc_dir, {\n 'iota' : Item(status='U '),\n 'A/B/pi' : Item(status='U '),\n 'A/B/Z' : Item(status='A '),\n 'A/B/Z/zeta' : Item(status='A '),\n })\n\n # Create expected disk tree for the update\n expected_disk = get_routine_disk_state(wc_dir)\n expected_disk.tweak('iota', contents=\"This is the file 'gamma'.\\napple\")\n\n expected_disk.tweak('A/B/pi', contents=\"This is the file 'pi'.\\nmelon\")\n expected_disk.add({\n 'A/B/Z' : Item(),\n 'A/B/Z/zeta' : Item(\"This is the file 'zeta'.\\n\"),\n })\n\n # Create 
expected status tree for the update.\n expected_status = get_routine_status_state(wc_dir)\n expected_status.tweak('iota', 'A/B', switched='S')\n expected_status.tweak('A/B', 'A/B/pi', 'A/B/rho', 'A/B/tau', 'iota',\n wc_rev=2)\n expected_status.add({\n 'A/B/Z' : Item(status=' ', wc_rev=2),\n 'A/B/Z/zeta' : Item(status=' ', wc_rev=2),\n })\n\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n B_path,\n iota_path)\n\n\n#----------------------------------------------------------------------\n\ndef rev_update_switched_things(sbox):\n \"reverse update switched wc things to an older rev\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Setup some switched things (don't bother verifying)\n do_routine_switching(wc_dir, sbox.repo_url, 0)\n\n # Commit some stuff (don't bother verifying)\n commit_routine_switching(wc_dir, 0)\n\n # Some convenient path variables\n iota_path = sbox.ospath('iota')\n B_path = sbox.ospath('A/B')\n\n # Update to HEAD (tested elsewhere)\n svntest.main.run_svn(None, 'up', wc_dir)\n\n # Now, reverse update, back to the pre-commit state.\n expected_output = svntest.wc.State(wc_dir, {\n 'iota' : Item(status='U '),\n 'A/B/pi' : Item(status='U '),\n 'A/B/Z' : Item(status='D '),\n })\n\n # Create expected disk tree\n expected_disk = get_routine_disk_state(wc_dir)\n expected_disk.tweak('A/D/gamma', contents=\"This is the file 'gamma'.\\napple\")\n expected_disk.tweak('A/D/G/pi', contents=\"This is the file 'pi'.\\nmelon\")\n expected_disk.add({\n 'A/D/G/Z' : Item(),\n 'A/D/G/Z/zeta' : Item(\"This is the file 'zeta'.\\n\"),\n })\n\n # Create expected status tree for the update.\n expected_status = get_routine_status_state(wc_dir)\n expected_status.tweak(wc_rev=2)\n expected_status.tweak('iota', 'A/B', switched='S')\n expected_status.tweak('A/B', 'A/B/pi', 'A/B/rho', 'A/B/tau', 'iota',\n wc_rev=1)\n expected_status.add({\n 'A/D/G/Z' : Item(status=' ', wc_rev=2),\n 'A/D/G/Z/zeta' : Item(status=' ', wc_rev=2),\n })\n\n svntest.actions.run_and_verify_update(wc_dir,\n expected_output,\n expected_disk,\n expected_status,\n [], True,\n '-r', '1',\n B_path,\n iota_path)\n\n\n#----------------------------------------------------------------------\n\ndef log_switched_file(sbox):\n \"show logs for a switched file\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Setup some switched things (don't bother verifying)\n do_routine_switching(wc_dir, sbox.repo_url, 0)\n\n # edit and commit switched file 'iota'\n iota_path = sbox.ospath('iota')\n svntest.main.run_svn(None, 'ps', 'x', 'x', iota_path)\n svntest.main.run_svn(None,\n 'ci', '-m',\n 'set prop on switched iota',\n iota_path)\n\n # log switched file 'iota'\n exit_code, output, error = svntest.main.run_svn(None, 'log', iota_path)\n for line in output:\n if line.find(\"set prop on switched iota\") != -1:\n break\n else:\n raise svntest.Failure\n\n#----------------------------------------------------------------------\n\ndef delete_subdir(sbox):\n \"switch that deletes a sub-directory\"\n sbox.build()\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n A_url = sbox.repo_url + '/A'\n A2_url = sbox.repo_url + '/A2'\n A2_B_F_url = sbox.repo_url + '/A2/B/F'\n\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 2.\\n'], [],\n 'cp', '-m', 'make copy', A_url, A2_url)\n\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 3.\\n'], [],\n 'rm', '-m', 'delete subdir', A2_B_F_url)\n\n expected_output = 
svntest.wc.State(wc_dir, {\n 'A/B/F' : Item(status='D '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/B/F')\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_status.tweak('A', switched='S')\n expected_status.remove('A/B/F')\n expected_status.tweak('', 'iota', wc_rev=1)\n\n # Used to fail with a 'directory not locked' error for A/B/F\n svntest.actions.run_and_verify_switch(wc_dir, A_path, A2_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--ignore-ancestry')\n\n#----------------------------------------------------------------------\n# Issue 1532: Switch a file to a dir: can't switch it back to the file\n@XFail()\n@Issue(1532)\ndef file_dir_file(sbox):\n \"switch a file to a dir and back to the file\"\n sbox.build(read_only = True)\n wc_dir = sbox.wc_dir\n\n file_path = sbox.ospath('iota')\n file_url = sbox.repo_url + '/iota'\n dir_url = sbox.repo_url + '/A/C'\n\n svntest.actions.run_and_verify_svn(None, [], 'switch',\n '--ignore-ancestry', dir_url, file_path)\n if not os.path.isdir(file_path):\n raise svntest.Failure\n\n # The reason the following switch currently fails is that the node\n # is determined to be a 'root', because it is switched against its parent.\n # In this specific case the switch editor is designed to be rooted on the node\n # itself instead of its ancestor. If you would use sbox.ospath('A') for\n # file_path the switch works both ways.\n svntest.actions.run_and_verify_svn(None, [], 'switch',\n '--ignore-ancestry', file_url, file_path)\n if not os.path.isfile(file_path):\n raise svntest.Failure\n\n#----------------------------------------------------------------------\n# Issue 1751: \"svn switch --non-recursive\" does not switch existing files,\n# and generates the wrong URL for new files.\n\ndef nonrecursive_switching(sbox):\n \"non-recursive switch\"\n sbox.build()\n wc1_dir = sbox.wc_dir\n wc2_dir = os.path.join(wc1_dir, 'wc2')\n\n # \"Trunk\" will be the existing dir \"A/\", with existing file \"mu\".\n # \"Branch\" will be the new dir \"branch/version1/\", with added file \"newfile\".\n # \"wc1\" will hold the whole repository (including trunk and branch).\n # \"wc2\" will hold the \"trunk\" and then be switched to the \"branch\".\n # It is irrelevant that wc2 is located on disk as a sub-directory of wc1.\n trunk_url = sbox.repo_url + '/A'\n branch_url = sbox.repo_url + '/branch'\n version1_url = branch_url + '/version1'\n wc1_new_file = os.path.join(wc1_dir, 'branch', 'version1', 'newfile')\n wc2_new_file = os.path.join(wc2_dir, 'newfile')\n wc2_mu_file = os.path.join(wc2_dir, 'mu')\n wc2_B_dir = os.path.join(wc2_dir, 'B')\n wc2_C_dir = os.path.join(wc2_dir, 'C')\n wc2_D_dir = os.path.join(wc2_dir, 'D')\n\n # Check out the trunk as \"wc2\"\n svntest.main.run_svn(None, 'co', trunk_url, wc2_dir)\n\n # Make a branch, and add a new file, in \"wc_dir\" and repository\n svntest.main.run_svn(None,\n 'mkdir', '-m', '', branch_url)\n svntest.main.run_svn(None,\n 'cp', '-m', '', trunk_url, version1_url)\n svntest.main.run_svn(None,\n 'up', wc1_dir)\n svntest.main.file_append(wc1_new_file, \"This is the file 'newfile'.\\n\")\n svntest.main.run_svn(None, 'add', wc1_new_file)\n sbox.simple_commit()\n\n # Try to switch \"wc2\" to the branch (non-recursively)\n svntest.actions.run_and_verify_svn(None, [], 'switch', '-N',\n '--ignore-ancestry', version1_url, wc2_dir)\n\n # Check the URLs of the (not switched) directories.\n expected_infos = [\n { 'URL' : '.*/A/B$' },\n { 'URL' : '.*/A/C$' 
},\n { 'URL' : '.*/A/D$' },\n ]\n svntest.actions.run_and_verify_info(expected_infos,\n wc2_B_dir, wc2_C_dir, wc2_D_dir)\n\n # Check the URLs of the switched files.\n # (\"svn status -u\" might be a better check: it fails when newfile's URL\n # is bad, and shows \"S\" when mu's URL is wrong.)\n # mu: not switched\n expected_infos = [\n { 'URL' : '.*/branch/version1/mu$' },\n { 'URL' : '.*/branch/version1/newfile$' }, # newfile: wrong URL\n ]\n svntest.actions.run_and_verify_info(expected_infos,\n wc2_mu_file, wc2_new_file)\n\n\n#----------------------------------------------------------------------\ndef failed_anchor_is_target(sbox):\n \"anchor=target, try to replace a local-mod file\"\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Set up a switch from dir H, containing locally-modified file 'psi',\n # to dir G, containing a directory 'psi'. Expect a tree conflict.\n\n # Make a directory 'G/psi' in the repository.\n G_url = sbox.repo_url + '/A/D/G'\n G_psi_url = G_url + '/psi'\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 2.\\n'], [],\n 'mkdir', '-m', 'log msg', G_psi_url)\n\n # Modify the file 'H/psi' locally.\n H_path = sbox.ospath('A/D/H')\n psi_path = os.path.join(H_path, 'psi')\n svntest.main.file_append(psi_path, \"more text\")\n\n # This switch raises a tree conflict on 'psi', because of the local mods.\n svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],\n 'switch', '--ignore-ancestry',\n G_url, H_path)\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/D/H', switched='S', wc_rev=2)\n expected_status.tweak('A/D/H/psi', status='R ', copied='+',\n wc_rev='-', treeconflict='C')\n expected_status.remove('A/D/H/chi', 'A/D/H/omega')\n expected_status.add({\n 'A/D/H/pi' : Item(status=' ', wc_rev=2),\n 'A/D/H/tau' : Item(status=' ', wc_rev=2),\n 'A/D/H/rho' : Item(status=' ', wc_rev=2),\n })\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n # There was a bug whereby the failed switch left the wrong URL in\n # the target directory H. 
Check for that.\n expected_infos = [\n { 'URL' : '.*' + G_url + '$' },\n ]\n svntest.actions.run_and_verify_info(expected_infos, H_path)\n\n # Resolve tree conflict at psi.\n svntest.actions.run_and_verify_resolved([psi_path])\n\n # The switch should now be complete.\n ### Instead of \"treeconflict=None\" which means \"don't check\", we should\n # check \"treeconflict=' '\" but the test suite doesn't do the right thing.\n expected_status.tweak('A/D/H/psi', treeconflict=None)\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n#----------------------------------------------------------------------\n# Issue #1826 - svn switch temporarily drops invalid URLs into the entries\n# files (which become not-temporary if the switch fails).\ndef bad_intermediate_urls(sbox):\n \"bad intermediate urls in use\"\n sbox.build()\n wc_dir = sbox.wc_dir\n url = sbox.repo_url\n\n A = sbox.ospath('A')\n A_Z = sbox.ospath('A/Z')\n url_A_C = url + '/A/C'\n url_A_C_A = url + '/A/C/A'\n url_A_C_A_Z = url + '/A/C/A/Z'\n\n # We'll be switching our working copy to (a modified) A/C in the Greek tree.\n\n # First, make an extra subdirectory in C to match one in the root, plus\n # another one inside of that.\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 2.\\n'], [],\n 'mkdir', '-m', 'log msg',\n url_A_C_A, url_A_C_A_Z)\n\n # Now, we'll drop a conflicting path under the root.\n svntest.main.file_append(A_Z, 'Look, Mom, a ... tree conflict.')\n\n #svntest.factory.make(sbox, \"\"\"\n # svn switch url/A/C wc_dir\n # # svn info A\n # # check that we can recover from the tree conflict\n # rm A/Z\n # svn up\n # \"\"\")\n #exit(0)\n\n # svn switch url/A/C wc_dir\n expected_output = svntest.wc.State(wc_dir, {\n 'A/mu' : Item(status='D '),\n 'A/Z' : Item(status=' ', treeconflict='C'),\n 'A/C' : Item(status='D '),\n 'A/B' : Item(status='D '),\n 'A/D' : Item(status='D '),\n 'iota' : Item(status='D '),\n })\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('iota', 'A/B', 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha',\n 'A/B/F', 'A/B/lambda', 'A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi',\n 'A/D/G/tau', 'A/D/H', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H/chi',\n 'A/D/gamma', 'A/mu', 'A/C')\n expected_disk.add({\n 'A/Z' : Item(contents=\"Look, Mom, a ... 
tree conflict.\"),\n })\n\n expected_status = actions.get_virginal_state(wc_dir, 2)\n expected_status.remove('iota', 'A/B', 'A/B/E', 'A/B/E/beta', 'A/B/E/alpha',\n 'A/B/F', 'A/B/lambda', 'A/D', 'A/D/G', 'A/D/G/rho', 'A/D/G/pi',\n 'A/D/G/tau', 'A/D/H', 'A/D/H/psi', 'A/D/H/omega', 'A/D/H/chi',\n 'A/D/gamma', 'A/mu', 'A/C')\n expected_status.add({\n # Obstructed node is currently turned into a delete to allow resolving.\n 'A/Z' : Item(status='D ', treeconflict='C', wc_rev=2),\n })\n\n actions.run_and_verify_switch(wc_dir, wc_dir, url_A_C, expected_output,\n expected_disk, expected_status,\n [], False,\n '--ignore-ancestry')\n\n # However, the URL for wc/A should now reflect ^/A/C/A, not something else.\n expected_infos = [\n { 'URL' : '.*/A/C/A$' },\n ]\n svntest.actions.run_and_verify_info(expected_infos, A)\n\n\n # check that we can recover from the tree conflict\n # rm A/Z\n os.remove(A_Z)\n svntest.main.run_svn(None, 'revert', A_Z)\n\n # svn up\n expected_output = svntest.wc.State(wc_dir, {\n })\n\n expected_disk.tweak('A/Z', contents=None)\n\n expected_status.tweak(status=' ', wc_rev='2')\n expected_status.tweak('A/Z', treeconflict=None)\n\n actions.run_and_verify_update(wc_dir, expected_output, expected_disk,\n expected_status)\n\n\n\n\n#----------------------------------------------------------------------\n# Regression test for issue #1825: failed switch may corrupt\n# working copy\n@Issue(1825)\ndef obstructed_switch(sbox):\n \"obstructed switch\"\n #svntest.factory.make(sbox, \"\"\"svn cp -m msgcopy url/A/B/E url/A/B/Esave\n # svn rm A/B/E/alpha\n # svn commit\n # echo \"hello\" >> A/B/E/alpha\n # svn switch url/A/B/Esave A/B/E\n # svn status\n # svn info A/B/E/alpha\"\"\")\n sbox.build()\n wc_dir = sbox.wc_dir\n url = sbox.repo_url\n\n A_B_E = sbox.ospath('A/B/E')\n A_B_E_alpha = sbox.ospath('A/B/E/alpha')\n url_A_B_E = url + '/A/B/E'\n url_A_B_Esave = url + '/A/B/Esave'\n\n # svn cp -m msgcopy url/A/B/E url/A/B/Esave\n expected_stdout = [\n 'Committing transaction...\\n',\n 'Committed revision 2.\\n',\n ]\n\n actions.run_and_verify_svn2(expected_stdout, [], 0, 'cp', '-m',\n 'msgcopy', url_A_B_E, url_A_B_Esave)\n\n # svn rm A/B/E/alpha\n expected_stdout = ['D ' + A_B_E_alpha + '\\n']\n\n actions.run_and_verify_svn2(expected_stdout, [], 0, 'rm',\n A_B_E_alpha)\n\n # svn commit\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E/alpha' : Item(verb='Deleting'),\n })\n\n expected_status = actions.get_virginal_state(wc_dir, 1)\n expected_status.remove('A/B/E/alpha')\n\n actions.run_and_verify_commit(wc_dir, expected_output, expected_status)\n\n # echo \"hello\" >> A/B/E/alpha\n main.file_append(A_B_E_alpha, 'hello')\n\n # svn switch url/A/B/Esave A/B/E\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/E/alpha' : Item(status=' ', treeconflict='C'),\n })\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak('A/B/E/alpha', contents='hello')\n\n expected_status.add({\n 'A/B/E/alpha' : Item(status='D ', treeconflict='C', wc_rev=3),\n })\n expected_status.tweak('A/B/E', wc_rev='3', switched='S')\n expected_status.tweak('A/B/E/beta', wc_rev='3')\n\n actions.run_and_verify_switch(wc_dir, A_B_E, url_A_B_Esave,\n expected_output, expected_disk,\n expected_status,\n [], False, '--ignore-ancestry')\n\n # svn status\n expected_status.add({\n 'A/B/Esave' : Item(status=' '),\n 'A/B/Esave/beta' : Item(status=' '),\n 'A/B/Esave/alpha' : Item(status=' '),\n })\n\n actions.run_and_verify_unquiet_status(wc_dir, expected_status)\n\n # svn info A/B/E/alpha\n expected_stdout = 
verify.RegexOutput(\n \".*local file unversioned, incoming file add upon switch\",\n match_all=False)\n actions.run_and_verify_svn2(expected_stdout, [], 0, 'info',\n A_B_E_alpha)\n\n\n#----------------------------------------------------------------------\n# Issue 2353.\ndef commit_mods_below_switch(sbox):\n \"commit with mods below switch\"\n sbox.build()\n wc_dir = sbox.wc_dir\n\n C_path = sbox.ospath('A/C')\n B_url = sbox.repo_url + '/A/B'\n expected_output = svntest.wc.State(wc_dir, {\n 'A/C/E' : Item(status='A '),\n 'A/C/E/alpha' : Item(status='A '),\n 'A/C/E/beta' : Item(status='A '),\n 'A/C/F' : Item(status='A '),\n 'A/C/lambda' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'A/C/E' : Item(),\n 'A/C/E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'A/C/E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'A/C/F' : Item(),\n 'A/C/lambda' : Item(contents=\"This is the file 'lambda'.\\n\"),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/C', switched='S')\n expected_status.add({\n 'A/C/E' : Item(status=' ', wc_rev=1),\n 'A/C/E/alpha' : Item(status=' ', wc_rev=1),\n 'A/C/E/beta' : Item(status=' ', wc_rev=1),\n 'A/C/F' : Item(status=' ', wc_rev=1),\n 'A/C/lambda' : Item(status=' ', wc_rev=1),\n })\n svntest.actions.run_and_verify_switch(wc_dir, C_path, B_url,\n expected_output,\n expected_disk,\n expected_status,\n [],\n False, '--ignore-ancestry')\n\n D_path = sbox.ospath('A/D')\n svntest.actions.run_and_verify_svn(None, [],\n 'propset', 'x', 'x', C_path, D_path)\n\n expected_status.tweak('A/C', 'A/D', status=' M')\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/C' : Item(verb='Sending'),\n 'A/D' : Item(verb='Sending'),\n })\n expected_status.tweak('A/C', 'A/D', status=' ', wc_rev=2)\n\n # A/C erroneously classified as a wc root caused the commit to fail\n # with \"'A/C/E' is missing or not locked\"\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status,\n [], C_path, D_path)\n\n#----------------------------------------------------------------------\n# Issue 2306.\ndef refresh_read_only_attribute(sbox):\n \"refresh the WC file system read-only attribute \"\n\n # This test will fail when run as root. 
Since that's normal\n # behavior, just skip the test.\n if os.name == 'posix':\n if os.geteuid() == 0:\n raise svntest.Skip('Test doesn\'t work as uid 0')\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Create a branch.\n url = sbox.repo_url + '/A'\n branch_url = sbox.repo_url + '/A-branch'\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 2.\\n'], [],\n 'cp', '-m', 'svn:needs-lock not set',\n url, branch_url)\n\n # Set the svn:needs-lock property on a file from the \"trunk\".\n A_path = sbox.ospath('A')\n mu_path = os.path.join(A_path, 'mu')\n svntest.actions.run_and_verify_svn(None, [],\n 'ps', 'svn:needs-lock', '1', mu_path)\n\n # Commit the propset of svn:needs-lock.\n expected_output = svntest.wc.State(wc_dir, {\n 'A/mu' : Item(verb='Sending'),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/mu', wc_rev=3)\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output, expected_status,\n [], mu_path)\n\n # The file on which svn:needs-lock was set is now expected to be read-only.\n if os.access(mu_path, os.W_OK):\n raise svntest.Failure(\"'%s' expected to be read-only after having had \"\n \"its svn:needs-lock property set\" % mu_path)\n\n # Switch to the branch with the WC state from before the propset of\n # svn:needs-lock.\n expected_output = svntest.wc.State(wc_dir, {\n 'A/mu' : Item(status=' U'),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_status.tweak('', wc_rev=1)\n expected_status.tweak('iota', wc_rev=1)\n expected_status.tweak('A', switched='S')\n svntest.actions.run_and_verify_switch(wc_dir, A_path, branch_url,\n expected_output,\n expected_disk,\n expected_status,\n [],\n False, '--ignore-ancestry')\n\n # The file we set svn:needs-lock on should now be writable, but the\n # bug left it read-only!\n if not os.access(mu_path, os.W_OK):\n raise svntest.Failure(\"'%s' expected to be writable after being switched \"\n \"to a branch on which its svn:needs-lock property \"\n \"is not set\" % mu_path)\n\n# Check that switch can't change the repository root.\ndef switch_change_repos_root(sbox):\n \"switch shouldn't allow changing repos root\"\n sbox.build()\n\n wc_dir = sbox.wc_dir\n repo_url = sbox.repo_url\n other_repo_url = repo_url\n\n # Strip trailing slashes and add something bogus to that other URL.\n while other_repo_url[-1] == '/':\n other_repo_url = other_repo_url[:-1]\n other_repo_url = other_repo_url + \"_bogus\"\n\n other_A_url = other_repo_url + \"/A\"\n A_wc_dir = sbox.ospath('A')\n\n # Test 1: A switch that changes to a non-existing repo shouldn't work.\n expected_err = \".*Unable to open repository.*|.*Could not open.*|\"\\\n \".*Could not find.*|.*No repository found.*\"\n svntest.actions.run_and_verify_svn(None,\n expected_err,\n 'switch', '--ignore-ancestry',\n other_A_url, A_wc_dir)\n\n # Test 2: A switch that changes the repo root part of the URL shouldn't work.\n other_repo_dir, other_repo_url = sbox.add_repo_path('other')\n other_A_url = other_repo_url + \"/A\"\n\n svntest.main.create_repos(other_repo_dir)\n svntest.actions.run_and_verify_svn(None,\n \".*UUID.*\",\n 'switch', '--ignore-ancestry',\n other_A_url, A_wc_dir)\n\n # Make sure we didn't break the WC.\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n svntest.actions.run_and_verify_status(wc_dir, expected_status)\n\n#----------------------------------------------------------------------\n\ndef 
forced_switch(sbox):\n \"forced switch tolerates obstructions to adds\"\n sbox.build(read_only = True)\n\n # Dir obstruction\n G_path = sbox.ospath('A/B/F/G')\n os.mkdir(G_path)\n\n # Faux file obstructions\n shutil.copyfile(sbox.ospath('A/D/gamma'),\n sbox.ospath('A/B/F/gamma'))\n shutil.copyfile(sbox.ospath('A/D/G/tau'),\n sbox.ospath('A/B/F/G/tau'))\n\n # Real file obstruction\n pi_path = sbox.ospath('A/B/F/G/pi')\n svntest.main.file_write(pi_path,\n \"This is the OBSTRUCTING file 'pi'.\\n\")\n\n # Non-obstructing dir and file\n I_path = sbox.ospath('A/B/F/I')\n os.mkdir(I_path)\n upsilon_path = os.path.join(G_path, 'upsilon')\n svntest.main.file_write(upsilon_path,\n \"This is the unversioned file 'upsilon'.\\n\")\n\n # Setup expected results of switch.\n expected_output = svntest.wc.State(sbox.wc_dir, {\n \"A/B/F/gamma\" : Item(status='E '),\n \"A/B/F/G\" : Item(status='E '),\n \"A/B/F/G/pi\" : Item(status='E '),\n \"A/B/F/G/rho\" : Item(status='A '),\n \"A/B/F/G/tau\" : Item(status='E '),\n \"A/B/F/H\" : Item(status='A '),\n \"A/B/F/H/chi\" : Item(status='A '),\n \"A/B/F/H/omega\" : Item(status='A '),\n \"A/B/F/H/psi\" : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n \"A/B/F/gamma\" : Item(\"This is the file 'gamma'.\\n\"),\n \"A/B/F/G\" : Item(),\n \"A/B/F/G/pi\" : Item(\"This is the OBSTRUCTING file 'pi'.\\n\"),\n \"A/B/F/G/rho\" : Item(\"This is the file 'rho'.\\n\"),\n \"A/B/F/G/tau\" : Item(\"This is the file 'tau'.\\n\"),\n \"A/B/F/G/upsilon\" : Item(\"This is the unversioned file 'upsilon'.\\n\"),\n \"A/B/F/H\" : Item(),\n \"A/B/F/H/chi\" : Item(\"This is the file 'chi'.\\n\"),\n \"A/B/F/H/omega\" : Item(\"This is the file 'omega'.\\n\"),\n \"A/B/F/H/psi\" : Item(\"This is the file 'psi'.\\n\"),\n \"A/B/F/I\" : Item(),\n })\n expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)\n expected_status.tweak('A/B/F', switched='S')\n expected_status.add({\n \"A/B/F/gamma\" : Item(status=' ', wc_rev=1),\n \"A/B/F/G\" : Item(status=' ', wc_rev=1),\n \"A/B/F/G/pi\" : Item(status='M ', wc_rev=1),\n \"A/B/F/G/rho\" : Item(status=' ', wc_rev=1),\n \"A/B/F/G/tau\" : Item(status=' ', wc_rev=1),\n \"A/B/F/H\" : Item(status=' ', wc_rev=1),\n \"A/B/F/H/chi\" : Item(status=' ', wc_rev=1),\n \"A/B/F/H/omega\" : Item(status=' ', wc_rev=1),\n \"A/B/F/H/psi\" : Item(status=' ', wc_rev=1),\n })\n\n # Do the switch and check the results in three ways.\n F_path = sbox.ospath('A/B/F')\n AD_url = sbox.repo_url + '/A/D'\n svntest.actions.run_and_verify_switch(sbox.wc_dir, F_path, AD_url,\n expected_output,\n expected_disk,\n expected_status, [], False,\n '--force', '--ignore-ancestry')\n\n#----------------------------------------------------------------------\ndef forced_switch_failures(sbox):\n \"forced switch detects tree conflicts\"\n # svntest.factory.make(sbox,\n # \"\"\"\n # # Add a directory to obstruct a file.\n # mkdir A/B/F/pi\n #\n # # Add a file to obstruct a directory.\n # echo \"The file 'H'\" > A/C/H\n #\n # # Test three cases where forced switch should cause a tree conflict\n #\n # # 1) A forced switch that tries to add a file when an unversioned\n # # directory of the same name already exists. (Currently fails)\n # svn switch --force url/A/D A/C\n #\n # # 2) A forced switch that tries to add a dir when a file of the same\n # # name already exists. 
(Tree conflict)\n # svn switch --force url/A/D/G A/B/F\n # svn info A/B/F/pi\n #\n # # 3) A forced update that tries to add a directory when a versioned\n # # directory of the same name already exists.\n #\n # # Make dir A/D/H/I in repos.\n # svn mkdir -m \"Log message\" url/A/D/H/I\n #\n # # Make A/D/G/I and co A/D/H/I into it.\n # mkdir A/D/G/I\n # svn co url/A/D/H/I A/D/G/I\n #\n # # Try the forced switch. A/D/G/I obstructs the dir A/D/G/I coming\n # # from the repos, causing an error.\n # svn switch --force url/A/D/H A/D/G\n #\n # # Delete all three obstructions and finish the update.\n # rm -rf A/D/G/I\n # rm A/B/F/pi\n # rm A/C/H\n #\n # # A/B/F is switched to A/D/G\n # # A/C is switched to A/D\n # # A/D/G is switched to A/D/H\n # svn up\n # \"\"\")\n # exit(0)\n sbox.build()\n wc_dir = sbox.wc_dir\n url = sbox.repo_url\n\n A_B_F = sbox.ospath('A/B/F')\n A_B_F_pi = sbox.ospath('A/B/F/pi')\n A_C = sbox.ospath('A/C')\n A_C_H = sbox.ospath('A/C/H')\n A_D_G = sbox.ospath('A/D/G')\n A_D_G_I = sbox.ospath('A/D/G/I')\n url_A_D = url + '/A/D'\n url_A_D_G = url + '/A/D/G'\n url_A_D_H = url + '/A/D/H'\n url_A_D_H_I = url + '/A/D/H/I'\n\n # Add a directory to obstruct a file.\n # mkdir A/B/F/pi\n os.makedirs(A_B_F_pi)\n\n # Add a file to obstruct a directory.\n # echo \"The file 'H'\" > A/C/H\n main.file_write(A_C_H, \"The file 'H'\\n\")\n\n # Test three cases where forced switch should cause a tree conflict\n # 1) A forced switch that tries to add a directory when an unversioned\n # file of the same name already exists. (Currently fails)\n # svn switch --force url/A/D A/C\n expected_output = svntest.wc.State(wc_dir, {\n 'A/C/G' : Item(status='A '),\n 'A/C/G/pi' : Item(status='A '),\n 'A/C/G/rho' : Item(status='A '),\n 'A/C/G/tau' : Item(status='A '),\n 'A/C/gamma' : Item(status='A '),\n 'A/C/H' : Item(status=' ', treeconflict='C'),\n 'A/C/H/psi' : Item(status=' ', treeconflict='A'),\n 'A/C/H/omega' : Item(status=' ', treeconflict='A'),\n 'A/C/H/chi' : Item(status=' ', treeconflict='A'),\n })\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'A/C/gamma' : Item(contents=\"This is the file 'gamma'.\\n\"),\n 'A/C/G' : Item(),\n 'A/C/G/pi' : Item(contents=\"This is the file 'pi'.\\n\"),\n 'A/C/G/rho' : Item(contents=\"This is the file 'rho'.\\n\"),\n 'A/C/G/tau' : Item(contents=\"This is the file 'tau'.\\n\"),\n 'A/C/H' : Item(contents=\"The file 'H'\\n\"),\n 'A/B/F/pi' : Item(),\n })\n\n expected_status = actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'A/C/G' : Item(status=' ', wc_rev='1'),\n 'A/C/G/rho' : Item(status=' ', wc_rev='1'),\n 'A/C/G/tau' : Item(status=' ', wc_rev='1'),\n 'A/C/G/pi' : Item(status=' ', wc_rev='1'),\n 'A/C/H' : Item(status='D ', treeconflict='C', wc_rev='1'),\n 'A/C/H/psi' : Item(status='D ', wc_rev='1'),\n 'A/C/H/omega' : Item(status='D ', wc_rev='1'),\n 'A/C/H/chi' : Item(status='D ', wc_rev='1'),\n 'A/C/gamma' : Item(status=' ', wc_rev='1'),\n })\n expected_status.tweak('A/C', switched='S')\n\n actions.run_and_verify_switch(wc_dir, A_C, url_A_D, expected_output,\n expected_disk, expected_status, [], False,\n '--force',\n '--ignore-ancestry')\n\n\n # 2) A forced switch that tries to add a file when a dir of the same\n # name already exists. 
(Tree conflict)\n # svn switch --force url/A/D/G A/B/F\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/F/rho' : Item(status='A '),\n 'A/B/F/pi' : Item(status=' ', treeconflict='C'),\n 'A/B/F/tau' : Item(status='A '),\n })\n\n expected_disk.add({\n 'A/B/F/rho' : Item(contents=\"This is the file 'rho'.\\n\"),\n 'A/B/F/tau' : Item(contents=\"This is the file 'tau'.\\n\"),\n })\n\n expected_status.add({\n 'A/B/F/tau' : Item(status=' ', wc_rev='1'),\n 'A/B/F/pi' : Item(status='D ', treeconflict='C', wc_rev='1'),\n 'A/B/F/rho' : Item(status=' ', wc_rev='1'),\n })\n expected_status.tweak('A/B/F', switched='S')\n\n actions.run_and_verify_switch(wc_dir, A_B_F, url_A_D_G, expected_output,\n expected_disk, expected_status, [], False,\n '--force',\n '--ignore-ancestry')\n\n # svn info A/B/F/pi\n expected_stdout = verify.ExpectedOutput(\n 'Tree conflict: local dir unversioned, incoming file add upon switch\\n',\n match_all=False)\n\n actions.run_and_verify_svn2(expected_stdout, [], 0, 'info',\n A_B_F_pi)\n\n\n # 3) A forced update that tries to add a directory when a versioned\n # directory of the same name already exists.\n # Make dir A/D/H/I in repos.\n # svn mkdir -m \"Log message\" url/A/D/H/I\n expected_stdout = verify.UnorderedOutput([\n 'Committing transaction...\\n',\n 'Committed revision 2.\\n',\n ])\n\n actions.run_and_verify_svn2(expected_stdout, [], 0, 'mkdir',\n '-m', 'Log message', url_A_D_H_I)\n\n # Make A/D/G/I and co A/D/H/I into it.\n # mkdir A/D/G/I\n os.makedirs(A_D_G_I)\n\n # svn co url/A/D/H/I A/D/G/I\n expected_output = svntest.wc.State(wc_dir, {})\n\n expected_disk.add({\n 'A/D/G/I' : Item(),\n })\n\n exit_code, so, se = svntest.actions.run_and_verify_svn(\n ['Checked out revision 2.\\n'], [],\n \"co\", url_A_D_H_I, A_D_G_I)\n\n # Try the forced switch. 
A/D/G/I obstructs the dir A/D/G/I coming\n # from the repos, causing an error.\n # svn switch --force url/A/D/H A/D/G\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/chi' : Item(status='A '),\n 'A/D/G/tau' : Item(status='D '),\n 'A/D/G/omega' : Item(status='A '),\n 'A/D/G/psi' : Item(status='A '),\n 'A/D/G/I' : Item(verb='Skipped'),\n 'A/D/G/rho' : Item(status='D '),\n 'A/D/G/pi' : Item(status='D '),\n })\n\n actions.run_and_verify_switch(wc_dir, A_D_G, url_A_D_H, expected_output,\n None, None, [], False,\n '--force', '--ignore-ancestry')\n\n # Delete all three obstructions and finish the update.\n # rm -rf A/D/G/I\n main.safe_rmtree(A_D_G_I)\n\n # rm A/B/F/pi\n main.safe_rmtree(A_B_F_pi)\n\n # rm A/C/H\n os.remove(A_C_H)\n\n # Resolve the tree conflict on A_C_H and A_B_F_pi\n svntest.main.run_svn(None, 'resolved', A_C_H)\n svntest.main.run_svn(None, 'revert', A_B_F_pi)\n\n # A/B/F is switched to A/D/G\n # A/C is switched to A/D\n # A/D/G is switched to A/D/H\n # svn up\n expected_output = svntest.wc.State(wc_dir, {\n 'A/C/H/I' : Item(status='A '),\n 'A/D/G/I' : Item(status='A '),\n 'A/D/H/I' : Item(status='A '),\n })\n\n expected_disk.remove('A/D/G/tau', 'A/D/G/rho', 'A/D/G/pi')\n expected_disk.add({\n 'A/D/H/I' : Item(),\n 'A/D/G/omega' : Item(contents=\"This is the file 'omega'.\\n\"),\n 'A/D/G/psi' : Item(contents=\"This is the file 'psi'.\\n\"),\n 'A/D/G/chi' : Item(contents=\"This is the file 'chi'.\\n\"),\n 'A/C/H/I' : Item(),\n 'A/C/H/omega' : Item(contents=\"This is the file 'omega'.\\n\"),\n 'A/C/H/psi' : Item(contents=\"This is the file 'psi'.\\n\"),\n 'A/C/H/chi' : Item(contents=\"This is the file 'chi'.\\n\"),\n })\n expected_disk.tweak('A/C/H', contents=None)\n expected_disk.tweak('A/B/F/pi', contents=\"This is the file 'pi'.\\n\")\n\n expected_status.remove('A/D/G/tau', 'A/D/G/rho', 'A/D/G/pi')\n expected_status.add({\n 'A/D/G/omega' : Item(status=' ', wc_rev='2'),\n 'A/D/G/I' : Item(status=' ', wc_rev='2'),\n 'A/D/G/psi' : Item(status=' ', wc_rev='2'),\n 'A/D/G/chi' : Item(status=' ', wc_rev='2'),\n 'A/D/H/I' : Item(status=' ', wc_rev='2'),\n 'A/C/H/psi' : Item(status=' ', wc_rev='2'),\n 'A/C/H/omega' : Item(status=' ', wc_rev='2'),\n 'A/C/H/chi' : Item(status=' ', wc_rev='2'),\n 'A/C/H/I' : Item(status=' ', wc_rev='2'),\n })\n expected_status.tweak(wc_rev='2', status=' ')\n expected_status.tweak('A/B/F/pi', 'A/C/H', treeconflict=None)\n expected_status.tweak('A/D/G', switched='S')\n\n svntest.main.run_svn(None, 'revert', '-R', sbox.ospath('A/C/H'))\n\n actions.run_and_verify_update(wc_dir, expected_output, expected_disk,\n expected_status)\n\n\ndef switch_with_obstructing_local_adds(sbox):\n \"switch tolerates WC adds\"\n sbox.build(read_only = True)\n\n # Dir obstruction scheduled for addition without history.\n G_path = sbox.ospath('A/B/F/G')\n os.mkdir(G_path)\n\n # File obstructions scheduled for addition without history.\n # Contents identical to additions from switch.\n gamma_copy_path = sbox.ospath('A/B/F/gamma')\n shutil.copyfile(sbox.ospath('A/D/gamma'),\n gamma_copy_path)\n shutil.copyfile(sbox.ospath('A/D/G/tau'),\n sbox.ospath('A/B/F/G/tau'))\n\n # File obstruction scheduled for addition without history.\n # Contents conflict with addition from switch.\n pi_path = sbox.ospath('A/B/F/G/pi')\n svntest.main.file_write(pi_path,\n \"This is the OBSTRUCTING file 'pi'.\\n\")\n\n # Non-obstructing dir and file scheduled for addition without history.\n I_path = sbox.ospath('A/B/F/I')\n os.mkdir(I_path)\n upsilon_path = os.path.join(G_path, 'upsilon')\n 
svntest.main.file_write(upsilon_path,\n \"This is the unversioned file 'upsilon'.\\n\")\n\n # Add the above obstructions.\n svntest.actions.run_and_verify_svn(None, [],\n 'add', G_path, I_path,\n gamma_copy_path)\n\n # Setup expected results of switch.\n expected_output = svntest.wc.State(sbox.wc_dir, {\n \"A/B/F/gamma\" : Item(status=' ', treeconflict='C'),\n \"A/B/F/G\" : Item(status=' ', treeconflict='C'),\n 'A/B/F/G/tau' : Item(status=' ', treeconflict='A'),\n 'A/B/F/G/rho' : Item(status=' ', treeconflict='A'),\n 'A/B/F/G/pi' : Item(status=' ', treeconflict='A'),\n \"A/B/F/H\" : Item(status='A '),\n \"A/B/F/H/chi\" : Item(status='A '),\n \"A/B/F/H/omega\" : Item(status='A '),\n \"A/B/F/H/psi\" : Item(status='A '),\n })\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n \"A/B/F/gamma\" : Item(\"This is the file 'gamma'.\\n\"),\n \"A/B/F/G\" : Item(),\n \"A/B/F/G/pi\" : Item(\"This is the OBSTRUCTING file 'pi'.\\n\"),\n \"A/B/F/G/tau\" : Item(\"This is the file 'tau'.\\n\"),\n \"A/B/F/G/upsilon\" : Item(\"This is the unversioned file 'upsilon'.\\n\"),\n \"A/B/F/H\" : Item(),\n \"A/B/F/H/chi\" : Item(\"This is the file 'chi'.\\n\"),\n \"A/B/F/H/omega\" : Item(\"This is the file 'omega'.\\n\"),\n \"A/B/F/H/psi\" : Item(\"This is the file 'psi'.\\n\"),\n \"A/B/F/I\" : Item(),\n })\n expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)\n expected_status.tweak('A/B/F', switched='S')\n expected_status.add({\n 'A/B/F/gamma' : Item(status='R ', treeconflict='C', wc_rev='1'),\n 'A/B/F/G' : Item(status='R ', treeconflict='C', wc_rev='1'),\n 'A/B/F/G/pi' : Item(status='A ', wc_rev='-', entry_status='R ', entry_rev='1'),\n 'A/B/F/G/tau' : Item(status='A ', wc_rev='-', entry_status='R ', entry_rev='1'),\n 'A/B/F/G/upsilon' : Item(status='A ', wc_rev='-', entry_rev='0'),\n 'A/B/F/G/rho' : Item(status='D ', wc_rev='1'),\n 'A/B/F/H' : Item(status=' ', wc_rev='1'),\n 'A/B/F/H/chi' : Item(status=' ', wc_rev='1'),\n 'A/B/F/H/omega' : Item(status=' ', wc_rev='1'),\n 'A/B/F/H/psi' : Item(status=' ', wc_rev='1'),\n 'A/B/F/I' : Item(status='A ', wc_rev='-', entry_rev='0'),\n })\n\n # Do the switch and check the results in three ways.\n F_path = sbox.ospath('A/B/F')\n D_url = sbox.repo_url + '/A/D'\n\n svntest.actions.run_and_verify_switch(sbox.wc_dir, F_path, D_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--ignore-ancestry')\n\n#----------------------------------------------------------------------\n\ndef switch_scheduled_add(sbox):\n \"switch a scheduled-add file\"\n sbox.build(read_only = True)\n wc_dir = sbox.wc_dir\n\n file_path = sbox.ospath('stub_file')\n switch_url = sbox.repo_url + '/iota'\n nodo_path = sbox.ospath('nodo')\n\n svntest.main.file_append(file_path, \"\")\n svntest.actions.run_and_verify_svn(None, [],\n 'add', file_path)\n svntest.actions.run_and_verify_svn(None,\n \"svn: E200007: Cannot switch '.*file' \" +\n \"because it is not in the repository yet\",\n 'switch', '--ignore-ancestry',\n switch_url, file_path)\n\n svntest.actions.run_and_verify_svn(None,\n \"svn: E155010: The node '.*nodo' was not\",\n 'switch', '--ignore-ancestry',\n switch_url, nodo_path)\n\n#----------------------------------------------------------------------\n@SkipUnless(server_has_mergeinfo)\ndef mergeinfo_switch_elision(sbox):\n \"mergeinfo does not elide post switch\"\n\n # When a switch adds mergeinfo on a path which is identical to\n # the mergeinfo on one of the path's subtrees, the subtree's mergeinfo\n # should *not* elide! 
If it did this could result in the switch of a\n # pristine tree producing local mods.\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about\n lambda_path = sbox.ospath('A/B_COPY_1/lambda')\n B_COPY_1_path = sbox.ospath('A/B_COPY_1')\n B_COPY_2_path = sbox.ospath('A/B_COPY_2')\n E_COPY_2_path = sbox.ospath('A/B_COPY_2/E')\n alpha_path = sbox.ospath('A/B/E/alpha')\n beta_path = sbox.ospath('A/B/E/beta')\n\n # Make branches A/B_COPY_1 and A/B_COPY_2\n expected_stdout = verify.UnorderedOutput([\n \"A \" + sbox.ospath('A/B_COPY_1/lambda') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_1/E') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_1/E/alpha') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_1/E/beta') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_1/F') + \"\\n\",\n \"Checked out revision 1.\\n\",\n \"A \" + B_COPY_1_path + \"\\n\",\n ])\n svntest.actions.run_and_verify_svn(expected_stdout, [], 'copy',\n sbox.repo_url + \"/A/B\", B_COPY_1_path)\n\n expected_stdout = verify.UnorderedOutput([\n \"A \" + sbox.ospath('A/B_COPY_2/lambda') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_2/E') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_2/E/alpha') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_2/E/beta') + \"\\n\",\n \"A \" + sbox.ospath('A/B_COPY_2/F') + \"\\n\",\n \"Checked out revision 1.\\n\",\n \"A \" + B_COPY_2_path + \"\\n\",\n ])\n svntest.actions.run_and_verify_svn(expected_stdout, [], 'copy',\n sbox.repo_url + \"/A/B\", B_COPY_2_path)\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B_COPY_1' : Item(verb='Adding'),\n 'A/B_COPY_2' : Item(verb='Adding')\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n \"A/B_COPY_1\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/lambda\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/E\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/E/alpha\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/E/beta\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/F\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_2\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_2/lambda\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_2/E\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_2/E/alpha\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_2/E/beta\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_2/F\" : Item(status=' ', wc_rev=2),\n })\n\n svntest.actions.run_and_verify_commit(wc_dir,\n expected_output,\n expected_status)\n\n # Make some changes under A/B\n\n # r3 - modify and commit A/B/E/beta\n svntest.main.file_write(beta_path, \"New content\")\n expected_output = svntest.wc.State(wc_dir,\n {'A/B/E/beta' : Item(verb='Sending')})\n expected_status.tweak('A/B/E/beta', wc_rev=3)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # r4 - modify and commit A/B/E/alpha\n svntest.main.file_write(alpha_path, \"New content\")\n expected_output = svntest.wc.State(wc_dir,\n {'A/B/E/alpha' : Item(verb='Sending')})\n expected_status.tweak('A/B/E/alpha', wc_rev=4)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Merge r2:4 into A/B_COPY_1\n expected_output = svntest.wc.State(B_COPY_1_path, {\n 'E/alpha' : Item(status='U '),\n 'E/beta' : Item(status='U '),\n })\n expected_mergeinfo_output = svntest.wc.State(B_COPY_1_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = svntest.wc.State(B_COPY_1_path, {\n })\n expected_merge_status = svntest.wc.State(B_COPY_1_path, {\n '' : Item(status=' M', wc_rev=2),\n 'lambda' : Item(status=' ', wc_rev=2),\n 'E' : Item(status=' ', wc_rev=2),\n 
'E/alpha' : Item(status='M ', wc_rev=2),\n 'E/beta' : Item(status='M ', wc_rev=2),\n 'F' : Item(status=' ', wc_rev=2),\n })\n expected_merge_disk = svntest.wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),\n 'lambda' : Item(\"This is the file 'lambda'.\\n\"),\n 'E' : Item(),\n 'E/alpha' : Item(\"New content\"),\n 'E/beta' : Item(\"New content\"),\n 'F' : Item(),\n })\n expected_skip = svntest.wc.State(B_COPY_1_path, { })\n svntest.actions.run_and_verify_merge(B_COPY_1_path, '2', '4',\n sbox.repo_url + '/A/B', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_merge_disk,\n expected_merge_status,\n expected_skip,\n check_props=True)\n\n # r5 - Commit the merge into A/B_COPY_1/E\n expected_output = svntest.wc.State(\n wc_dir,\n {'A/B_COPY_1' : Item(verb='Sending'),\n 'A/B_COPY_1/E/alpha' : Item(verb='Sending'),\n 'A/B_COPY_1/E/beta' : Item(verb='Sending'),\n })\n expected_status.tweak('A/B_COPY_1', wc_rev=5)\n expected_status.tweak('A/B_COPY_1/E/alpha', wc_rev=5)\n expected_status.tweak('A/B_COPY_1/E/beta', wc_rev=5)\n expected_status.tweak('A/B_COPY_1/lambda', wc_rev=2)\n svntest.actions.run_and_verify_commit(wc_dir, expected_output,\n expected_status)\n\n # Merge r2:4 into A/B_COPY_2/E\n expected_output = svntest.wc.State(E_COPY_2_path, {\n 'alpha' : Item(status='U '),\n 'beta' : Item(status='U '),\n })\n expected_mergeinfo_output = svntest.wc.State(E_COPY_2_path, {\n '' : Item(status=' U'),\n })\n expected_elision_output = svntest.wc.State(E_COPY_2_path, {\n })\n expected_merge_status = svntest.wc.State(E_COPY_2_path, {\n '' : Item(status=' M', wc_rev=2),\n 'alpha' : Item(status='M ', wc_rev=2),\n 'beta' : Item(status='M ', wc_rev=2),\n })\n expected_merge_disk = svntest.wc.State('', {\n '' : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:3-4'}),\n 'alpha' : Item(\"New content\"),\n 'beta' : Item(\"New content\"),\n })\n expected_skip = svntest.wc.State(E_COPY_2_path, { })\n svntest.actions.run_and_verify_merge(E_COPY_2_path, '2', '4',\n sbox.repo_url + '/A/B/E', None,\n expected_output,\n expected_mergeinfo_output,\n expected_elision_output,\n expected_merge_disk,\n expected_merge_status,\n expected_skip,\n check_props=True)\n\n # Switch A/B_COPY_2 to URL of A/B_COPY_1. 
The local mergeinfo for r1,3-4\n # on A/B_COPY_2/E is identical to the mergeinfo added to A/B_COPY_2 as a\n # result of the switch, but we leave the former in place.\n\n # Setup expected results of switch.\n expected_output = svntest.wc.State(sbox.wc_dir, {\n \"A/B_COPY_2\" : Item(status=' U'),\n \"A/B_COPY_2/E/alpha\" : Item(status='G '),\n \"A/B_COPY_2/E/beta\" : Item(status='G '),\n })\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak(\"A/B/E/alpha\", contents=\"New content\")\n expected_disk.tweak(\"A/B/E/beta\", contents=\"New content\")\n expected_disk.add({\n \"A/B_COPY_1\" : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),\n \"A/B_COPY_1/E\" : Item(),\n \"A/B_COPY_1/F\" : Item(),\n \"A/B_COPY_1/lambda\" : Item(\"This is the file 'lambda'.\\n\"),\n \"A/B_COPY_1/E/alpha\" : Item(\"New content\"),\n \"A/B_COPY_1/E/beta\" : Item(\"New content\"),\n \"A/B_COPY_2\" : Item(props={SVN_PROP_MERGEINFO : '/A/B:3-4'}),\n \"A/B_COPY_2/E\" : Item(props={SVN_PROP_MERGEINFO : '/A/B/E:3-4'}),\n \"A/B_COPY_2/F\" : Item(),\n \"A/B_COPY_2/lambda\" : Item(\"This is the file 'lambda'.\\n\"),\n \"A/B_COPY_2/E/alpha\" : Item(\"New content\"),\n \"A/B_COPY_2/E/beta\" : Item(\"New content\"),\n })\n expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)\n expected_status.tweak(\"A/B/E/beta\", wc_rev=3)\n expected_status.tweak(\"A/B/E/alpha\", wc_rev=4)\n expected_status.add({\n \"A/B_COPY_1\" : Item(status=' ', wc_rev=5),\n \"A/B_COPY_1/E\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/F\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/lambda\" : Item(status=' ', wc_rev=2),\n \"A/B_COPY_1/E/alpha\" : Item(status=' ', wc_rev=5),\n \"A/B_COPY_1/E/beta\" : Item(status=' ', wc_rev=5),\n \"A/B_COPY_2\" : Item(status=' ', wc_rev=5, switched='S'),\n \"A/B_COPY_2/E\" : Item(status=' M', wc_rev=5),\n \"A/B_COPY_2/F\" : Item(status=' ', wc_rev=5),\n \"A/B_COPY_2/lambda\" : Item(status=' ', wc_rev=5),\n \"A/B_COPY_2/E/alpha\" : Item(status=' ', wc_rev=5),\n \"A/B_COPY_2/E/beta\" : Item(status=' ', wc_rev=5),\n })\n\n svntest.actions.run_and_verify_switch(sbox.wc_dir,\n B_COPY_2_path,\n sbox.repo_url + \"/A/B_COPY_1\",\n expected_output,\n expected_disk,\n expected_status,\n [], True,\n '--ignore-ancestry')\n\n # Now check a switch which reverses an earlier switch and leaves\n # a path in an unswitched state.\n #\n # Switch A/B_COPY_1/lambda to iota. Use propset to give A/B_COPY_1/lambda\n # the mergeinfo '/A/B/lambda:1,3-4'. Then switch A/B_COPY_1/lambda back\n # to A/B_COPY_1/lambda. 
The local mergeinfo for r1,3-4 should remain on\n # A/B_COPY_1/lambda.\n expected_output = svntest.wc.State(sbox.wc_dir, {\n \"A/B_COPY_1/lambda\" : Item(status='U '),\n })\n expected_disk.tweak(\"A/B_COPY_1/lambda\",\n contents=\"This is the file 'iota'.\\n\")\n expected_status.tweak(\"A/B_COPY_1/lambda\", wc_rev=5, switched='S')\n svntest.actions.run_and_verify_switch(sbox.wc_dir,\n lambda_path,\n sbox.repo_url + \"/iota\",\n expected_output,\n expected_disk,\n expected_status,\n [], True,\n '--ignore-ancestry')\n\n svntest.actions.run_and_verify_svn([\"property '\" + SVN_PROP_MERGEINFO +\n \"' set on '\" + lambda_path + \"'\" +\n \"\\n\"], [], 'ps', SVN_PROP_MERGEINFO,\n '/A/B/lambda:3-4', lambda_path)\n\n expected_output = svntest.wc.State(sbox.wc_dir, {\n \"A/B_COPY_1/lambda\" : Item(status='U '),\n })\n expected_disk.tweak(\"A/B_COPY_1/lambda\",\n contents=\"This is the file 'lambda'.\\n\",\n props={SVN_PROP_MERGEINFO : '/A/B/lambda:3-4'})\n expected_status.tweak(\"A/B_COPY_1/lambda\", switched=None, status=' M')\n svntest.actions.run_and_verify_switch(sbox.wc_dir,\n lambda_path,\n sbox.repo_url + \"/A/B_COPY_1/lambda\",\n expected_output,\n expected_disk,\n expected_status,\n [], True,\n '--ignore-ancestry')\n\n#----------------------------------------------------------------------\n\ndef switch_with_depth(sbox):\n \"basic tests to verify switch along with depth\"\n\n sbox.build(read_only = True)\n\n # Form some paths and URLs required\n wc_dir = sbox.wc_dir\n repo_url = sbox.repo_url\n AD_url = repo_url + '/A/D'\n AB_url = repo_url + '/A/B'\n AB_path = sbox.ospath('A/B')\n\n # Set up expected results of 'switch --depth=empty'\n expected_output = svntest.wc.State(wc_dir, {})\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.tweak('A/B', switched='S')\n expected_status.tweak('A/B/lambda', switched='S')\n expected_status.tweak('A/B/E', switched='S')\n expected_status.tweak('A/B/F', switched='S')\n\n # Do 'switch --depth=empty' and check the results in three ways.\n svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--depth', 'empty', '--ignore-ancestry')\n\n # Set up expected results for reverting 'switch --depth=empty'\n expected_output = svntest.wc.State(wc_dir, {})\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n\n svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--depth', 'empty', '--ignore-ancestry')\n\n # Set up expected results of 'switch --depth=files'\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/lambda' : Item(status='D '),\n 'A/B/gamma' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/B/lambda')\n expected_disk.add({\n 'A/B/gamma' : Item(\"This is the file 'gamma'.\\n\")\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.remove('A/B/lambda')\n expected_status.add({\n 'A/B/gamma' : Item(status=' ', wc_rev=1)\n })\n expected_status.tweak('A/B', switched='S')\n expected_status.tweak('A/B/E', switched='S')\n expected_status.tweak('A/B/F', switched='S')\n\n # Do 'switch --depth=files' and check the results in three ways.\n svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--depth', 
'files', '--ignore-ancestry')\n\n # Set up expected results for reverting 'switch --depth=files'\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/gamma' : Item(status='D '),\n 'A/B/lambda' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n\n svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--depth', 'files', '--ignore-ancestry')\n\n # Putting the depth=immediates stuff in a subroutine, because we're\n # going to run it at least twice.\n def sw_depth_imm():\n # Set up expected results of 'switch --depth=immediates'\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/lambda' : Item(status='D '),\n 'A/B/E' : Item(status='D '),\n 'A/B/F' : Item(status='D '),\n 'A/B/gamma' : Item(status='A '),\n 'A/B/G' : Item(status='A '),\n 'A/B/H' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/B/lambda', 'A/B/E/beta', 'A/B/E/alpha',\n 'A/B/E', 'A/B/F')\n expected_disk.add({\n 'A/B/gamma' : Item(\"This is the file 'gamma'.\\n\"),\n 'A/B/G' : Item(),\n 'A/B/H' : Item(),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.remove('A/B/lambda', 'A/B/E/beta', 'A/B/E/alpha',\n 'A/B/E', 'A/B/F')\n expected_status.add({\n 'A/B/gamma' : Item(status=' ', wc_rev=1),\n 'A/B/G' : Item(status=' ', wc_rev=1),\n 'A/B/H' : Item(status=' ', wc_rev=1)\n })\n expected_status.tweak('A/B', switched='S')\n\n # Do 'switch --depth=immediates' and check the results in three ways.\n svntest.actions.run_and_verify_switch(wc_dir, AB_path, AD_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--depth', 'immediates',\n '--ignore-ancestry')\n\n sw_depth_imm()\n\n # Set up expected results for reverting 'switch --depth=immediates'.\n # (Reverting with default [infinite] depth, so that the result is a\n # standard Greek Tree working copy again.)\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/gamma' : Item(status='D '),\n 'A/B/G' : Item(status='D '),\n 'A/B/H' : Item(status='D '),\n 'A/B/lambda' : Item(status='A '),\n 'A/B/E' : Item(status='A '),\n 'A/B/E/alpha' : Item(status='A '),\n 'A/B/E/beta' : Item(status='A '),\n 'A/B/F' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--ignore-ancestry')\n\n # Okay, repeat 'switch --depth=immediates'. 
(Afterwards we'll\n # 'switch --depth=infinity', to test going all the way.)\n sw_depth_imm()\n\n # Set up expected results of 'switch --depth=infinity'\n expected_output = svntest.wc.State(wc_dir, {\n 'A/B/gamma' : Item(status='D '),\n 'A/B/G' : Item(status='D '),\n 'A/B/H' : Item(status='D '),\n 'A/B/lambda' : Item(status='A '),\n 'A/B/E' : Item(status='A '),\n 'A/B/E/alpha' : Item(status='A '),\n 'A/B/E/beta' : Item(status='A '),\n 'A/B/F' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n\n # Do the 'switch --depth=infinity' and check the results in three ways.\n svntest.actions.run_and_verify_switch(wc_dir, AB_path, AB_url,\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '--depth', 'infinity',\n '--ignore-ancestry')\n\n#----------------------------------------------------------------------\n\ndef switch_to_dir_with_peg_rev(sbox):\n \"switch to dir@peg where dir doesn't exist in HEAD\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n repo_url = sbox.repo_url\n\n # prepare two dirs X and Y in rev. 2\n X_path = sbox.ospath('X')\n Y_path = sbox.ospath('Y')\n svntest.main.run_svn(None, 'mkdir', X_path, Y_path)\n sbox.simple_commit(message='log message')\n\n # change tau in rev. 3\n ADG_path = sbox.ospath('A/D/G')\n tau_path = os.path.join(ADG_path, 'tau')\n svntest.main.file_append(tau_path, \"new line\\n\")\n sbox.simple_commit(message='log message')\n\n # delete A/D/G in rev. 4\n svntest.main.run_svn(None, 'up', wc_dir)\n svntest.main.run_svn(None, 'rm', ADG_path)\n sbox.simple_commit(message='log message')\n\n # Test 1: switch X to A/D/G@2\n ADG_url = repo_url + '/A/D/G'\n expected_output = svntest.wc.State(wc_dir, {\n 'X/pi' : Item(status='A '),\n 'X/rho' : Item(status='A '),\n 'X/tau' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'X' : Item(),\n 'X/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'X/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'X/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'Y' : Item(),\n })\n expected_disk.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_status.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n expected_status.add({\n 'X' : Item(status=' ', wc_rev=2, switched='S'),\n 'X/pi' : Item(status=' ', wc_rev=2),\n 'X/rho' : Item(status=' ', wc_rev=2),\n 'X/tau' : Item(status=' ', wc_rev=2),\n 'Y' : Item(status=' ', wc_rev=3)\n })\n\n # Do the switch to rev. 
2 of /A/D/G@3.\n svntest.actions.run_and_verify_switch(wc_dir, X_path, ADG_url + '@3',\n expected_output,\n expected_disk,\n expected_status,\n [], False,\n '-r', '2', '--ignore-ancestry')\n\ndef switch_urls_with_spaces(sbox):\n \"switch file and dir to url containing spaces\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n repo_url = sbox.repo_url\n\n # add file and directory with spaces in their names.\n XYZ_path = sbox.ospath('X Y Z')\n ABC_path = sbox.ospath('A B C')\n svntest.main.run_svn(None, 'mkdir', XYZ_path, ABC_path)\n\n tpm_path = sbox.ospath('tau pau mau')\n bbb_path = sbox.ospath('bar baz bal')\n svntest.main.file_write(tpm_path, \"This is the file 'tau pau mau'.\\n\")\n svntest.main.file_write(bbb_path, \"This is the file 'bar baz bal'.\\n\")\n svntest.main.run_svn(None, 'add', tpm_path, bbb_path)\n\n sbox.simple_commit(message='log message')\n\n # Test 1: switch directory 'A B C' to url 'X Y Z'\n XYZ_url = repo_url + '/X Y Z'\n expected_output = svntest.wc.State(wc_dir, {\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'X Y Z' : Item(),\n 'A B C' : Item(),\n 'tau pau mau' : Item(\"This is the file 'tau pau mau'.\\n\"),\n 'bar baz bal' : Item(\"This is the file 'bar baz bal'.\\n\"),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'X Y Z' : Item(status=' ', wc_rev=2),\n 'A B C' : Item(status=' ', wc_rev=2, switched='S'),\n 'tau pau mau' : Item(status=' ', wc_rev=2),\n 'bar baz bal' : Item(status=' ', wc_rev=2),\n })\n\n svntest.actions.run_and_verify_switch(wc_dir, ABC_path, XYZ_url,\n expected_output,\n expected_disk,\n expected_status,\n [],\n False, '--ignore-ancestry')\n\n # Test 2: switch file 'bar baz bal' to 'tau pau mau'\n tpm_url = repo_url + '/tau pau mau'\n expected_output = svntest.wc.State(wc_dir, {\n 'bar baz bal' : Item(status='U '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'X Y Z' : Item(),\n 'A B C' : Item(),\n 'tau pau mau' : Item(\"This is the file 'tau pau mau'.\\n\"),\n 'bar baz bal' : Item(\"This is the file 'tau pau mau'.\\n\"),\n })\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.add({\n 'X Y Z' : Item(status=' ', wc_rev=2),\n 'A B C' : Item(status=' ', wc_rev=2, switched='S'),\n 'tau pau mau' : Item(status=' ', wc_rev=2),\n 'bar baz bal' : Item(status=' ', wc_rev=2, switched='S'),\n })\n\n svntest.actions.run_and_verify_switch(wc_dir, bbb_path, tpm_url,\n expected_output,\n expected_disk,\n expected_status,\n [],\n False, '--ignore-ancestry')\n\ndef switch_to_dir_with_peg_rev2(sbox):\n \"switch to old rev of now renamed branch\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n repo_url = sbox.repo_url\n\n # prepare dir X in rev. 2\n X_path = sbox.ospath('X')\n svntest.main.run_svn(None, 'mkdir', X_path)\n sbox.simple_commit(message='log message')\n\n # make a change in ADG in rev. 
3\n tau_path = sbox.ospath('A/D/G/tau')\n svntest.main.file_append(tau_path, \"extra line\\n\")\n sbox.simple_commit(message='log message')\n\n # Rename ADG to ADY in rev 4\n svntest.main.run_svn(None, 'up', wc_dir)\n ADG_path = sbox.ospath('A/D/G')\n ADY_path = sbox.ospath('A/D/Y')\n svntest.main.run_svn(None, 'mv', ADG_path, ADY_path)\n sbox.simple_commit(message='log message')\n\n # Test switch X to rev 2 of A/D/Y@HEAD\n ADY_url = sbox.repo_url + '/A/D/Y'\n expected_output = svntest.wc.State(wc_dir, {\n 'X/pi' : Item(status='A '),\n 'X/rho' : Item(status='A '),\n 'X/tau' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'X' : Item(),\n 'X/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'X/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'X/tau' : Item(\"This is the file 'tau'.\\n\"),\n 'A/D/Y' : Item(),\n 'A/D/Y/pi' : Item(\"This is the file 'pi'.\\n\"),\n 'A/D/Y/rho' : Item(\"This is the file 'rho'.\\n\"),\n 'A/D/Y/tau' : Item(\"This is the file 'tau'.\\nextra line\\n\"),\n })\n expected_disk.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_status.remove('A/D/G', 'A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n expected_status.add({\n 'X' : Item(status=' ', wc_rev=2, switched='S'),\n 'X/pi' : Item(status=' ', wc_rev=2),\n 'X/rho' : Item(status=' ', wc_rev=2),\n 'X/tau' : Item(status=' ', wc_rev=2),\n 'A/D/Y' : Item(status=' ', wc_rev=4),\n 'A/D/Y/pi' : Item(status=' ', wc_rev=4),\n 'A/D/Y/rho' : Item(status=' ', wc_rev=4),\n 'A/D/Y/tau' : Item(status=' ', wc_rev=4),\n })\n\n svntest.actions.run_and_verify_switch(wc_dir, X_path, ADY_url + '@HEAD',\n expected_output,\n expected_disk,\n expected_status, [], False,\n '-r', '2', '--ignore-ancestry')\n\ndef switch_to_root(sbox):\n \"switch a folder to the root of its repository\"\n\n sbox.build(read_only = True)\n wc_dir = sbox.wc_dir\n repo_url = sbox.repo_url\n\n ADG_path = sbox.ospath('A/D/G')\n\n # Test switch /A/D/G to /\n AD_url = sbox.repo_url + '/A/D'\n expected_output = svntest.wc.State(wc_dir, {\n 'A/D/G/pi' : Item(status='D '),\n 'A/D/G/rho' : Item(status='D '),\n 'A/D/G/tau' : Item(status='D '),\n 'A/D/G/A' : Item(status='A '),\n 'A/D/G/A/B' : Item(status='A '),\n 'A/D/G/A/B/lambda' : Item(status='A '),\n 'A/D/G/A/B/E' : Item(status='A '),\n 'A/D/G/A/B/E/alpha' : Item(status='A '),\n 'A/D/G/A/B/E/beta' : Item(status='A '),\n 'A/D/G/A/B/F' : Item(status='A '),\n 'A/D/G/A/mu' : Item(status='A '),\n 'A/D/G/A/C' : Item(status='A '),\n 'A/D/G/A/D' : Item(status='A '),\n 'A/D/G/A/D/gamma' : Item(status='A '),\n 'A/D/G/A/D/G' : Item(status='A '),\n 'A/D/G/A/D/G/pi' : Item(status='A '),\n 'A/D/G/A/D/G/rho' : Item(status='A '),\n 'A/D/G/A/D/G/tau' : Item(status='A '),\n 'A/D/G/A/D/H' : Item(status='A '),\n 'A/D/G/A/D/H/chi' : Item(status='A '),\n 'A/D/G/A/D/H/omega' : Item(status='A '),\n 'A/D/G/A/D/H/psi' : Item(status='A '),\n 'A/D/G/iota' : Item(status='A '),\n })\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n expected_disk.add_state('A/D/G', svntest.main.greek_state.copy())\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 1)\n expected_status.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n expected_status.add_state('A/D/G',\n svntest.actions.get_virginal_state(wc_dir + '/A/D/G', 1))\n expected_status.tweak('A/D/G', switched = 'S')\n svntest.actions.run_and_verify_switch(wc_dir, ADG_path, sbox.repo_url,\n expected_output,\n 
expected_disk,\n expected_status,\n [],\n False, '--ignore-ancestry')\n\n#----------------------------------------------------------------------\n# Make sure that switch continue after deleting locally modified\n# directories, as update and merge do.\n\n@Issue(2505)\ndef tolerate_local_mods(sbox):\n \"tolerate deletion of a directory with local mods\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n A_path = sbox.ospath('A')\n L_path = os.path.join(A_path, 'L')\n LM_path = os.path.join(L_path, 'local_mod')\n A_url = sbox.repo_url + '/A'\n A2_url = sbox.repo_url + '/A2'\n\n svntest.actions.run_and_verify_svn(['Committing transaction...\\n',\n 'Committed revision 2.\\n'], [],\n 'cp', '-m', 'make copy', A_url, A2_url)\n\n os.mkdir(L_path)\n svntest.main.run_svn(None, 'add', L_path)\n sbox.simple_commit(message='Commit added folder')\n\n # locally modified unversioned file\n svntest.main.file_write(LM_path, 'Locally modified file.\\n', 'w+')\n\n expected_output = svntest.wc.State(wc_dir, {\n 'A/L' : Item(status=' ', treeconflict='C'),\n })\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.add({\n 'A/L' : Item(),\n 'A/L/local_mod' : Item(contents='Locally modified file.\\n'),\n })\n\n expected_status = svntest.actions.get_virginal_state(wc_dir, 3)\n expected_status.tweak('', 'iota', wc_rev=1)\n expected_status.tweak('A', switched='S')\n expected_status.add({\n 'A/L' : Item(status='A ', copied='+', treeconflict='C', wc_rev='-')\n })\n\n # Used to fail with locally modified or unversioned files\n svntest.actions.run_and_verify_switch(wc_dir, A_path, A2_url,\n expected_output,\n expected_disk,\n expected_status,\n [],\n False, '--ignore-ancestry')\n\n#----------------------------------------------------------------------\n\n# Detect tree conflicts among files and directories,\n# edited or deleted in a deep directory structure.\n#\n# See use cases 1-3 in notes/tree-conflicts/use-cases.txt for background.\n# Note that we do not try to track renames. 
The only difference from\n# the behavior of Subversion 1.4 and 1.5 is the conflicted status of the\n# parent directory.\n\n# convenience definitions\nleaf_edit = svntest.deeptrees.deep_trees_leaf_edit\ntree_del = svntest.deeptrees.deep_trees_tree_del\nleaf_del = svntest.deeptrees.deep_trees_leaf_del\n\ndisk_after_leaf_edit = svntest.deeptrees.deep_trees_after_leaf_edit\ndisk_after_leaf_del = svntest.deeptrees.deep_trees_after_leaf_del\ndisk_after_tree_del = svntest.deeptrees.deep_trees_after_tree_del\n\ndeep_trees_conflict_output = svntest.deeptrees.deep_trees_conflict_output\ndeep_trees_conflict_output_skipped = \\\n svntest.deeptrees.deep_trees_conflict_output_skipped\ndeep_trees_status_local_tree_del = \\\n svntest.deeptrees.deep_trees_status_local_tree_del\ndeep_trees_status_local_leaf_edit = \\\n svntest.deeptrees.deep_trees_status_local_leaf_edit\n\nDeepTreesTestCase = svntest.deeptrees.DeepTreesTestCase\n\nj = os.path.join\n\n\ndef tree_conflicts_on_switch_1_1(sbox):\n \"tree conflicts 1.1: tree del, leaf edit on switch\"\n\n sbox.build()\n\n # use case 1, as in notes/tree-conflicts/use-cases.txt\n # 1.1) local tree delete, incoming leaf edit\n\n expected_output = deep_trees_conflict_output.copy()\n expected_output.add({\n 'DDD/D1/D2' : Item(status=' ', treeconflict='U'),\n 'DDD/D1/D2/D3' : Item(status=' ', treeconflict='U'),\n 'DDD/D1/D2/D3/zeta' : Item(status=' ', treeconflict='A'),\n 'DD/D1/D2' : Item(status=' ', treeconflict='U'),\n 'DD/D1/D2/epsilon' : Item(status=' ', treeconflict='A'),\n 'DF/D1/beta' : Item(status=' ', treeconflict='U'),\n 'D/D1/delta' : Item(status=' ', treeconflict='A'),\n 'DDF/D1/D2' : Item(status=' ', treeconflict='U'),\n 'DDF/D1/D2/gamma' : Item(status=' ', treeconflict='U')\n })\n\n expected_disk = svntest.wc.State('', {\n 'F' : Item(),\n 'D' : Item(),\n 'DF' : Item(),\n 'DD' : Item(),\n 'DDF' : Item(),\n 'DDD' : Item(),\n })\n\n # The files delta, epsilon, and zeta are incoming additions, but since\n # they are all within locally deleted trees they should also be scheduled\n # for deletion.\n expected_status = deep_trees_status_local_tree_del.copy()\n expected_status.add({\n 'D/D1/delta' : Item(status='D '),\n 'DD/D1/D2/epsilon' : Item(status='D '),\n 'DDD/D1/D2/D3/zeta' : Item(status='D '),\n })\n expected_status.tweak('', switched='S')\n\n # Update to the target rev.\n expected_status.tweak(wc_rev=3)\n\n expected_info = {\n 'F/alpha' : {\n 'Tree conflict' :\n '^local file delete, incoming file edit upon switch'\n + ' Source left: .file.*/F/alpha@2'\n + ' Source right: .file.*/F/alpha@3$',\n },\n 'DF/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DF/D1@2'\n + ' Source right: .dir.*/DF/D1@3$',\n },\n 'DDF/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DDF/D1@2'\n + ' Source right: .dir.*/DDF/D1@3$',\n },\n 'D/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/D/D1@2'\n + ' Source right: .dir.*/D/D1@3$',\n },\n 'DD/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DD/D1@2'\n + ' Source right: .dir.*/DD/D1@3$',\n },\n 'DDD/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DDD/D1@2'\n + ' Source right: .dir.*/DDD/D1@3$',\n },\n }\n\n svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,\n [ DeepTreesTestCase(\"local_tree_del_incoming_leaf_edit\",\n tree_del,\n 
leaf_edit,\n expected_output,\n expected_disk,\n expected_status,\n expected_info = expected_info) ] )\n\n\n@Issue(3334)\ndef tree_conflicts_on_switch_1_2(sbox):\n \"tree conflicts 1.2: tree del, leaf del on switch\"\n\n sbox.build()\n\n # 1.2) local tree delete, incoming leaf delete\n\n expected_output = deep_trees_conflict_output.copy()\n expected_output.add({\n 'DD/D1/D2' : Item(status=' ', treeconflict='D'),\n 'DDF/D1/D2' : Item(status=' ', treeconflict='U'),\n 'DDF/D1/D2/gamma' : Item(status=' ', treeconflict='D'),\n 'DDD/D1/D2' : Item(status=' ', treeconflict='U'),\n 'DDD/D1/D2/D3' : Item(status=' ', treeconflict='D'),\n 'DF/D1/beta' : Item(status=' ', treeconflict='D'),\n })\n\n expected_disk = svntest.wc.State('', {\n 'F' : Item(),\n 'D' : Item(),\n 'DF' : Item(),\n 'DD' : Item(),\n 'DDF' : Item(),\n 'DDD' : Item(),\n })\n\n expected_status = deep_trees_status_local_tree_del.copy()\n\n # Expect the incoming leaf deletes to actually occur. Even though they\n # are within (or in the case of F/alpha and D/D1 are the same as) the\n # trees locally scheduled for deletion we must still delete them and\n # update the scheduled for deletion items to the target rev. Otherwise\n # once the conflicts are resolved we still have a mixed-rev WC we can't\n # commit without updating...which, you guessed it, raises tree conflicts\n # again, repeat ad infinitum - see issue #3334.\n #\n # Update to the target rev.\n expected_status.tweak(wc_rev=3)\n expected_status.tweak('F/alpha',\n 'D/D1',\n status='! ', wc_rev=None)\n expected_status.tweak('', switched='S')\n # Remove the incoming deletes from status and disk.\n expected_status.remove('DD/D1/D2',\n 'DDD/D1/D2/D3',\n 'DDF/D1/D2/gamma',\n 'DF/D1/beta')\n\n expected_info = {\n 'F/alpha' : {\n 'Tree conflict' :\n '^local file delete, incoming file delete or move upon switch'\n + ' Source left: .file.*/F/alpha@2'\n + ' Source right: .none.*(/F/alpha@3)?$',\n },\n 'DF/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DF/D1@2'\n + ' Source right: .dir.*/DF/D1@3$',\n },\n 'DDF/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DDF/D1@2'\n + ' Source right: .dir.*/DDF/D1@3$',\n },\n 'D/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/D/D1@2'\n + ' Source right: .none.*(/D/D1@3)?$',\n },\n 'DD/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DD/D1@2'\n + ' Source right: .dir.*/DD/D1@3$',\n },\n 'DDD/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir edit upon switch'\n + ' Source left: .dir.*/DDD/D1@2'\n + ' Source right: .dir.*/DDD/D1@3$',\n },\n }\n\n svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,\n [ DeepTreesTestCase(\"local_tree_del_incoming_leaf_del\",\n tree_del,\n leaf_del,\n expected_output,\n expected_disk,\n expected_status,\n expected_info = expected_info) ] )\n\n\n@Issue(3334)\ndef tree_conflicts_on_switch_2_1(sbox):\n \"tree conflicts 2.1: leaf edit, tree del on switch\"\n\n # use case 2, as in notes/tree-conflicts/use-cases.txt\n # 2.1) local leaf edit, incoming tree delete\n\n expected_output = deep_trees_conflict_output\n\n expected_disk = disk_after_leaf_edit.copy()\n\n expected_status = deep_trees_status_local_leaf_edit.copy()\n\n # The expectation on 'alpha' reflects partial progress on issue #3334.\n expected_status.tweak('D/D1',\n 'F/alpha',\n 'DD/D1',\n 'DF/D1',\n 
'DDD/D1',\n 'DDF/D1',\n status='A ', copied='+', wc_rev='-')\n # See the status of all the paths *under* the above six subtrees. Only the\n # roots of the added subtrees show as schedule 'A', these childs paths show\n # only that history is scheduled with the commit.\n expected_status.tweak(\n 'DD/D1/D2',\n 'DDD/D1/D2',\n 'DDD/D1/D2/D3',\n 'DF/D1/beta',\n 'DDF/D1/D2',\n 'DDF/D1/D2/gamma',\n copied='+', wc_rev='-')\n expected_status.tweak('', switched='S')\n\n expected_info = {\n 'F/alpha' : {\n 'Tree conflict' :\n '^local file edit, incoming file delete or move upon switch'\n + ' Source left: .file.*/F/alpha@2'\n + ' Source right: .none.*(/F/alpha@3)?$',\n },\n 'DF/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DF/D1@2'\n + ' Source right: .none.*(/DF/D1@3)?$',\n },\n 'DDF/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DDF/D1@2'\n + ' Source right: .none.*(/DDF/D1@3)?$',\n },\n 'D/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/D/D1@2'\n + ' Source right: .none.*(/D/D1@3)?$',\n },\n 'DD/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DD/D1@2'\n + ' Source right: .none.*(/DD/D1@3)?$',\n },\n 'DDD/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DDD/D1@2'\n + ' Source right: .none.*(/DDD/D1@3)?$',\n },\n }\n\n ### D/D1/delta is locally-added during leaf_edit. when tree_del executes,\n ### it will delete D/D1, and the switch reschedules local D/D1 for\n ### local-copy from its original revision. however, right now, we cannot\n ### denote that delta is a local-add rather than a child of that D/D1 copy.\n ### thus, it appears in the status output as a (M)odified child.\n svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,\n [ DeepTreesTestCase(\"local_leaf_edit_incoming_tree_del\",\n leaf_edit,\n tree_del,\n expected_output,\n expected_disk,\n expected_status,\n expected_info = expected_info) ] )\n\n\ndef tree_conflicts_on_switch_2_2(sbox):\n \"tree conflicts 2.2: leaf del, tree del on switch\"\n\n # 2.2) local leaf delete, incoming tree delete\n\n ### Current behaviour fails to show conflicts when deleting\n ### a directory tree that has modifications. (Will be solved\n ### when dirs_same_p() is implemented)\n expected_output = deep_trees_conflict_output\n\n expected_disk = svntest.wc.State('', {\n 'DDF/D1/D2' : Item(),\n 'F' : Item(),\n 'D' : Item(),\n 'DF/D1' : Item(),\n 'DD/D1' : Item(),\n 'DDD/D1/D2' : Item(),\n })\n\n expected_status = svntest.deeptrees.deep_trees_virginal_state.copy()\n expected_status.add({'' : Item(),\n 'F/alpha' : Item()})\n expected_status.tweak(contents=None, status=' ', wc_rev=3)\n expected_status.tweak('', switched='S')\n\n # Expect the incoming tree deletes and the local leaf deletes to mean\n # that all deleted paths are *really* gone, not simply scheduled for\n # deletion.\n expected_status.tweak('DD/D1', 'DF/D1', 'DDF/D1', 'DDD/D1',\n status='A ', copied='+', treeconflict='C',\n wc_rev='-')\n expected_status.tweak('DDF/D1/D2', 'DDD/D1/D2',\n copied='+', wc_rev='-')\n expected_status.tweak('DD/D1/D2', 'DF/D1/beta', 'DDD/D1/D2/D3',\n 'DDF/D1/D2/gamma',\n status='D ', copied='+', wc_rev='-')\n expected_status.tweak('F/alpha', 'D/D1',\n status='! 
', treeconflict='C', wc_rev=None)\n\n expected_info = {\n 'F/alpha' : {\n 'Tree conflict' :\n '^local file delete, incoming file delete or move upon switch'\n + ' Source left: .file.*/F/alpha@2'\n + ' Source right: .none.*(/F/alpha@3)?$',\n },\n 'DF/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DF/D1@2'\n + ' Source right: .none.*(/DF/D1@3)?$',\n },\n 'DDF/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DDF/D1@2'\n + ' Source right: .none.*(/DDF/D1@3)?$',\n },\n 'D/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/D/D1@2'\n + ' Source right: .none.*(/D/D1@3)?$',\n },\n 'DD/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DD/D1@2'\n + ' Source right: .none.*(/DD/D1@3)?$',\n },\n 'DDD/D1' : {\n 'Tree conflict' :\n '^local dir edit, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DDD/D1@2'\n + ' Source right: .none.*(/DDD/D1@3)?$',\n },\n }\n\n svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,\n [ DeepTreesTestCase(\"local_leaf_del_incoming_tree_del\",\n leaf_del,\n tree_del,\n expected_output,\n expected_disk,\n expected_status,\n expected_info = expected_info) ] )\n\n\ndef tree_conflicts_on_switch_3(sbox):\n \"tree conflicts 3: tree del, tree del on switch\"\n\n # use case 3, as in notes/tree-conflicts/use-cases.txt\n # local tree delete, incoming tree delete\n\n expected_output = deep_trees_conflict_output\n\n expected_disk = svntest.wc.State('', {\n 'F' : Item(),\n 'D' : Item(),\n 'DF' : Item(),\n 'DD' : Item(),\n 'DDF' : Item(),\n 'DDD' : Item(),\n })\n\n expected_status = deep_trees_status_local_tree_del.copy()\n expected_status.tweak('', switched='S')\n\n # Expect the incoming tree deletes and the local tree deletes to mean\n # that all deleted paths are *really* gone, not simply scheduled for\n # deletion.\n expected_status.tweak('F/alpha',\n 'D/D1',\n 'DD/D1',\n 'DF/D1',\n 'DDD/D1',\n 'DDF/D1',\n status='! 
', wc_rev=None)\n # Remove from expected status and disk everything below the deleted paths.\n expected_status.remove('DD/D1/D2',\n 'DF/D1/beta',\n 'DDD/D1/D2',\n 'DDD/D1/D2/D3',\n 'DDF/D1/D2',\n 'DDF/D1/D2/gamma',)\n\n expected_info = {\n 'F/alpha' : {\n 'Tree conflict' :\n '^local file delete, incoming file delete or move upon switch'\n + ' Source left: .file.*/F/alpha@2'\n + ' Source right: .none.*(/F/alpha@3)?$',\n },\n 'DF/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DF/D1@2'\n + ' Source right: .none.*(/DF/D1@3)?$',\n },\n 'DDF/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DDF/D1@2'\n + ' Source right: .none.*(/DDF/D1@3)?$',\n },\n 'D/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/D/D1@2'\n + ' Source right: .none.*(/D/D1@3)?$',\n },\n 'DD/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DD/D1@2'\n + ' Source right: .none.*(/DD/D1@3)?$',\n },\n 'DDD/D1' : {\n 'Tree conflict' :\n '^local dir delete, incoming dir delete or move upon switch'\n + ' Source left: .dir.*/DDD/D1@2'\n + ' Source right: .none.*(/DDD/D1@3)?$',\n },\n }\n\n svntest.deeptrees.deep_trees_run_tests_scheme_for_switch(sbox,\n [ DeepTreesTestCase(\"local_tree_del_incoming_tree_del\",\n tree_del,\n tree_del,\n expected_output,\n expected_disk,\n expected_status,\n expected_info = expected_info) ] )\n\ndef copy_with_switched_subdir(sbox):\n \"copy directory with switched subdir\"\n sbox.build()\n wc_dir = sbox.wc_dir\n D = sbox.ospath('A/D')\n G = os.path.join(D, 'G')\n\n E_url = sbox.repo_url + '/A/B/E'\n R = sbox.ospath('R')\n\n state = svntest.actions.get_virginal_state(wc_dir, 1)\n\n # Verify before switching\n svntest.actions.run_and_verify_status(wc_dir, state)\n\n # Switch A/D/G\n svntest.actions.run_and_verify_svn(None, [], 'switch',\n '--ignore-ancestry', E_url, G)\n\n state.tweak('A/D/G', switched='S')\n state.remove('A/D/G/pi', 'A/D/G/rho', 'A/D/G/tau')\n state.add({\n 'A/D/G/alpha' : Item(status=' ', wc_rev=1),\n 'A/D/G/beta' : Item(status=' ', wc_rev=1),\n })\n svntest.actions.run_and_verify_status(wc_dir, state)\n\n # And now copy A/D and everything below it to R\n svntest.actions.run_and_verify_svn(None, [], 'cp', D, R)\n\n state.add({\n 'R' : Item(status='A ', copied='+', wc_rev='-'),\n 'R/gamma' : Item(status=' ', copied='+', wc_rev='-'),\n 'R/G/alpha' : Item(status=' ', copied='+', wc_rev='-'),\n 'R/G/beta' : Item(status=' ', copied='+', wc_rev='-'),\n 'R/H' : Item(status=' ', copied='+', wc_rev='-'),\n 'R/H/chi' : Item(status=' ', copied='+', wc_rev='-'),\n 'R/H/omega' : Item(status=' ', copied='+', wc_rev='-'),\n 'R/H/psi' : Item(status=' ', copied='+', wc_rev='-'),\n 'R/G' : Item(status='A ', copied='+', wc_rev='-'),\n })\n\n svntest.actions.run_and_verify_status(wc_dir, state)\n\n sbox.simple_commit(message='Commit added folder')\n\n # Additional test, it should commit to R/G/alpha.\n svntest.main.run_svn(None, 'up', wc_dir)\n svntest.main.file_append(sbox.ospath('R/G/alpha'), \"apple\")\n sbox.simple_commit(message='Commit changed file')\n\n # Checkout working copy to verify result\n svntest.main.safe_rmtree(wc_dir, 1)\n svntest.actions.run_and_verify_svn(None, [],\n 'checkout',\n sbox.repo_url, wc_dir)\n\n # Switch A/D/G again to recreate state\n svntest.actions.run_and_verify_svn(None, [], 'switch',\n 
'--ignore-ancestry', E_url, G)\n\n # Clear the statuses\n state.tweak(status=' ', copied=None, wc_rev='3', entry_status=None)\n # But reset the switched state\n state.tweak('A/D/G', switched='S')\n\n svntest.actions.run_and_verify_status(wc_dir, state)\n\n@Issue(3871)\ndef up_to_old_rev_with_subtree_switched_to_root(sbox):\n \"up to old rev with subtree switched to root\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n\n # Some paths we'll care about.\n A_path = sbox.ospath('A')\n branch_path = sbox.ospath('branch')\n\n # Starting with a vanilla greek tree, create a branch of A, switch\n # that branch to the root of the repository, then update the WC to\n # r1.\n svntest.actions.run_and_verify_svn(None, [], 'copy', A_path,\n branch_path)\n svntest.actions.run_and_verify_svn(None, [], 'ci', wc_dir,\n '-m', 'Create a branch')\n svntest.actions.run_and_verify_svn(None, [], 'sw', sbox.repo_url,\n branch_path, '--ignore-ancestry')\n\n # Now update the WC to r1.\n svntest.actions.run_and_verify_svn(None, [], 'up', '-r1', wc_dir)\n\ndef different_node_kind(sbox):\n \"switch to a different node kind\"\n sbox.build(read_only = True)\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n pristine_disk = svntest.main.greek_state\n pristine_status = svntest.actions.get_virginal_state(sbox.wc_dir, 1)\n expected_disk = pristine_disk.copy()\n expected_status = pristine_status.copy()\n\n def switch_to_dir(sbox, rel_url, rel_path):\n full_url = sbox.repo_url + '/' + rel_url\n full_path = sbox.ospath(rel_path)\n expected_disk.remove(rel_path)\n expected_disk.add({ rel_path : pristine_disk.desc[rel_url] })\n expected_disk.add_state(rel_path, pristine_disk.subtree(rel_url))\n expected_status.tweak(rel_path, switched='S')\n expected_status.add_state(rel_path, pristine_status.subtree(rel_url))\n svntest.actions.run_and_verify_switch(sbox.wc_dir, full_path, full_url,\n None, expected_disk, expected_status,\n [], False,\n '--ignore-ancestry')\n svntest.actions.run_and_verify_svn(None, [], 'info', full_path)\n if not os.path.isdir(full_path):\n raise svntest.Failure\n\n def switch_to_file(sbox, rel_url, rel_path):\n full_url = sbox.repo_url + '/' + rel_url\n full_path = sbox.ospath(rel_path)\n expected_disk.remove_subtree(rel_path)\n expected_disk.add({ rel_path : pristine_disk.desc[rel_url] })\n expected_status.remove_subtree(rel_path)\n expected_status.add({ rel_path : pristine_status.desc[rel_url] })\n expected_status.tweak(rel_path, switched='S')\n svntest.actions.run_and_verify_switch(sbox.wc_dir, full_path, full_url,\n None, expected_disk, expected_status,\n [], False,\n '--ignore-ancestry')\n svntest.actions.run_and_verify_svn(None, [], 'info', full_path)\n if not os.path.isfile(full_path):\n raise svntest.Failure\n\n # Switch two files to dirs and two dirs to files.\n # 'A/C' is an empty dir; 'A/D/G' is a non-empty dir.\n switch_to_dir(sbox, 'A/C', 'iota')\n switch_to_dir(sbox, 'A/D/G', 'A/D/gamma')\n switch_to_file(sbox, 'iota', 'A/C')\n switch_to_file(sbox, 'A/D/gamma', 'A/D/G')\n\n@Issue(3332, 3333)\ndef switch_to_spaces(sbox):\n \"switch to a directory with spaces in its name\"\n\n sbox.build()\n wc_dir = sbox.wc_dir\n repo_url = sbox.repo_url\n\n # Paths are normalized in the command processing, so %20 is equivalent to ' '\n svntest.actions.run_and_verify_svn(None, [],\n 'cp', repo_url + '/A',\n repo_url + '/A%20with space',\n '-m', '')\n\n svntest.actions.run_and_verify_svn(None, [],\n 'mv', repo_url + '/A%20with space',\n repo_url + '/A with%20more spaces',\n '-m', '')\n\n expected_status = 
svntest.actions.get_virginal_state(wc_dir, 3)\n expected_status.tweak('A', switched='S')\n expected_status.tweak('', 'iota', wc_rev=1)\n\n svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'),\n repo_url + '/A%20with more%20spaces',\n None, None, expected_status)\n\ndef switch_across_replacement(sbox):\n \"switch across a node replacement\"\n sbox.build()\n os.chdir(sbox.wc_dir)\n sbox.wc_dir = ''\n\n # replacement\n sbox.simple_rm('A/mu')\n sbox.simple_append('A/mu', \"This is the file 'mu'.\\n\", truncate=True)\n sbox.simple_add('A/mu')\n sbox.simple_commit() # r2\n\n # When 'switch' of a dir brings in a replacement of a child file with no\n # textual difference and ignoring ancestry, the switch doesn't report any\n # incoming change at all, (and so won't raise a tree conflict if there is\n # a local mod). 'update' on the other hand does report the replacement\n # as expected.\n\n # This test FAILs when using a Subversion 1.0-1.7 svnserve.\n\n expected_output = svntest.wc.State(sbox.wc_dir, {\n 'A/mu' : Item(status='A ', prev_status='D '),\n })\n svntest.actions.run_and_verify_update(sbox.wc_dir,\n expected_output, None, None,\n [], False,\n '-r1')\n svntest.actions.run_and_verify_update(sbox.wc_dir,\n expected_output, None, None,\n [], False,\n '-r2')\n svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'), '^/A',\n expected_output, None, None,\n [], False,\n '-r1')\n\n@Issue(1975)\ndef switch_keywords(sbox):\n \"switch and svn:keywords\"\n sbox.build()\n gamma_path = sbox.ospath('A/D/gamma')\n psi_path = sbox.ospath('A/D/H/psi')\n\n sbox.simple_propset('svn:keywords', 'URL', 'A/D/gamma')\n svntest.main.file_write(gamma_path, \"$URL$\\n\")\n sbox.simple_propset('svn:keywords', 'URL', 'A/D/H/psi')\n svntest.main.file_write(psi_path, \"$URL$\\n\")\n sbox.simple_commit()\n\n expected_disk = svntest.main.greek_state.copy()\n expected_disk.tweak('A/D/gamma',\n contents=\"$URL: %s/A/D/gamma $\\n\" % sbox.repo_url)\n expected_disk.tweak('A/D/H/psi',\n contents=\"$URL: %s/A/D/H/psi $\\n\" % sbox.repo_url)\n\n svntest.actions.run_and_verify_update(sbox.wc_dir,\n None, expected_disk, None)\n sbox.simple_copy('A', 'A_copy')\n sbox.simple_commit()\n sbox.simple_update()\n\n # Next, we're going to switch A to A_copy, and expect keywords\n # in the switched files gamma and psi to be updated accordingly.\n\n expected_disk.add({\n 'A_copy/D/H/chi' : Item(contents=\"This is the file 'chi'.\\n\"),\n 'A_copy/D/H/psi' : Item(contents=\"$URL: %s/A_copy/D/H/psi $\\n\"\n % sbox.repo_url),\n 'A_copy/D/H/omega' : Item(contents=\"This is the file 'omega'.\\n\"),\n 'A_copy/D/G/pi' : Item(contents=\"This is the file 'pi'.\\n\"),\n 'A_copy/D/G/tau' : Item(contents=\"This is the file 'tau'.\\n\"),\n 'A_copy/D/G/rho' : Item(contents=\"This is the file 'rho'.\\n\"),\n 'A_copy/D/gamma' : Item(contents=\"$URL: %s/A_copy/D/gamma $\\n\"\n % sbox.repo_url),\n 'A_copy/B/F' : Item(),\n 'A_copy/B/E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'A_copy/B/E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'A_copy/B/lambda' : Item(contents=\"This is the file 'lambda'.\\n\"),\n 'A_copy/mu' : Item(contents=\"This is the file 'mu'.\\n\"),\n 'A_copy/C' : Item(),\n })\n\n # update expected URL for switched gamma\n expected_disk.tweak('A/D/gamma',\n contents=\"$URL: %s/A_copy/D/gamma $\\n\" % sbox.repo_url)\n\n # leave gamma unmodified, locally modify psi\n svntest.main.file_write(psi_path, \"$URL$\\nnew line\\n\")\n # update expected URL for switched psi\n 
expected_disk.tweak('A/D/H/psi',\n contents=\"$URL: %s/A_copy/D/H/psi $\\nnew line\\n\"\n % sbox.repo_url)\n\n expected_status = svntest.actions.get_virginal_state(sbox.wc_dir, 3)\n expected_status.add({\n 'A_copy' : Item(status=' ', wc_rev='3'),\n 'A_copy/mu' : Item(status=' ', wc_rev='3'),\n 'A_copy/D' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/H' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/H/psi' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/H/chi' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/H/omega' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/gamma' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/G' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/G/rho' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/G/tau' : Item(status=' ', wc_rev='3'),\n 'A_copy/D/G/pi' : Item(status=' ', wc_rev='3'),\n 'A_copy/B' : Item(status=' ', wc_rev='3'),\n 'A_copy/B/E' : Item(status=' ', wc_rev='3'),\n 'A_copy/B/E/alpha' : Item(status=' ', wc_rev='3'),\n 'A_copy/B/E/beta' : Item(status=' ', wc_rev='3'),\n 'A_copy/B/F' : Item(status=' ', wc_rev='3'),\n 'A_copy/B/lambda' : Item(status=' ', wc_rev='3'),\n 'A_copy/C' : Item(status=' ', wc_rev='3'),\n })\n expected_status.tweak('A', switched='S')\n expected_status.tweak('A/D/H/psi', status='M ')\n\n # both gamma and psi should have update URLs after the switch\n svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath('A'), '^/A_copy',\n None, expected_disk, expected_status)\n\n@Issue(4524)\ndef switch_moves(sbox):\n \"switch moves on wc checkpoint\"\n\n sbox.build()\n\n sbox.simple_move('A/B', 'B')\n sbox.simple_rm('A')\n\n branch_url = sbox.repo_url + '/branch'\n\n svntest.actions.run_and_verify_svn(None, [],\n 'cp', sbox.wc_dir, branch_url,\n '-m', '')\n\n expected_disk = svntest.wc.State('', {\n 'B/E/alpha' : Item(contents=\"This is the file 'alpha'.\\n\"),\n 'B/E/beta' : Item(contents=\"This is the file 'beta'.\\n\"),\n 'B/lambda' : Item(contents=\"This is the file 'lambda'.\\n\"),\n 'B/F' : Item(),\n 'iota' : Item(contents=\"This is the file 'iota'.\\n\"),\n })\n\n expected_status = svntest.wc.State(sbox.wc_dir, {\n '' : Item(status=' ', wc_rev='2'),\n 'B' : Item(status='R ', copied='+', treeconflict='C', wc_rev='-'),\n 'B/lambda' : Item(status=' ', copied='+', wc_rev='-'),\n 'B/F' : Item(status=' ', copied='+', wc_rev='-'),\n 'B/E' : Item(status=' ', copied='+', wc_rev='-'),\n 'B/E/beta' : Item(status=' ', copied='+', wc_rev='-'),\n 'B/E/alpha' : Item(status=' ', copied='+', wc_rev='-'),\n 'A' : Item(status='! 
', treeconflict='C'),\n 'iota' : Item(status=' ', wc_rev='2'),\n })\n\n # In Subversion 1.8 this scenario causes an Sqlite row not found error.\n # It would be nice if we could handle the tree conflict more intelligent, as\n # the working copy matches the incomming change.\n svntest.actions.run_and_verify_switch(sbox.wc_dir, sbox.ospath(''), branch_url,\n None, expected_disk, expected_status)\n\n\n########################################################################\n# Run the tests\n\n# list all tests here, starting with None:\ntest_list = [ None,\n routine_switching,\n commit_switched_things,\n full_update,\n full_rev_update,\n update_switched_things,\n rev_update_switched_things,\n log_switched_file,\n delete_subdir,\n file_dir_file,\n nonrecursive_switching,\n failed_anchor_is_target,\n bad_intermediate_urls,\n obstructed_switch,\n commit_mods_below_switch,\n refresh_read_only_attribute,\n switch_change_repos_root,\n forced_switch,\n forced_switch_failures,\n switch_scheduled_add,\n mergeinfo_switch_elision,\n switch_with_obstructing_local_adds,\n switch_with_depth,\n switch_to_dir_with_peg_rev,\n switch_urls_with_spaces,\n switch_to_dir_with_peg_rev2,\n switch_to_root,\n tolerate_local_mods,\n tree_conflicts_on_switch_1_1,\n tree_conflicts_on_switch_1_2,\n tree_conflicts_on_switch_2_1,\n tree_conflicts_on_switch_2_2,\n tree_conflicts_on_switch_3,\n copy_with_switched_subdir,\n up_to_old_rev_with_subtree_switched_to_root,\n different_node_kind,\n switch_to_spaces,\n switch_across_replacement,\n switch_keywords,\n switch_moves,\n ]\n\nif __name__ == '__main__':\n svntest.main.run_tests(test_list)\n # NOTREACHED\n\n\n### End of file.\n", "id": "4055536", "language": "Python", "matching_score": 2.9533352851867676, "max_stars_count": 1, "path": "subversion/tests/cmdline/switch_tests.py" }, { "content": "#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\nimport unittest, setup_path, os, sys\nfrom sys import version_info # For Python version check\nif version_info[0] >= 3:\n # Python >=3.0\n from io import StringIO\nelse:\n # Python <3.0\n from StringIO import StringIO\nfrom svn import core, repos, fs, delta\nfrom svn.core import SubversionException\nimport utils\n\nclass ChangeReceiver(delta.Editor):\n \"\"\"A delta editor which saves textdeltas for later use\"\"\"\n\n def __init__(self, src_root, tgt_root):\n self.src_root = src_root\n self.tgt_root = tgt_root\n self.textdeltas = []\n\n def apply_textdelta(self, file_baton, base_checksum, pool=None):\n def textdelta_handler(textdelta):\n if textdelta is not None:\n self.textdeltas.append(textdelta)\n return textdelta_handler\n\nclass DumpStreamParser(repos.ParseFns3):\n def __init__(self):\n repos.ParseFns3.__init__(self)\n self.ops = []\n def magic_header_record(self, version, pool=None):\n self.ops.append((\"magic-header\", version))\n def uuid_record(self, uuid, pool=None):\n self.ops.append((\"uuid\", uuid))\n def new_revision_record(self, headers, pool=None):\n rev = int(headers[repos.DUMPFILE_REVISION_NUMBER])\n self.ops.append((\"new-revision\", rev))\n return rev\n def close_revision(self, revision_baton):\n self.ops.append((\"close-revision\", revision_baton))\n def new_node_record(self, headers, revision_baton, pool=None):\n node = headers[repos.DUMPFILE_NODE_PATH]\n self.ops.append((\"new-node\", revision_baton, node))\n return (revision_baton, node)\n def close_node(self, node_baton):\n self.ops.append((\"close-node\", node_baton[0], node_baton[1]))\n def set_revision_property(self, revision_baton, name, value):\n self.ops.append((\"set-revision-prop\", revision_baton, name, value))\n def set_node_property(self, node_baton, name, value):\n self.ops.append((\"set-node-prop\", node_baton[0], node_baton[1], name, value))\n def remove_node_props(self, node_baton):\n self.ops.append((\"remove-node-props\", node_baton[0], node_baton[1]))\n def delete_node_property(self, node_baton, name):\n self.ops.append((\"delete-node-prop\", node_baton[0], node_baton[1], name))\n def apply_textdelta(self, node_baton):\n self.ops.append((\"apply-textdelta\", node_baton[0], node_baton[1]))\n return None\n def set_fulltext(self, node_baton):\n self.ops.append((\"set-fulltext\", node_baton[0], node_baton[1]))\n return None\n\n\ndef _authz_callback(root, path, pool):\n \"A dummy authz callback which always returns success.\"\n return 1\n\nclass SubversionRepositoryTestCase(unittest.TestCase):\n \"\"\"Test cases for the Subversion repository layer\"\"\"\n\n def setUp(self):\n \"\"\"Load a Subversion repository\"\"\"\n self.temper = utils.Temper()\n (self.repos, self.repos_path, _) = self.temper.alloc_known_repo(\n 'trac/versioncontrol/tests/svnrepos.dump', suffix='-repository')\n self.fs = repos.fs(self.repos)\n self.rev = fs.youngest_rev(self.fs)\n\n def tearDown(self):\n self.fs = None\n self.repos = None\n self.temper.cleanup()\n\n def test_cease_invocation(self):\n \"\"\"Test returning SVN_ERR_CEASE_INVOCATION from a callback\"\"\"\n\n revs = []\n def history_lookup(path, rev, pool):\n revs.append(rev)\n raise core.SubversionException(apr_err=core.SVN_ERR_CEASE_INVOCATION,\n message=\"Hi from history_lookup\")\n\n repos.history2(self.fs, '/trunk/README2.txt', history_lookup, None, 0,\n self.rev, True)\n self.assertEqual(len(revs), 1)\n\n def test_create(self):\n \"\"\"Make sure that 
repos.create doesn't segfault when we set fs-type\n using a config hash\"\"\"\n fs_config = { \"fs-type\": \"fsfs\" }\n for i in range(5):\n path = self.temper.alloc_empty_dir(suffix='-repository-create%d' % i)\n repos.create(path, \"\", \"\", None, fs_config)\n\n def test_dump_fs2(self):\n \"\"\"Test the dump_fs2 function\"\"\"\n\n self.callback_calls = 0\n\n def is_cancelled():\n self.callback_calls += 1\n return None\n\n dumpstream = StringIO()\n feedbackstream = StringIO()\n repos.dump_fs2(self.repos, dumpstream, feedbackstream, 0, self.rev, 0, 0,\n is_cancelled)\n\n # Check that we can dump stuff\n dump = dumpstream.getvalue()\n feedback = feedbackstream.getvalue()\n expected_feedback = \"* Dumped revision \" + str(self.rev)\n self.assertEquals(dump.count(\"Node-path: trunk/README.txt\"), 2)\n self.assertEquals(feedback.count(expected_feedback), 1)\n self.assertEquals(self.callback_calls, 13)\n\n # Check that the dump can be cancelled\n self.assertRaises(SubversionException, repos.dump_fs2,\n self.repos, dumpstream, feedbackstream, 0, self.rev, 0, 0, lambda: 1)\n\n dumpstream.close()\n feedbackstream.close()\n\n # Check that the dump fails when the dumpstream is closed\n self.assertRaises(ValueError, repos.dump_fs2,\n self.repos, dumpstream, feedbackstream, 0, self.rev, 0, 0, None)\n\n dumpstream = StringIO()\n feedbackstream = StringIO()\n\n # Check that we can grab the feedback stream, but not the dumpstream\n repos.dump_fs2(self.repos, None, feedbackstream, 0, self.rev, 0, 0, None)\n feedback = feedbackstream.getvalue()\n self.assertEquals(feedback.count(expected_feedback), 1)\n\n # Check that we can grab the dumpstream, but not the feedbackstream\n repos.dump_fs2(self.repos, dumpstream, None, 0, self.rev, 0, 0, None)\n dump = dumpstream.getvalue()\n self.assertEquals(dump.count(\"Node-path: trunk/README.txt\"), 2)\n\n # Check that we can ignore both the dumpstream and the feedbackstream\n repos.dump_fs2(self.repos, dumpstream, None, 0, self.rev, 0, 0, None)\n self.assertEquals(feedback.count(expected_feedback), 1)\n\n # FIXME: The Python bindings don't check for 'NULL' values for\n # svn_repos_t objects, so the following call segfaults\n #repos.dump_fs2(None, None, None, 0, self.rev, 0, 0, None)\n\n def test_parse_fns3(self):\n self.cancel_calls = 0\n def is_cancelled():\n self.cancel_calls += 1\n return None\n dump_path = os.path.join(os.path.dirname(sys.argv[0]),\n \"trac/versioncontrol/tests/svnrepos.dump\")\n stream = open(dump_path)\n dsp = DumpStreamParser()\n ptr, baton = repos.make_parse_fns3(dsp)\n repos.parse_dumpstream3(stream, ptr, baton, False, is_cancelled)\n stream.close()\n self.assertEqual(self.cancel_calls, 76)\n expected_list = [\n (\"magic-header\", 2),\n ('uuid', '92ea810a-adf3-0310-b540-bef912dcf5ba'),\n ('new-revision', 0),\n ('set-revision-prop', 0, 'svn:date', '2005-04-01T09:57:41.312767Z'),\n ('close-revision', 0),\n ('new-revision', 1),\n ('set-revision-prop', 1, 'svn:log', 'Initial directory layout.'),\n ('set-revision-prop', 1, 'svn:author', 'john'),\n ('set-revision-prop', 1, 'svn:date', '2005-04-01T10:00:52.353248Z'),\n ('new-node', 1, 'branches'),\n ('remove-node-props', 1, 'branches'),\n ('close-node', 1, 'branches'),\n ('new-node', 1, 'tags'),\n ('remove-node-props', 1, 'tags'),\n ('close-node', 1, 'tags'),\n ('new-node', 1, 'trunk'),\n ('remove-node-props', 1, 'trunk'),\n ('close-node', 1, 'trunk'),\n ('close-revision', 1),\n ('new-revision', 2),\n ('set-revision-prop', 2, 'svn:log', 'Added README.'),\n ('set-revision-prop', 2, 'svn:author', 
'john'),\n ('set-revision-prop', 2, 'svn:date', '2005-04-01T13:12:18.216267Z'),\n ('new-node', 2, 'trunk/README.txt'),\n ('remove-node-props', 2, 'trunk/README.txt'),\n ('set-fulltext', 2, 'trunk/README.txt'),\n ('close-node', 2, 'trunk/README.txt'),\n ('close-revision', 2), ('new-revision', 3),\n ('set-revision-prop', 3, 'svn:log', 'Fixed README.\\n'),\n ('set-revision-prop', 3, 'svn:author', 'kate'),\n ('set-revision-prop', 3, 'svn:date', '2005-04-01T13:24:58.234643Z'),\n ('new-node', 3, 'trunk/README.txt'),\n ('remove-node-props', 3, 'trunk/README.txt'),\n ('set-node-prop', 3, 'trunk/README.txt', 'svn:mime-type', 'text/plain'),\n ('set-node-prop', 3, 'trunk/README.txt', 'svn:eol-style', 'native'),\n ('set-fulltext', 3, 'trunk/README.txt'),\n ('close-node', 3, 'trunk/README.txt'), ('close-revision', 3),\n ]\n # Compare only the first X nodes described in the expected list - otherwise\n # the comparison list gets too long.\n self.assertEqual(dsp.ops[:len(expected_list)], expected_list)\n\n def test_get_logs(self):\n \"\"\"Test scope of get_logs callbacks\"\"\"\n logs = []\n def addLog(paths, revision, author, date, message, pool):\n if paths is not None:\n logs.append(paths)\n\n # Run get_logs\n repos.get_logs(self.repos, ['/'], self.rev, 0, True, 0, addLog)\n\n # Count and verify changes\n change_count = 0\n for log in logs:\n for path_changed in log.values():\n change_count += 1\n path_changed.assert_valid()\n self.assertEqual(logs[2][\"/tags/v1.1\"].action, \"A\")\n self.assertEqual(logs[2][\"/tags/v1.1\"].copyfrom_path, \"/branches/v1x\")\n self.assertEqual(len(logs), 12)\n self.assertEqual(change_count, 19)\n\n def test_dir_delta(self):\n \"\"\"Test scope of dir_delta callbacks\"\"\"\n # Run dir_delta\n this_root = fs.revision_root(self.fs, self.rev)\n prev_root = fs.revision_root(self.fs, self.rev-1)\n editor = ChangeReceiver(this_root, prev_root)\n e_ptr, e_baton = delta.make_editor(editor)\n repos.dir_delta(prev_root, '', '', this_root, '', e_ptr, e_baton,\n _authz_callback, 1, 1, 0, 0)\n\n # Check results.\n # Ignore the order in which the editor delivers the two sibling files.\n self.assertEqual(set([editor.textdeltas[0].new_data,\n editor.textdeltas[1].new_data]),\n set([\"This is a test.\\n\", \"A test.\\n\"]))\n self.assertEqual(len(editor.textdeltas), 2)\n\n def test_unnamed_editor(self):\n \"\"\"Test editor object without reference from interpreter\"\"\"\n # Check that the delta.Editor object has proper lifetime. Without\n # increment of the refcount in make_baton, the object was destroyed\n # immediately because the interpreter does not hold a reference to it.\n this_root = fs.revision_root(self.fs, self.rev)\n prev_root = fs.revision_root(self.fs, self.rev-1)\n e_ptr, e_baton = delta.make_editor(ChangeReceiver(this_root, prev_root))\n repos.dir_delta(prev_root, '', '', this_root, '', e_ptr, e_baton,\n _authz_callback, 1, 1, 0, 0)\n\n def test_retrieve_and_change_rev_prop(self):\n \"\"\"Test playing with revprops\"\"\"\n self.assertEqual(repos.fs_revision_prop(self.repos, self.rev, \"svn:log\",\n _authz_callback),\n \"''(a few years later)'' Argh... 
v1.1 was buggy, \"\n \"after all\")\n\n # We expect this to complain because we have no pre-revprop-change\n # hook script for the repository.\n self.assertRaises(SubversionException, repos.fs_change_rev_prop3,\n self.repos, self.rev, \"jrandom\", \"svn:log\",\n \"Youngest revision\", True, True, _authz_callback)\n\n repos.fs_change_rev_prop3(self.repos, self.rev, \"jrandom\", \"svn:log\",\n \"Youngest revision\", False, False,\n _authz_callback)\n\n self.assertEqual(repos.fs_revision_prop(self.repos, self.rev, \"svn:log\",\n _authz_callback),\n \"Youngest revision\")\n\n def freeze_body(self, pool):\n self.freeze_invoked += 1\n\n def test_freeze(self):\n \"\"\"Test repository freeze\"\"\"\n\n self.freeze_invoked = 0\n repos.freeze([self.repos_path], self.freeze_body)\n self.assertEqual(self.freeze_invoked, 1)\n\n def test_lock_unlock(self):\n \"\"\"Basic lock/unlock\"\"\"\n\n access = fs.create_access('jrandom')\n fs.set_access(self.fs, access)\n fs.lock(self.fs, '/trunk/README.txt', None, None, 0, 0, self.rev, False)\n try:\n fs.lock(self.fs, '/trunk/README.txt', None, None, 0, 0, self.rev, False)\n except core.SubversionException as exc:\n self.assertEqual(exc.apr_err, core.SVN_ERR_FS_PATH_ALREADY_LOCKED)\n fs.lock(self.fs, '/trunk/README.txt', None, None, 0, 0, self.rev, True)\n\n self.calls = 0\n self.errors = 0\n def unlock_callback(path, lock, err, pool):\n self.assertEqual(path, '/trunk/README.txt')\n self.assertEqual(lock, None)\n self.calls += 1\n if err != None:\n self.assertEqual(err.apr_err, core.SVN_ERR_FS_NO_SUCH_LOCK)\n self.errors += 1\n\n the_lock = fs.get_lock(self.fs, '/trunk/README.txt')\n fs.unlock_many(self.fs, {'/trunk/README.txt':the_lock.token}, False,\n unlock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.errors, 0)\n\n self.calls = 0\n fs.unlock_many(self.fs, {'/trunk/README.txt':the_lock.token}, False,\n unlock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.errors, 1)\n\n self.locks = 0\n def lock_callback(path, lock, err, pool):\n self.assertEqual(path, '/trunk/README.txt')\n if lock != None:\n self.assertEqual(lock.owner, 'jrandom')\n self.locks += 1\n self.calls += 1\n if err != None:\n self.assertEqual(err.apr_err, core.SVN_ERR_FS_PATH_ALREADY_LOCKED)\n self.errors += 1\n \n self.calls = 0\n self.errors = 0\n target = fs.lock_target_create(None, self.rev)\n fs.lock_many(self.fs, {'trunk/README.txt':target},\n None, False, 0, False, lock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.locks, 1)\n self.assertEqual(self.errors, 0)\n\n self.calls = 0\n self.locks = 0\n fs.lock_many(self.fs, {'trunk/README.txt':target},\n None, False, 0, False, lock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.locks, 0)\n self.assertEqual(self.errors, 1)\n\n self.calls = 0\n self.errors = 0\n the_lock = fs.get_lock(self.fs, '/trunk/README.txt')\n repos.fs_unlock_many(self.repos, {'trunk/README.txt':the_lock.token},\n False, unlock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.errors, 0)\n\n self.calls = 0\n repos.fs_unlock_many(self.repos, {'trunk/README.txt':the_lock.token},\n False, unlock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.errors, 1)\n\n self.calls = 0\n self.errors = 0\n repos.fs_lock_many(self.repos, {'trunk/README.txt':target},\n None, False, 0, False, lock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.locks, 1)\n self.assertEqual(self.errors, 0)\n\n self.calls = 0\n self.locks = 0\n 
repos.fs_lock_many(self.repos, {'trunk/README.txt':target},\n None, False, 0, False, lock_callback)\n self.assertEqual(self.calls, 1)\n self.assertEqual(self.locks, 0)\n self.assertEqual(self.errors, 1)\n\ndef suite():\n return unittest.defaultTestLoader.loadTestsFromTestCase(\n SubversionRepositoryTestCase)\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n runner.run(suite())\n", "id": "6892927", "language": "Python", "matching_score": 3.3936004638671875, "max_stars_count": 1, "path": "subversion/bindings/swig/python/tests/repository.py" }, { "content": "#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\nimport unittest, setup_path\nimport svn.delta\nimport svn.core\nfrom sys import version_info # For Python version check\nif version_info[0] >= 3:\n # Python >=3.0\n from io import StringIO\nelse:\n # Python <3.0\n from cStringIO import StringIO\n\n# Test case for svn.delta\nclass DeltaTestCase(unittest.TestCase):\n\n def testTxWindowHandler(self):\n \"\"\"Test tx_invoke_window_handler\"\"\"\n src_stream = StringIO(\"hello world\")\n target_stream = StringIO(\"bye world\")\n\n # Invoke the window_handler using a helper function\n window_handler, baton = \\\n svn.delta.tx_apply(src_stream, target_stream, None)\n svn.delta.tx_invoke_window_handler(window_handler, None, baton)\n\n # Invoke the window_handler directly (easier!)\n window_handler, baton = \\\n svn.delta.tx_apply(src_stream, target_stream, None)\n window_handler(None, baton)\n\n def testTxdeltaWindowT(self):\n \"\"\"Test the svn_txdelta_window_t wrapper.\"\"\"\n a = StringIO(\"abc\\ndef\\n\")\n b = StringIO(\"def\\nghi\\n\")\n\n delta_stream = svn.delta.svn_txdelta(a, b)\n window = svn.delta.svn_txdelta_next_window(delta_stream)\n\n self.assert_(window.sview_offset + window.sview_len <= len(a.getvalue()))\n self.assert_(window.tview_len <= len(b.getvalue()))\n self.assert_(len(window.new_data) > 0)\n self.assertEqual(window.num_ops, len(window.ops))\n self.assertEqual(window.src_ops, len([op for op in window.ops\n if op.action_code == svn.delta.svn_txdelta_source]))\n\n # Check that the ops inherit the window's pool\n self.assertEqual(window.ops[0]._parent_pool, window._parent_pool)\n\ndef suite():\n return unittest.defaultTestLoader.loadTestsFromTestCase(DeltaTestCase)\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner()\n runner.run(suite())\n", "id": "1868416", "language": "Python", "matching_score": 0.9585857391357422, "max_stars_count": 7, "path": "subversion/bindings/swig/python/tests/delta.py" }, { "content": "#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n# gen_make.py -- generate makefiles and dependencies\n#\n\nimport os\nimport stat\nimport sys\ntry:\n # Python >=3.0\n import configparser\nexcept ImportError:\n # Python <3.0\n import ConfigParser as configparser\n\nif sys.version_info[0] >= 3:\n # Python >=3.0\n from io import StringIO\nelse:\n # Python <3.0\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n\nimport ezt\n\nimport gen_base\nimport generator.swig.header_wrappers\nimport generator.swig.checkout_swig_header\nimport generator.swig.external_runtime\n\nfrom gen_base import build_path_join, build_path_strip, build_path_splitfile, \\\n build_path_basename, build_path_dirname, build_path_retreat, unique\n\n\ndef _normstr(x):\n if os.sep == '/':\n return os.path.normpath(str(x))\n else:\n return os.path.normpath(str(x).replace('/', os.sep)).replace(os.sep, '/')\n\nclass Generator(gen_base.GeneratorBase):\n\n _extension_map = {\n ('exe', 'target'): '$(EXEEXT)',\n ('exe', 'object'): '.lo',\n ('lib', 'target'): '.la',\n ('lib', 'object'): '.lo',\n ('pyd', 'target'): '.la',\n ('pyd', 'object'): '.lo',\n ('so', 'target'): '.la',\n ('so', 'object'): '.lo',\n }\n\n def __init__(self, fname, verfname, options=None):\n gen_base.GeneratorBase.__init__(self, fname, verfname, options)\n self.assume_shared_libs = False\n if ('--assume-shared-libs', '') in options:\n self.assume_shared_libs = True\n\n def write(self):\n install_deps = self.graph.get_deps(gen_base.DT_INSTALL)\n install_sources = self.graph.get_all_sources(gen_base.DT_INSTALL)\n\n cp = configparser.ConfigParser()\n cp.read('gen-make.opts')\n if cp.has_option('options', '--installed-libs'):\n self.installed_libs = cp.get('options', '--installed-libs').split(',')\n else:\n self.installed_libs = []\n\n # ensure consistency between runs\n install_deps.sort()\n install_sources.sort(key = lambda s: s.name)\n\n class _eztdata(object):\n def __init__(self, **kw):\n vars(self).update(kw)\n\n data = _eztdata(\n modules=[ ],\n swig_langs=[ ],\n swig_c=[ ],\n target=[ ],\n itargets=[ ],\n areas=[ ],\n isources=[ ],\n deps=[ ],\n sql=[],\n )\n\n ########################################\n\n for target in install_sources:\n if isinstance(target, gen_base.TargetRaModule) or \\\n isinstance(target, gen_base.TargetFsModule):\n # name of the module: strip 'libsvn_' and upper-case it\n name = target.name[7:].upper()\n\n # construct a list of the other .la libs to link against\n retreat = build_path_retreat(target.path)\n if target.name in self.installed_libs:\n deps = []\n link = [ '-l%s-%s' % (target.name[3:], self.version) ]\n else:\n deps = [ target.filename ]\n link = [ build_path_join(retreat, target.filename) ]\n for source in self.graph.get_sources(gen_base.DT_LINK, target.name):\n if not isinstance(source, gen_base.TargetLib) or source.external_lib:\n continue\n elif source.name in self.installed_libs:\n continue\n deps.append(source.filename)\n 
link.append(build_path_join(retreat, source.filename))\n\n data.modules.append(_eztdata(name=name, deps=deps, link=link))\n\n # write a list of directories in which things are built\n # get all the test scripts' directories\n script_dirs = list(map(build_path_dirname, self.scripts + self.bdb_scripts))\n\n # remove duplicate directories between targets and tests\n build_dirs = unique(self.target_dirs + script_dirs + self.swig_dirs)\n data.build_dirs = build_dirs\n\n # write lists of test files\n # deps = all, progs = not including those marked \"testing = skip\"\n data.bdb_test_deps = self.bdb_test_deps + self.bdb_scripts\n data.bdb_test_progs = self.bdb_test_progs + self.bdb_scripts\n data.test_deps = self.test_deps + self.scripts\n data.test_progs = self.test_progs + self.scripts\n data.test_helpers = self.test_helpers\n\n # write list of all manpages\n data.manpages = self.manpages\n\n # write a list of files to remove during \"make clean\"\n cfiles = [ ]\n for target in install_sources:\n # .la files are handled by the standard 'clean' rule; clean all the\n # other targets\n if not isinstance(target, gen_base.TargetScript) \\\n and not isinstance(target, gen_base.TargetProject) \\\n and not isinstance(target, gen_base.TargetI18N) \\\n and not isinstance(target, gen_base.TargetJava) \\\n and not target.external_lib \\\n and target.filename[-3:] != '.la':\n cfiles.append(target.filename)\n for script in self.scripts:\n if script.endswith('.py'):\n cfiles.append(script + 'c')\n data.cfiles = sorted(cfiles)\n\n # here are all the SQL files and their generated headers. the Makefile\n # has an implicit rule for generating these, so there isn't much to do\n # except to clean them out. we only do that for 'make extraclean' since\n # these are included as part of the tarball. 
the files are transformed\n # by gen-make, and developers also get a Make rule to keep them updated.\n for hdrfile, sqlfile in sorted(self.graph.get_deps(gen_base.DT_SQLHDR),\n key=lambda t: t[0]):\n data.sql.append(_eztdata(header=hdrfile, source=sqlfile[0]))\n\n data.release_mode = ezt.boolean(self.release_mode)\n\n ########################################\n\n if not self.release_mode:\n swig_rules = StringIO()\n for swig in (generator.swig.header_wrappers,\n generator.swig.checkout_swig_header,\n generator.swig.external_runtime):\n gen = swig.Generator(self.conf, \"swig\")\n gen.write_makefile_rules(swig_rules)\n\n data.swig_rules = swig_rules.getvalue()\n\n ########################################\n\n # write dependencies and build rules for generated .c files\n swig_c_deps = sorted(self.graph.get_deps(gen_base.DT_SWIG_C),\n key=lambda t: t[0].filename)\n\n swig_lang_deps = {}\n for lang in self.swig.langs:\n swig_lang_deps[lang] = []\n\n for objname, sources in swig_c_deps:\n swig_lang_deps[objname.lang].append(str(objname))\n\n for lang in self.swig.langs:\n data.swig_langs.append(_eztdata(short=self.swig.short[lang],\n deps=swig_lang_deps[lang]))\n\n ########################################\n\n if not self.release_mode:\n for objname, sources in swig_c_deps:\n data.swig_c.append(_eztdata(c_file=str(objname),\n deps=list(map(str, sources)),\n opts=self.swig.opts[objname.lang],\n source=str(sources[0])))\n\n ########################################\n\n for target_ob in install_sources:\n\n if isinstance(target_ob, gen_base.TargetScript):\n # there is nothing to build\n continue\n\n target = target_ob.name\n if isinstance(target_ob, gen_base.TargetJava):\n path = target_ob.output_dir\n else:\n path = target_ob.path\n\n retreat = build_path_retreat(path)\n\n # get the source items (.o and .la) for the link unit\n objects = [ ]\n objdeps = [ ]\n object_srcs = [ ]\n headers = [ ]\n header_classes = [ ]\n header_class_filenames = [ ]\n deps = [ ]\n libs = [ ]\n add_deps = target_ob.add_deps.split()\n\n for link_dep in self.graph.get_sources(gen_base.DT_LINK, target_ob.name):\n if isinstance(link_dep, gen_base.TargetJava):\n deps.append(link_dep.name)\n elif isinstance(link_dep, gen_base.TargetLinked):\n if link_dep.external_lib:\n libs.append(link_dep.external_lib)\n elif link_dep.external_project:\n # FIXME: This is a temporary workaround to fix build breakage\n # expeditiously. It is of questionable validity for a build\n # node to have external_project but not have external_lib.\n pass\n elif link_dep.name in self.installed_libs:\n libs.append('-l%s-%s' % (link_dep.name[3:], self.version))\n else:\n # append the output of the target to our stated dependencies\n if not self.assume_shared_libs:\n deps.append(link_dep.filename)\n\n # link against the library\n libs.append(build_path_join(retreat, link_dep.filename))\n elif isinstance(link_dep, gen_base.ObjectFile):\n # link in the object file\n objects.append(link_dep.filename)\n objdeps.append(_normstr(link_dep.filename))\n for dep in self.graph.get_sources(gen_base.DT_OBJECT, link_dep, gen_base.SourceFile):\n object_srcs.append(\n build_path_join('$(abs_srcdir)', dep.filename))\n elif isinstance(link_dep, gen_base.HeaderFile):\n # link in the header file\n # N.B. 
that filename_win contains the '_'-escaped class name\n headers.append(link_dep.filename_win)\n header_classes.append(link_dep.classname)\n for dep in self.graph.get_sources(gen_base.DT_OBJECT, link_dep, gen_base.ObjectFile):\n header_class_filenames.append(dep.filename)\n else:\n ### we don't know what this is, so we don't know what to do with it\n raise UnknownDependency\n\n for nonlib in self.graph.get_sources(gen_base.DT_NONLIB, target_ob.name):\n if isinstance(nonlib, gen_base.TargetLinked):\n if not nonlib.external_lib:\n deps.append(nonlib.filename)\n\n targ_varname = target.replace('-', '_')\n objnames = build_path_strip(path, objects)\n\n ezt_target = _eztdata(name=target_ob.name,\n varname=targ_varname,\n path=path,\n install=None,\n add_deps=add_deps,\n objects=objects,\n objdeps=objdeps,\n deps=deps,\n when=target_ob.when,\n )\n data.target.append(ezt_target)\n\n if hasattr(target_ob, 'link_cmd'):\n ezt_target.link_cmd = target_ob.link_cmd\n if hasattr(target_ob, 'output_dir'):\n ezt_target.output_dir = target_ob.output_dir\n if hasattr(target_ob, 'headers_dir'):\n ezt_target.headers_dir = target_ob.headers_dir\n\n # Add additional install dependencies if necessary\n if target_ob.add_install_deps:\n ezt_target.install = target_ob.install\n ezt_target.install_deps = target_ob.add_install_deps\n\n if isinstance(target_ob, gen_base.TargetJava):\n ezt_target.type = 'java'\n ezt_target.headers = headers\n ezt_target.sources = None\n ezt_target.jar = None\n ezt_target.classes = target_ob.classes\n\n # Build the headers from the header_classes with one 'javah' call\n if headers:\n ezt_target.header_class_filenames = header_class_filenames\n ezt_target.header_classes = header_classes\n\n # Build the objects from the object_srcs with one 'javac' call\n if object_srcs:\n ezt_target.sources = object_srcs\n\n # Once the bytecodes have been compiled up, we produce the\n # JAR.\n if target_ob.jar:\n ezt_target.jar_path = build_path_join(target_ob.classes,\n target_ob.jar)\n ezt_target.packages = target_ob.packages\n\n elif isinstance(target_ob, gen_base.TargetI18N):\n ezt_target.type = 'i18n'\n else:\n ezt_target.type = 'n/a'\n ezt_target.filename = target_ob.filename\n ezt_target.path = path\n if (isinstance(target_ob, gen_base.TargetLib)\n and not target_ob.undefined_lib_symbols):\n ezt_target.undefined_flag = '$(LT_NO_UNDEFINED)'\n else:\n ezt_target.undefined_flag = ''\n ezt_target.libs = gen_base.unique(libs)\n ezt_target.objnames = objnames\n ezt_target.basename = build_path_basename(target_ob.filename)\n\n ########################################\n\n for itype, i_targets in install_deps:\n\n # perl bindings do their own thing, \"swig-pl\" target is\n # already specified in Makefile.in\n if itype == \"swig-pl\":\n continue\n\n outputs = [ ]\n\n for t in i_targets:\n if hasattr(t, 'filename'):\n outputs.append(t.filename)\n\n data.itargets.append(_eztdata(type=itype, outputs=outputs))\n\n ########################################\n\n # for each install group, write a rule to install its outputs\n for area, inst_targets in install_deps:\n\n # perl bindings do their own thing, \"install-swig-pl\" target is\n # already specified in Makefile.in\n if area == \"swig-pl\":\n continue\n\n # get the output files for these targets, sorted in dependency order\n files = gen_base._sorted_files(self.graph, area)\n\n ezt_area_type = (area == 'apache-mod' and 'mods-shared' or area)\n ezt_area = _eztdata(type=ezt_area_type, files=[], extra_install=None)\n\n def file_to_eztdata(file):\n # cd to dirname 
before install to work around libtool 1.4.2 bug.\n dirname, fname = build_path_splitfile(file.filename)\n return _eztdata(mode=None,\n dirname=dirname, fullname=file.filename,\n filename=fname, when=file.when,\n pc_fullname=None,\n pc_installdir=None,\n pc_install_fname=None,)\n\n def apache_file_to_eztdata(file):\n # cd to dirname before install to work around libtool 1.4.2 bug.\n dirname, fname = build_path_splitfile(file.filename)\n base, ext = os.path.splitext(fname)\n name = base.replace('mod_', '')\n return _eztdata(mode='apache-mod',\n fullname=file.filename, dirname=dirname,\n name=name, filename=fname, when=file.when)\n\n if area != 'test' and area != 'bdb-test':\n data.areas.append(ezt_area)\n\n area_var = area.replace('-', '_')\n upper_var = area_var.upper()\n ezt_area.varname = area_var\n ezt_area.uppervar = upper_var\n\n for file in files:\n if isinstance(file.target, gen_base.TargetApacheMod):\n ezt_file = apache_file_to_eztdata(file)\n else:\n ezt_file = file_to_eztdata(file)\n if area == 'locale':\n lang, objext = os.path.splitext(ezt_file.filename)\n installdir = ('$(DESTDIR)$(%sdir)/%s/LC_MESSAGES'\n % (area_var, lang))\n ezt_file.installdir = installdir\n ezt_file.objext = objext\n else:\n ezt_file.install_fname = build_path_join('$(%sdir)' % area_var,\n ezt_file.filename)\n\n # Install pkg-config files\n if (isinstance(file.target, gen_base.TargetLib) and\n ezt_file.fullname.startswith('subversion/libsvn_')):\n ezt_file.pc_fullname = ezt_file.fullname.replace('-1.la', '.pc')\n ezt_file.pc_installdir = '$(pkgconfig_dir)'\n pc_install_fname = ezt_file.filename.replace('-1.la', '.pc')\n ezt_file.pc_install_fname = build_path_join(ezt_file.pc_installdir,\n pc_install_fname)\n ezt_area.files.append(ezt_file)\n\n # certain areas require hooks for extra install rules defined\n # in Makefile.in\n ### we should turn AREA into an object, then test it instead of this\n if area[:5] == 'swig-' and area[-4:] != '-lib' \\\n or area[:7] == 'javahl-' \\\n or area[:6] == 'cxxhl-' \\\n or area == 'tools':\n ezt_area.extra_install = 'yes'\n\n ########################################\n\n includedir = build_path_join('$(includedir)',\n 'subversion-%s' % self.version)\n data.includes = [_eztdata(file=file,\n src=build_path_join('$(abs_srcdir)', file),\n dst=build_path_join(includedir,\n build_path_basename(file)))\n for file in self.includes]\n data.includedir = includedir\n\n ########################################\n\n for target in install_sources:\n if not isinstance(target, gen_base.TargetScript) and \\\n not isinstance(target, gen_base.TargetJava) and \\\n not isinstance(target, gen_base.TargetI18N):\n data.isources.append(_eztdata(name=target.name,\n filename=target.filename))\n\n ########################################\n\n # write dependencies and build rules (when not using suffix rules)\n # for all other generated files which will not be installed\n # (or will be installed, but not by the main generated build)\n obj_deps = sorted(self.graph.get_deps(gen_base.DT_OBJECT),\n key=lambda t: t[0].filename)\n\n for objname, sources in obj_deps:\n dep = _eztdata(name=_normstr(objname),\n when=objname.when,\n deps=list(map(_normstr, sources)),\n cmd=objname.compile_cmd,\n source=_normstr(sources[0]))\n data.deps.append(dep)\n dep.generated = ezt.boolean(getattr(objname, 'source_generated', 0))\n\n template = ezt.Template(os.path.join('build', 'generator', 'templates',\n 'build-outputs.mk.ezt'),\n compress_whitespace=False)\n template.generate(open('build-outputs.mk', 'w'), data)\n\n 
self.write_standalone()\n\n self.write_transform_libtool_scripts(install_sources)\n\n self.write_pkg_config_dot_in_files(install_sources)\n\n def write_standalone(self):\n \"\"\"Write autogen-standalone.mk\"\"\"\n\n standalone = open(\"autogen-standalone.mk\", \"w\")\n standalone.write('# DO NOT EDIT -- AUTOMATICALLY GENERATED '\n 'BY build/generator/gen_make.py\\n')\n standalone.write('# FROM build-outputs.mk\\n')\n standalone.write('abs_srcdir = %s\\n' % os.getcwd())\n standalone.write('abs_builddir = %s\\n' % os.getcwd())\n standalone.write('top_srcdir = .\\n')\n standalone.write('top_builddir = .\\n')\n standalone.write('SWIG = swig\\n')\n standalone.write('PYTHON = ' + sys.executable + '\\n')\n standalone.write('\\n')\n standalone.write(open(\"build-outputs.mk\",\"r\").read())\n standalone.close()\n\n def write_transform_libtool_scripts(self, install_sources):\n \"\"\"Write build/transform_libtool_scripts.sh\"\"\"\n script = 'build/transform_libtool_scripts.sh'\n fd = open(script, 'w')\n fd.write('''#!/bin/sh\n# DO NOT EDIT -- AUTOMATICALLY GENERATED BY build/generator/gen_make.py\n\ntransform()\n{\n SCRIPT=\"$1\"\n LIBS=\"$2\"\n if [ -f $SCRIPT ]; then\n if grep LD_PRELOAD \"$SCRIPT\" > /dev/null; then\n :\n elif grep LD_LIBRARY_PATH \"$SCRIPT\" > /dev/null; then\n echo \"Transforming $SCRIPT\"\n EXISTINGLIBS=\"\"\n for LIB in $LIBS; do\n # exclude libsvn_test since the undefined test_funcs breaks libtool\n case $LIB in\n *libsvn_test-*) continue ;;\n esac\n if [ ! -f $LIB ]; then\n continue\n fi\n if [ -z \"$EXISTINGLIBS\" ]; then\n EXISTINGLIBS=\"$LIB\"\n else\n EXISTINGLIBS=\"$EXISTINGLIBS $LIB\"\n fi\n done\n if [ ! -z \"$EXISTINGLIBS\" ]; then\n cat \"$SCRIPT\" |\n (\n read LINE\n echo \"$LINE\"\n read LINE\n echo \"$LINE\"\n read LINE\n echo \"$LINE\"\n read LINE\n echo \"$LINE\"\n echo \"LD_PRELOAD=\\\\\"$EXISTINGLIBS\\\\\"\"\n echo \"export LD_PRELOAD\"\n cat\n ) < \"$SCRIPT\" > \"$SCRIPT.new\"\n mv -f \"$SCRIPT.new\" \"$SCRIPT\"\n chmod +x \"$SCRIPT\"\n fi\n fi\n fi\n}\n\nDIR=`pwd`\n\n''')\n libdep_cache = {}\n paths = {}\n for lib in ('libsvn_auth_gnome_keyring', 'libsvn_auth_kwallet'):\n paths[lib] = self.sections[lib].options.get('path')\n for target_ob in install_sources:\n if not isinstance(target_ob, gen_base.TargetExe):\n continue\n name = target_ob.name\n libs = self._get_all_lib_deps(target_ob.name, libdep_cache, paths)\n path = paths[name]\n for i in range(0, len(libs)):\n lib = libs[i]\n libpath = paths[libs[i]]\n libs[i] = '$DIR/%s/.libs/%s-%s.so' % (libpath, lib, self.version)\n fd.write('transform %s/%s \"%s\"\\n' % (path, name, \" \".join(libs)))\n fd.close()\n mode = stat.S_IRWXU|stat.S_IRGRP|stat.S_IXGRP|stat.S_IROTH|stat.S_IXOTH\n os.chmod(script, mode)\n\n def _get_all_lib_deps(self, target_name, libdep_cache, paths):\n if not target_name in libdep_cache:\n libs = set()\n path = None\n if target_name in self.sections:\n section = self.sections[target_name]\n opt_libs = self.sections[target_name].options.get('libs')\n paths[target_name] = section.options.get('path')\n if opt_libs:\n for lib_name in opt_libs.split():\n if lib_name.startswith('libsvn_'):\n libs.add(lib_name)\n for lib in self._get_all_lib_deps(lib_name, libdep_cache, paths):\n libs.add(lib)\n if target_name == 'libsvn_subr':\n libs.update(('libsvn_auth_gnome_keyring', 'libsvn_auth_kwallet'))\n libdep_cache[target_name] = sorted(libs)\n return libdep_cache[target_name]\n\n def write_pkg_config_dot_in_files(self, install_sources):\n \"\"\"Write pkg-config .pc.in files for Subversion 
libraries.\"\"\"\n for target_ob in install_sources:\n if not (isinstance(target_ob, gen_base.TargetLib) and\n target_ob.path.startswith('subversion/libsvn_')):\n continue\n\n lib_name = target_ob.name\n lib_path = self.sections[lib_name].options.get('path')\n lib_deps = self.sections[lib_name].options.get('libs')\n lib_desc = self.sections[lib_name].options.get('description')\n output_path = build_path_join(lib_path, lib_name + '.pc.in')\n template = ezt.Template(os.path.join('build', 'generator', 'templates',\n 'pkg-config.in.ezt'),\n compress_whitespace=False)\n class _eztdata(object):\n def __init__(self, **kw):\n vars(self).update(kw)\n\n data = _eztdata(\n lib_name=lib_name,\n lib_desc=lib_desc,\n lib_deps=[],\n lib_required=[],\n lib_required_private=[],\n )\n # libsvn_foo -> -lsvn_foo\n data.lib_deps.append('-l%s' % lib_name.replace('lib', '', 1))\n for lib_dep in lib_deps.split():\n if lib_dep == 'apriconv':\n # apriconv is part of apr-util, skip it\n continue\n external_lib = self.sections[lib_dep].options.get('external-lib')\n if external_lib:\n ### Some of Subversion's internal libraries can appear as external\n ### libs to handle conditional compilation. Skip these for now.\n if external_lib in ['$(SVN_RA_LIB_LINK)', '$(SVN_FS_LIB_LINK)']:\n continue\n # If the external library is known to support pkg-config,\n # add it to the Required: or Required.private: section.\n # Otherwise, add the external library to linker flags.\n pkg_config = self.sections[lib_dep].options.get('pkg-config')\n if pkg_config:\n private = self.sections[lib_dep].options.get('pkg-config-private')\n if private:\n data.lib_required_private.append(pkg_config)\n else:\n data.lib_required.append(pkg_config)\n else:\n # $(EXTERNAL_LIB) -> @EXTERNAL_LIB@\n data.lib_deps.append('@%s@' % external_lib[2:-1])\n else:\n data.lib_required_private.append(lib_dep)\n\n template.generate(open(output_path, 'w'), data)\n\nclass UnknownDependency(Exception):\n \"We don't know how to deal with the dependent to link it in.\"\n pass\n\n### End of file.\n", "id": "5742412", "language": "Python", "matching_score": 2.419938325881958, "max_stars_count": 0, "path": "build/generator/gen_make.py" }, { "content": "#!/usr/bin/env python\n#\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n#\n#\n# run_tests.py - run the tests in the regression test suite.\n#\n\n'''usage: python run_tests.py\n [--verbose] [--log-to-stdout] [--cleanup] [--bin=<path>]\n [--parallel | --parallel=<n>] [--global-scheduler]\n [--url=<base-url>] [--http-library=<http-library>] [--enable-sasl]\n [--fs-type=<fs-type>] [--fsfs-packing] [--fsfs-sharding=<n>]\n [--list] [--milestone-filter=<regex>] [--mode-filter=<type>]\n [--server-minor-version=<version>] [--http-proxy=<host>:<port>]\n [--httpd-version=<version>] [--httpd-whitelist=<version>]\n [--config-file=<file>] [--ssl-cert=<file>]\n [--exclusive-wc-locks] [--memcached-server=<url:port>]\n [--fsfs-compression=<type>] [--fsfs-dir-deltification=<true|false>]\n <abs_srcdir> <abs_builddir>\n <prog ...>\n\nThe optional flags and the first two parameters are passed unchanged\nto the TestHarness constructor. All other parameters are names of\ntest programs.\n\nEach <prog> should be the full path (absolute or from the current directory)\nand filename of a test program, optionally followed by '#' and a comma-\nseparated list of test numbers; the default is to run all the tests in it.\n'''\n\nimport os, sys, shutil, codecs\nimport re\nimport logging\nimport optparse, subprocess, imp, threading, traceback\nfrom datetime import datetime\n\ntry:\n # Python >=3.0\n import queue\nexcept ImportError:\n # Python <3.0\n import Queue as queue\n\nif sys.version_info < (3, 0):\n # Python >= 3.0 already has this build in\n import exceptions\n\n# Ensure the compiled C tests use a known locale (Python tests set the locale\n# explicitly).\nos.environ['LC_ALL'] = 'C'\n\n# Placeholder for the svntest module\nsvntest = None\n\nclass TextColors:\n '''Some ANSI terminal constants for output color'''\n ENDC = '\\033[0;m'\n FAILURE = '\\033[1;31m'\n SUCCESS = '\\033[1;32m'\n\n @classmethod\n def disable(cls):\n cls.ENDC = ''\n cls.FAILURE = ''\n cls.SUCCESS = ''\n\n\ndef _get_term_width():\n 'Attempt to discern the width of the terminal'\n # This may not work on all platforms, in which case the default of 80\n # characters is used. Improvements welcomed.\n\n def ioctl_GWINSZ(fd):\n try:\n import fcntl, termios, struct, os\n cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,\n struct.pack('hh', 0, 0)))\n except:\n return None\n return cr\n\n cr = None\n if not cr:\n try:\n cr = (os.environ['SVN_MAKE_CHECK_LINES'],\n os.environ['SVN_MAKE_CHECK_COLUMNS'])\n except:\n cr = None\n if not cr:\n cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n if not cr:\n try:\n fd = os.open(os.ctermid(), os.O_RDONLY)\n cr = ioctl_GWINSZ(fd)\n os.close(fd)\n except:\n pass\n if not cr:\n try:\n cr = (os.environ['LINES'], os.environ['COLUMNS'])\n except:\n cr = None\n if not cr:\n # Default\n if sys.platform == 'win32':\n cr = (25, 79)\n else:\n cr = (25, 80)\n return int(cr[1])\n\ndef ensure_str(s):\n '''If S is not a string already, convert it to a string'''\n if isinstance(s, str):\n return s\n else:\n return s.decode(\"latin-1\")\n\nclass TestHarness:\n '''Test harness for Subversion tests.\n '''\n\n def __init__(self, abs_srcdir, abs_builddir, logfile, faillogfile, opts):\n '''Construct a TestHarness instance.\n\n ABS_SRCDIR and ABS_BUILDDIR are the source and build directories.\n LOGFILE is the name of the log file. 
If LOGFILE is None, let tests\n print their output to stdout and stderr, and don't print a summary\n at the end (since there's no log file to analyze).\n OPTS are the options that will be sent to the tests.\n '''\n\n # Canonicalize the test base URL\n if opts.url is not None and opts.url[-1] == '/':\n opts.url = opts.url[:-1]\n\n # Make the configfile path absolute\n if opts.config_file is not None:\n opts.config_file = os.path.abspath(opts.config_file)\n\n # Parse out the FSFS version number\n if (opts.fs_type is not None\n and opts.fs_type.startswith('fsfs-v')):\n opts.fsfs_version = int(opts.fs_type[6:])\n opts.fs_type = 'fsfs'\n else:\n opts.fsfs_version = None\n\n self.srcdir = abs_srcdir\n self.builddir = abs_builddir\n self.logfile = logfile\n self.faillogfile = faillogfile\n self.log = None\n self.opts = opts\n\n if not sys.stdout.isatty() or sys.platform == 'win32':\n TextColors.disable()\n\n def _init_c_tests(self):\n cmdline = [None, None] # Program name and source dir\n\n if self.opts.config_file is not None:\n cmdline.append('--config-file=' + self.opts.config_file)\n elif self.opts.memcached_server is not None:\n cmdline.append('--memcached-server=' + self.opts.memcached_server)\n\n if self.opts.url is not None:\n subdir = 'subversion/tests/cmdline/svn-test-work'\n cmdline.append('--repos-url=%s' % self.opts.url +\n '/svn-test-work/repositories')\n cmdline.append('--repos-dir=%s'\n % os.path.abspath(\n os.path.join(self.builddir,\n subdir, 'repositories')))\n\n # Enable access for http\n if self.opts.url.startswith('http'):\n authzparent = os.path.join(self.builddir, subdir)\n if not os.path.exists(authzparent):\n os.makedirs(authzparent);\n open(os.path.join(authzparent, 'authz'), 'w').write('[/]\\n'\n '* = rw\\n')\n\n # ### Support --repos-template\n if self.opts.list_tests is not None:\n cmdline.append('--list')\n if (self.opts.set_log_level is not None\n and self.opts.set_log_level <= logging.DEBUG):\n cmdline.append('--verbose')\n if self.opts.cleanup is not None:\n cmdline.append('--cleanup')\n if self.opts.fs_type is not None:\n cmdline.append('--fs-type=%s' % self.opts.fs_type)\n if self.opts.fsfs_version is not None:\n cmdline.append('--fsfs-version=%d' % self.opts.fsfs_version)\n if self.opts.server_minor_version is not None:\n cmdline.append('--server-minor-version=%d' %\n self.opts.server_minor_version)\n if self.opts.mode_filter is not None:\n cmdline.append('--mode-filter=' + self.opts.mode_filter)\n if self.opts.parallel is not None:\n cmdline.append('--parallel')\n\n self.c_test_cmdline = cmdline\n\n\n def _init_py_tests(self, basedir):\n cmdline = ['--srcdir=%s' % self.srcdir]\n if self.opts.list_tests is not None:\n cmdline.append('--list')\n if self.opts.cleanup is not None:\n cmdline.append('--cleanup')\n if self.opts.parallel is not None:\n if self.opts.parallel == 1:\n cmdline.append('--parallel')\n else:\n cmdline.append('--parallel-instances=%d' % self.opts.parallel)\n if self.opts.svn_bin is not None:\n cmdline.append('--bin=%s' % self.opts.svn_bin)\n if self.opts.url is not None:\n cmdline.append('--url=%s' % self.opts.url)\n if self.opts.fs_type is not None:\n cmdline.append('--fs-type=%s' % self.opts.fs_type)\n if self.opts.http_library is not None:\n cmdline.append('--http-library=%s' % self.opts.http_library)\n if self.opts.fsfs_sharding is not None:\n cmdline.append('--fsfs-sharding=%d' % self.opts.fsfs_sharding)\n if self.opts.fsfs_packing is not None:\n cmdline.append('--fsfs-packing')\n if self.opts.fsfs_version is not None:\n 
cmdline.append('--fsfs-version=%d' % self.opts.fsfs_version)\n if self.opts.server_minor_version is not None:\n cmdline.append('--server-minor-version=%d' % self.opts.server_minor_version)\n if self.opts.dump_load_cross_check is not None:\n cmdline.append('--dump-load-cross-check')\n if self.opts.enable_sasl is not None:\n cmdline.append('--enable-sasl')\n if self.opts.config_file is not None:\n cmdline.append('--config-file=%s' % self.opts.config_file)\n if self.opts.milestone_filter is not None:\n cmdline.append('--milestone-filter=%s' % self.opts.milestone_filter)\n if self.opts.mode_filter is not None:\n cmdline.append('--mode-filter=%s' % self.opts.mode_filter)\n if self.opts.set_log_level is not None:\n cmdline.append('--set-log-level=%s' % self.opts.set_log_level)\n if self.opts.ssl_cert is not None:\n cmdline.append('--ssl-cert=%s' % self.opts.ssl_cert)\n if self.opts.http_proxy is not None:\n cmdline.append('--http-proxy=%s' % self.opts.http_proxy)\n if self.opts.http_proxy_username is not None:\n cmdline.append('--http-proxy-username=%s' % self.opts.http_proxy_username)\n if self.opts.http_proxy_password is not None:\n cmdline.append('--http-proxy-password=%s' % self.opts.http_proxy_password)\n if self.opts.httpd_version is not None:\n cmdline.append('--httpd-version=%s' % self.opts.httpd_version)\n if self.opts.httpd_whitelist is not None:\n cmdline.append('--httpd-whitelist=%s' % self.opts.httpd_whitelist)\n if self.opts.exclusive_wc_locks is not None:\n cmdline.append('--exclusive-wc-locks')\n if self.opts.memcached_server is not None:\n cmdline.append('--memcached-server=%s' % self.opts.memcached_server)\n if self.opts.fsfs_compression is not None:\n cmdline.append('--fsfs-compression=%s' % self.opts.fsfs_compression)\n if self.opts.fsfs_dir_deltification is not None:\n cmdline.append('--fsfs-dir-deltification=%s' % self.opts.fsfs_dir_deltification)\n\n self.py_test_cmdline = cmdline\n\n # The svntest module is very pedantic about the current working directory\n old_cwd = os.getcwd()\n try:\n os.chdir(basedir)\n sys.path.insert(0, os.path.abspath(os.path.join(self.srcdir, basedir)))\n\n global svntest\n __import__('svntest')\n __import__('svntest.main')\n __import__('svntest.testcase')\n svntest = sys.modules['svntest']\n svntest.main = sys.modules['svntest.main']\n svntest.testcase = sys.modules['svntest.testcase']\n\n svntest.main.parse_options(cmdline, optparse.SUPPRESS_USAGE)\n svntest.testcase.TextColors.disable()\n finally:\n os.chdir(old_cwd)\n\n class Job:\n '''A single test or test suite to execute. After execution, the results\n can be taken from the respective data fields.'''\n\n def __init__(self, number, is_python, progabs, progdir, progbase):\n '''number is the test count for C tests and the test nr for Python.'''\n self.number = number\n self.is_python = is_python\n self.progabs = progabs\n self.progdir = progdir\n self.progbase = progbase\n self.result = None\n self.stdout_lines = []\n self.stderr_lines = []\n self.taken = 0\n\n def test_count(self):\n if self.is_python:\n return 1\n else:\n return self.number\n\n def _command_line(self, harness):\n if self.is_python:\n cmdline = list(harness.py_test_cmdline)\n cmdline.insert(0, sys.executable)\n cmdline.insert(1, self.progabs)\n # Run the test apps in \"child process\" mode,\n # i.e. 
w/o cleaning up global directories etc.\n cmdline.append('-c')\n cmdline.append(str(self.number))\n else:\n cmdline = list(harness.c_test_cmdline)\n cmdline[0] = self.progabs\n cmdline[1] = '--srcdir=%s' % os.path.join(harness.srcdir, self.progdir)\n return cmdline\n\n def execute(self, harness):\n start_time = datetime.now()\n prog = subprocess.Popen(self._command_line(harness),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.progdir)\n\n self.stdout_lines = prog.stdout.readlines()\n self.stderr_lines = prog.stderr.readlines()\n prog.wait()\n self.result = prog.returncode\n self.taken = datetime.now() - start_time\n\n class CollectingThread(threading.Thread):\n '''A thread that lists the individual tests in a given case and creates\n jobs objects for them. in in test cases in their own processes.\n Receives test numbers to run from the queue, and saves results into\n the results field.'''\n def __init__(self, srcdir, builddir, testcase):\n threading.Thread.__init__(self)\n self.srcdir = srcdir\n self.builddir = builddir\n self.testcase = testcase\n self.result = []\n\n def _count_c_tests(self, progabs, progdir, progbase):\n 'Run a c test, escaping parameters as required.'\n cmdline = [ progabs, '--list' ]\n prog = subprocess.Popen(cmdline, stdout=subprocess.PIPE, cwd=progdir)\n lines = prog.stdout.readlines()\n self.result.append(TestHarness.Job(len(lines) - 2, False, progabs,\n progdir, progbase))\n prog.wait()\n\n def _count_py_tests(self, progabs, progdir, progbase):\n 'Run a c test, escaping parameters as required.'\n cmdline = [ sys.executable, progabs, '--list' ]\n prog = subprocess.Popen(cmdline, stdout=subprocess.PIPE, cwd=progdir)\n lines = prog.stdout.readlines()\n\n for i in range(0, len(lines) - 2):\n self.result.append(TestHarness.Job(i + 1, True, progabs, \n progdir, progbase))\n prog.wait()\n\n def run(self):\n \"Run a single test. Return the test's exit code.\"\n\n progdir, progbase, test_nums = self.testcase\n\n if progbase[-3:] == '.py':\n progabs = os.path.abspath(os.path.join(self.srcdir, progdir, progbase))\n self._count_py_tests(progabs, progdir, progbase)\n else:\n progabs = os.path.abspath(os.path.join(self.builddir, progdir,\n progbase))\n self._count_c_tests(progabs, progdir, progbase)\n\n def get_result(self):\n return self.result\n\n class TestSpawningThread(threading.Thread):\n '''A thread that runs test cases in their own processes.\n Receives test jobs to run from the queue, and shows some progress\n indication on stdout. The detailed test results are stored inside\n the job objects.'''\n def __init__(self, queue, harness):\n threading.Thread.__init__(self)\n self.queue = queue\n self.harness = harness\n self.results = []\n\n def run(self):\n while True:\n try:\n job = self.queue.get_nowait()\n except queue.Empty:\n return\n\n job.execute(self.harness)\n\n if job.result:\n os.write(sys.stdout.fileno(), b'!' * job.test_count())\n else:\n os.write(sys.stdout.fileno(), b'.' * job.test_count())\n\n\n def _run_global_scheduler(self, testlist, has_py_tests):\n # Collect all tests to execute (separate jobs for each test in python\n # test cases, one job for each c test case). Do that concurrently to\n # mask latency. 
This takes .5s instead of about 3s.\n threads = [ ]\n for count, testcase in enumerate(testlist):\n threads.append(self.CollectingThread(self.srcdir, self.builddir,\n testcase))\n\n for t in threads:\n t.start()\n\n jobs = []\n for t in threads:\n t.join()\n jobs.extend(t.result)\n\n # Put all jobs into our \"todo\" queue.\n # Scramble them for a more even resource utilization.\n job_queue = queue.Queue()\n total_count = 0\n scrambled = list(jobs)\n # TODO: What's this line doing, and what's the magic number?\n scrambled.sort(key=lambda x: (\"1\" if x.test_count() < 30 else \"0\") + str(x.number))\n for job in scrambled:\n total_count += job.test_count()\n job_queue.put(job)\n\n # Use the svntest infrastructure to initialize the common test template\n # wc and repos.\n if has_py_tests:\n old_cwd = os.getcwd()\n os.chdir(jobs[-1].progdir)\n svntest.main.options.keep_local_tmp = True\n svntest.main.execute_tests([])\n os.chdir(old_cwd)\n\n # Some more prep work\n if self.log:\n log = self.log\n else:\n log = sys.stdout\n\n if self.opts.parallel is None:\n thread_count = 1\n else:\n if self.opts.parallel == 1:\n thread_count = 5\n else:\n thread_count = self.opts.parallel\n\n # Actually run the tests in concurrent sub-processes\n print('Tests to execute: %d' % total_count)\n sys.stdout.flush()\n\n threads = [ TestHarness.TestSpawningThread(job_queue, self)\n for i in range(thread_count) ]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n print(\"\")\n\n # Aggregate and log the results\n failed = 0\n taken = 0\n last_test_name = \"\"\n for job in jobs:\n if last_test_name != job.progbase:\n if last_test_name != \"\":\n log.write('ELAPSED: %s %s\\n\\n' % (last_test_name, str(taken)))\n last_test_name = job.progbase\n taken = job.taken\n else:\n taken += job.taken\n\n for line in job.stderr_lines:\n log.write(ensure_str(line))\n\n for line in job.stdout_lines:\n self._process_test_output_line(ensure_str(line))\n\n self._check_for_unknown_failure(log, job.progbase, job.result)\n failed = job.result or failed\n\n log.write('ELAPSED: %s %s\\n\\n' % (last_test_name, str(taken)))\n\n return failed\n\n def _run_local_schedulers(self, testlist):\n '''Serial execution of all test suites using their respective internal\n schedulers.'''\n testcount = len(testlist)\n\n failed = 0\n for count, testcase in enumerate(testlist):\n failed = self._run_test(testcase, count, testcount) or failed\n\n return failed\n\n def run(self, testlist):\n '''Run all test programs given in TESTLIST. Print a summary of results, if\n there is a log file. Return zero iff all test programs passed.'''\n self._open_log('w')\n failed = 0\n\n # Filter tests into Python and native groups and prepare arguments\n # for each group. The resulting list will contain tuples of\n # (program dir, program name, test numbers), where the test\n # numbers may be None.\n\n def split_nums(prog):\n test_nums = []\n if '#' in prog:\n prog, test_nums = prog.split('#')\n if test_nums:\n test_nums = test_nums.split(',')\n return prog, test_nums\n\n py_basedir = set()\n py_tests = []\n c_tests = []\n\n for prog in testlist:\n progpath, testnums = split_nums(prog)\n progdir, progbase = os.path.split(progpath)\n if progpath.endswith('.py'):\n py_basedir.add(progdir)\n py_tests.append((progdir, progbase, testnums))\n elif not self.opts.skip_c_tests:\n c_tests.append((progdir, progbase, testnums))\n\n # Initialize svntest.main.options for Python tests. 
Load the\n # svntest.main module from the Python test path.\n if len(py_tests):\n if len(py_basedir) > 1:\n sys.stderr.write('The test harness requires all Python tests'\n ' to be in the same directory.')\n sys.exit(1)\n self._init_py_tests(list(py_basedir)[0])\n py_tests.sort(key=lambda x: x[1])\n\n # Create the common command line for C tests\n if len(c_tests):\n self._init_c_tests()\n c_tests.sort(key=lambda x: x[1])\n\n # Run the tests\n testlist = c_tests + py_tests\n if self.opts.global_scheduler is None:\n failed = self._run_local_schedulers(testlist)\n else:\n failed = self._run_global_scheduler(testlist, len(py_tests) > 0)\n\n # Open the log again to for filtering.\n if self.logfile:\n self._open_log('r')\n log_lines = self.log.readlines()\n else:\n log_lines = []\n\n # Remove \\r characters introduced by opening the log as binary\n if sys.platform == 'win32':\n log_lines = [x.replace('\\r', '') for x in log_lines]\n\n # Print the results, from least interesting to most interesting.\n\n # Helper for Work-In-Progress indications for XFAIL tests.\n wimptag = ' [[WIMP: '\n def printxfail(x):\n wip = x.find(wimptag)\n if 0 > wip:\n sys.stdout.write(x)\n else:\n sys.stdout.write('%s\\n [[%s'\n % (x[:wip], x[wip + len(wimptag):]))\n\n if self.opts.list_tests:\n passed = [x for x in log_lines if x[8:13] == ' ']\n else:\n passed = [x for x in log_lines if x[:6] == 'PASS: ']\n\n if self.opts.list_tests:\n skipped = [x for x in log_lines if x[8:12] == 'SKIP']\n else:\n skipped = [x for x in log_lines if x[:6] == 'SKIP: ']\n\n if skipped and not self.opts.list_tests:\n print('At least one test was SKIPPED, checking ' + self.logfile)\n for x in skipped:\n sys.stdout.write(x)\n\n if self.opts.list_tests:\n xfailed = [x for x in log_lines if x[8:13] == 'XFAIL']\n else:\n xfailed = [x for x in log_lines if x[:6] == 'XFAIL:']\n if xfailed and not self.opts.list_tests:\n print('At least one test XFAILED, checking ' + self.logfile)\n for x in xfailed:\n printxfail(x)\n\n xpassed = [x for x in log_lines if x[:6] == 'XPASS:']\n if xpassed:\n print('At least one test XPASSED, checking ' + self.logfile)\n for x in xpassed:\n printxfail(x)\n\n failed_list = [x for x in log_lines if x[:6] == 'FAIL: ']\n if failed_list:\n print('At least one test FAILED, checking ' + self.logfile)\n for x in failed_list:\n sys.stdout.write(x)\n\n # Print summaries, from least interesting to most interesting.\n if self.opts.list_tests:\n print('Summary of test listing:')\n else:\n print('Summary of test results:')\n if passed:\n if self.opts.list_tests:\n print(' %d test%s are set to PASS'\n % (len(passed), 's'*min(len(passed) - 1, 1)))\n else:\n print(' %d test%s PASSED'\n % (len(passed), 's'*min(len(passed) - 1, 1)))\n if skipped:\n if self.opts.list_tests:\n print(' %d test%s are set as SKIP'\n % (len(skipped), 's'*min(len(skipped) - 1, 1)))\n else:\n print(' %d test%s SKIPPED'\n % (len(skipped), 's'*min(len(skipped) - 1, 1)))\n if xfailed:\n passwimp = [x for x in xfailed if 0 <= x.find(wimptag)]\n if passwimp:\n if self.opts.list_tests:\n print(' %d test%s are set to XFAIL (%d WORK-IN-PROGRESS)'\n % (len(xfailed), 's'*min(len(xfailed) - 1, 1), len(passwimp)))\n else:\n print(' %d test%s XFAILED (%d WORK-IN-PROGRESS)'\n % (len(xfailed), 's'*min(len(xfailed) - 1, 1), len(passwimp)))\n else:\n if self.opts.list_tests:\n print(' %d test%s are set as XFAIL'\n % (len(xfailed), 's'*min(len(xfailed) - 1, 1)))\n else:\n print(' %d test%s XFAILED'\n % (len(xfailed), 's'*min(len(xfailed) - 1, 1)))\n if xpassed:\n failwimp = [x 
for x in xpassed if 0 <= x.find(wimptag)]\n if failwimp:\n print(' %d test%s XPASSED (%d WORK-IN-PROGRESS)'\n % (len(xpassed), 's'*min(len(xpassed) - 1, 1), len(failwimp)))\n else:\n print(' %d test%s XPASSED'\n % (len(xpassed), 's'*min(len(xpassed) - 1, 1)))\n if failed_list:\n print(' %d test%s FAILED'\n % (len(failed_list), 's'*min(len(failed_list) - 1, 1)))\n\n # Copy the truly interesting verbose logs to a separate file, for easier\n # viewing.\n if xpassed or failed_list:\n faillog = codecs.open(self.faillogfile, 'w', encoding=\"latin-1\")\n last_start_lineno = None\n last_start_re = re.compile('^(FAIL|SKIP|XFAIL|PASS|START|CLEANUP|END):')\n for lineno, line in enumerate(log_lines):\n # Iterate the lines. If it ends a test we're interested in, dump that\n # test to FAILLOG. If it starts a test (at all), remember the line\n # number (in case we need it later).\n if line in xpassed or line in failed_list:\n faillog.write('[[[\\n')\n faillog.writelines(log_lines[last_start_lineno : lineno+1])\n faillog.write(']]]\\n\\n')\n if last_start_re.match(line):\n last_start_lineno = lineno + 1\n faillog.close()\n elif self.faillogfile and os.path.exists(self.faillogfile):\n print(\"WARNING: no failures, but '%s' exists from a previous run.\"\n % self.faillogfile)\n\n # Summary.\n if failed or xpassed or failed_list:\n print(\"SUMMARY: Some tests failed.\\n\")\n else:\n print(\"SUMMARY: All tests successful.\\n\")\n\n self._close_log()\n return failed\n\n def _open_log(self, mode):\n 'Open the log file with the required MODE.'\n if self.logfile:\n self._close_log()\n self.log = codecs.open(self.logfile, mode, encoding=\"latin-1\")\n\n def _close_log(self):\n 'Close the log file.'\n if not self.log is None:\n self.log.close()\n self.log = None\n\n def _process_test_output_line(self, line):\n if sys.platform == 'win32':\n # Remove CRs inserted because we parse the output as binary.\n line = line.replace('\\r', '')\n\n # If using --log-to-stdout self.log in None.\n if self.log:\n self.log.write(line)\n\n if line.startswith('PASS') or line.startswith('FAIL') \\\n or line.startswith('XFAIL') or line.startswith('XPASS') \\\n or line.startswith('SKIP'):\n return 1\n\n return 0\n\n def _check_for_unknown_failure(self, log, progbase, test_failed):\n # We always return 1 for failed tests. Some other failure than 1\n # probably means the test didn't run at all and probably didn't\n # output any failure info. 
In that case, log a generic failure message.\n # ### Even if failure==1 it could be that the test didn't run at all.\n if test_failed and test_failed != 1:\n if self.log:\n log.write('FAIL: %s: Unknown test failure; see tests.log.\\n' % progbase)\n log.flush()\n else:\n log.write('FAIL: %s: Unknown test failure.\\n' % progbase)\n\n def _run_c_test(self, progabs, progdir, progbase, test_nums, dot_count):\n 'Run a c test, escaping parameters as required.'\n if self.opts.list_tests and self.opts.milestone_filter:\n print('WARNING: --milestone-filter option does not currently work with C tests')\n\n if not os.access(progbase, os.X_OK):\n print(\"\\nNot an executable file: \" + progbase)\n sys.exit(1)\n\n cmdline = self.c_test_cmdline[:]\n cmdline[0] = './' + progbase\n cmdline[1] = '--srcdir=%s' % os.path.join(self.srcdir, progdir)\n\n if test_nums:\n cmdline.extend(test_nums)\n total = len(test_nums)\n else:\n total_cmdline = [cmdline[0], '--list']\n prog = subprocess.Popen(total_cmdline, stdout=subprocess.PIPE)\n lines = prog.stdout.readlines()\n total = len(lines) - 2\n\n # This has to be class-scoped for use in the progress_func()\n self.dots_written = 0\n def progress_func(completed):\n if not self.log or self.dots_written >= dot_count:\n return\n dots = (completed * dot_count) // total\n if dots > dot_count:\n dots = dot_count\n dots_to_write = dots - self.dots_written\n os.write(sys.stdout.fileno(), b'.' * dots_to_write)\n self.dots_written = dots\n\n tests_completed = 0\n prog = subprocess.Popen(cmdline, stdout=subprocess.PIPE,\n stderr=self.log)\n line = prog.stdout.readline()\n while line:\n line = ensure_str(line)\n if self._process_test_output_line(line):\n tests_completed += 1\n progress_func(tests_completed)\n\n line = prog.stdout.readline()\n\n # If we didn't run any tests, still print out the dots\n if not tests_completed:\n os.write(sys.stdout.fileno(), b'.' * dot_count)\n\n prog.wait()\n return prog.returncode\n\n def _run_py_test(self, progabs, progdir, progbase, test_nums, dot_count):\n 'Run a python test, passing parameters as needed.'\n try:\n if sys.version_info < (3, 0):\n prog_mod = imp.load_module(progbase[:-3], open(progabs, 'r'), progabs,\n ('.py', 'U', imp.PY_SOURCE))\n else:\n prog_mod = imp.load_module(progbase[:-3],\n open(progabs, 'r', encoding=\"utf-8\"),\n progabs, ('.py', 'U', imp.PY_SOURCE))\n except:\n print(\"\\nError loading test (details in following traceback): \" + progbase)\n traceback.print_exc()\n sys.exit(1)\n\n # setup the output pipes\n if self.log:\n sys.stdout.flush()\n sys.stderr.flush()\n self.log.flush()\n old_stdout = os.dup(sys.stdout.fileno())\n old_stderr = os.dup(sys.stderr.fileno())\n os.dup2(self.log.fileno(), sys.stdout.fileno())\n os.dup2(self.log.fileno(), sys.stderr.fileno())\n\n # These have to be class-scoped for use in the progress_func()\n self.dots_written = 0\n self.progress_lock = threading.Lock()\n def progress_func(completed, total):\n \"\"\"Report test suite progress. Can be called from multiple threads\n in parallel mode.\"\"\"\n if not self.log:\n return\n dots = (completed * dot_count) // total\n if dots > dot_count:\n dots = dot_count\n self.progress_lock.acquire()\n if self.dots_written < dot_count:\n dots_to_write = dots - self.dots_written\n self.dots_written = dots\n os.write(old_stdout, b'.' 
* dots_to_write)\n self.progress_lock.release()\n\n serial_only = hasattr(prog_mod, 'serial_only') and prog_mod.serial_only\n\n # run the tests\n if self.opts.list_tests:\n prog_f = None\n else:\n prog_f = progress_func\n\n try:\n failed = svntest.main.execute_tests(prog_mod.test_list,\n serial_only=serial_only,\n test_name=progbase,\n progress_func=prog_f,\n test_selection=test_nums)\n except svntest.Failure:\n if self.log:\n os.write(old_stdout, b'.' * dot_count)\n failed = True\n\n # restore some values\n if self.log:\n sys.stdout.flush()\n sys.stderr.flush()\n os.dup2(old_stdout, sys.stdout.fileno())\n os.dup2(old_stderr, sys.stderr.fileno())\n os.close(old_stdout)\n os.close(old_stderr)\n\n return failed\n\n def _run_test(self, testcase, test_nr, total_tests):\n \"Run a single test. Return the test's exit code.\"\n\n if self.log:\n log = self.log\n else:\n log = sys.stdout\n\n progdir, progbase, test_nums = testcase\n if self.log:\n # Using write here because we don't want even a trailing space\n test_info = '[%s/%d] %s' % (str(test_nr + 1).zfill(len(str(total_tests))),\n total_tests, progbase)\n if self.opts.list_tests:\n sys.stdout.write('Listing tests in %s' % (test_info, ))\n else:\n sys.stdout.write('%s' % (test_info, ))\n sys.stdout.flush()\n else:\n # ### Hack for --log-to-stdout to work (but not print any dots).\n test_info = ''\n\n if self.opts.list_tests:\n log.write('LISTING: %s\\n' % progbase)\n else:\n log.write('START: %s\\n' % progbase)\n\n log.flush()\n\n start_time = datetime.now()\n\n progabs = os.path.abspath(os.path.join(self.srcdir, progdir, progbase))\n old_cwd = os.getcwd()\n line_length = _get_term_width()\n dots_needed = line_length \\\n - len(test_info) \\\n - len('success')\n try:\n os.chdir(progdir)\n if progbase[-3:] == '.py':\n testcase = self._run_py_test\n else:\n testcase = self._run_c_test\n failed = testcase(progabs, progdir, progbase, test_nums, dots_needed)\n except:\n os.chdir(old_cwd)\n raise\n else:\n os.chdir(old_cwd)\n\n self._check_for_unknown_failure(log, progbase, failed)\n\n if not self.opts.list_tests:\n # Log the elapsed time.\n elapsed_time = str(datetime.now() - start_time)\n log.write('END: %s\\n' % progbase)\n log.write('ELAPSED: %s %s\\n' % (progbase, elapsed_time))\n\n log.write('\\n')\n\n # If we are only listing the tests just add a newline, otherwise if\n # we printed a \"Running all tests in ...\" line, add the test result.\n if self.log:\n if self.opts.list_tests:\n print()\n else:\n if failed:\n print(TextColors.FAILURE + 'FAILURE' + TextColors.ENDC)\n else:\n print(TextColors.SUCCESS + 'success' + TextColors.ENDC)\n\n return failed\n\n\ndef create_parser():\n def set_log_level(option, opt, value, parser, level=None):\n if level is None:\n level = value\n parser.values.set_log_level = getattr(logging, level, None) or int(level)\n\n parser = optparse.OptionParser(usage=__doc__);\n\n parser.add_option('-l', '--list', action='store_true', dest='list_tests',\n help='Print test doc strings instead of running them')\n parser.add_option('-v', '--verbose', action='callback',\n callback=set_log_level, callback_args=(logging.DEBUG, ),\n help='Print binary command-lines')\n parser.add_option('-c', '--cleanup', action='store_true',\n help='Clean up after successful tests')\n parser.add_option('-p', '--parallel', action='store', type='int',\n help='Run the tests in parallel')\n parser.add_option('-u', '--url', action='store',\n help='Base url to the repos (e.g. 
svn://localhost)')\n parser.add_option('-f', '--fs-type', action='store',\n help='Subversion file system type (fsfs(-v[46]), bdb or fsx)')\n parser.add_option('-g', '--global-scheduler', action='store_true',\n help='Run tests from all scripts together')\n parser.add_option('--http-library', action='store',\n help=\"Make svn use this DAV library (neon or serf)\")\n parser.add_option('--bin', action='store', dest='svn_bin',\n help='Use the svn binaries installed in this path')\n parser.add_option('--fsfs-sharding', action='store', type='int',\n help='Default shard size (for fsfs)')\n parser.add_option('--fsfs-packing', action='store_true',\n help=\"Run 'svnadmin pack' automatically\")\n parser.add_option('--server-minor-version', type='int', action='store',\n help=\"Set the minor version for the server\")\n parser.add_option('--skip-c-tests', '--skip-C-tests', action='store_true',\n help=\"Run only the Python tests\")\n parser.add_option('--dump-load-cross-check', action='store_true',\n help=\"After every test, run a series of dump and load \" +\n \"tests with svnadmin, svnrdump and svndumpfilter \" +\n \" on the testcase repositories to cross-check \" +\n \" dump file compatibility.\")\n parser.add_option('--enable-sasl', action='store_true',\n help='Whether to enable SASL authentication')\n parser.add_option('--config-file', action='store',\n help=\"Configuration file for tests.\")\n parser.add_option('--log-to-stdout', action='store_true',\n help='Print test progress to stdout instead of a log file')\n parser.add_option('--milestone-filter', action='store', dest='milestone_filter',\n help='Limit --list to those with target milestone specified')\n parser.add_option('--mode-filter', action='store', dest='mode_filter',\n default='ALL',\n help='Limit tests to those with type specified (e.g. XFAIL)')\n parser.add_option('--set-log-level', action='callback', type='str',\n callback=set_log_level,\n help=\"Set log level (numerically or symbolically). 
\" +\n \"Symbolic levels are: CRITICAL, ERROR, WARNING, \" +\n \"INFO, DEBUG\")\n parser.add_option('--ssl-cert', action='store',\n help='Path to SSL server certificate.')\n parser.add_option('--http-proxy', action='store',\n help='Use the HTTP Proxy at hostname:port.')\n parser.add_option('--http-proxy-username', action='store',\n help='Username for the HTTP Proxy.')\n parser.add_option('--http-proxy-password', action='store',\n help='Password for the HTTP Proxy.')\n parser.add_option('--httpd-version', action='store',\n help='Assume HTTPD is this version.')\n parser.add_option('--httpd-whitelist', action='store',\n help='Assume HTTPD whitelist is this version.')\n parser.add_option('--exclusive-wc-locks', action='store_true',\n help='Use sqlite exclusive locking for working copies')\n parser.add_option('--memcached-server', action='store',\n help='Use memcached server at specified URL (FSFS only)')\n parser.add_option('--fsfs-compression', action='store', type='str',\n help='Set compression type (for fsfs)')\n parser.add_option('--fsfs-dir-deltification', action='store', type='str',\n help='Set directory deltification option (for fsfs)')\n\n parser.set_defaults(set_log_level=None)\n return parser\n\ndef main():\n (opts, args) = create_parser().parse_args(sys.argv[1:])\n\n if len(args) < 3:\n print(\"{}: at least three positional arguments required; got {!r}\".format(\n os.path.basename(sys.argv[0]), args\n ))\n sys.exit(2)\n\n if opts.log_to_stdout:\n logfile = None\n faillogfile = None\n else:\n logfile = os.path.abspath('tests.log')\n faillogfile = os.path.abspath('fails.log')\n\n th = TestHarness(args[0], args[1], logfile, faillogfile, opts)\n failed = th.run(args[2:])\n if failed:\n sys.exit(1)\n\n\n# Run main if not imported as a module\nif __name__ == '__main__':\n main()\n", "id": "3540911", "language": "Python", "matching_score": 3.4934186935424805, "max_stars_count": 0, "path": "build/run_tests.py" }, { "content": "#!/usr/bin/env python\n# python: coding=utf-8\n#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n#\n\n\n# About this script:\n# This script is intended to automate steps in creating a new Subversion\n# minor release.\n\nimport os\nimport re\nimport sys\nimport logging\nimport subprocess\nimport argparse # standard in Python 2.7\n\nfrom release import Version\n\n\n# Some constants\nrepos = 'https://svn.apache.org/repos/asf/subversion'\nsecure_repos = 'https://svn.apache.org/repos/asf/subversion'\nbuildbot_repos = 'https://svn.apache.org/repos/infra/infrastructure/buildbot/aegis/buildmaster'\n\n# Parameters\ndry_run = False\n\n# Local working copies\nbase_dir = None # set by main()\n\ndef get_trunk_wc_path(path=None):\n trunk_wc_path = os.path.join(base_dir, 'svn-trunk')\n if path is None: return trunk_wc_path\n return os.path.join(trunk_wc_path, path)\ndef get_branch_wc_path(ver, path=None):\n branch_wc_path = os.path.join(base_dir, ver.branch + '.x')\n if path is None: return branch_wc_path\n return os.path.join(branch_wc_path, path)\ndef get_buildbot_wc_path(path=None):\n buildbot_wc_path = os.path.join(base_dir, 'svn-buildmaster')\n if path is None: return buildbot_wc_path\n return os.path.join(buildbot_wc_path, path)\n\ndef get_trunk_url():\n return secure_repos + '/trunk'\ndef get_branch_url(ver):\n return secure_repos + '/branches/' + ver.branch + '.x'\ndef get_tag_url(ver):\n return secure_repos + '/tags/' + ver.base\ndef get_buildbot_url():\n return buildbot_repos\n\n#----------------------------------------------------------------------\n# Utility functions\n\ndef run(cmd, dry_run=False):\n print('+ ' + ' '.join(cmd))\n if not dry_run:\n stdout = subprocess.check_output(cmd)\n print(stdout)\n else:\n print(' ## dry-run; not executed')\n\ndef run_svn(cmd, dry_run=False):\n run(['svn'] + cmd, dry_run)\n\ndef svn_commit(cmd):\n run_svn(['commit'] + cmd, dry_run=dry_run)\n\ndef svn_copy_branch(src, dst, message):\n args = ['copy', src, dst, '-m', message]\n run_svn(args, dry_run=dry_run)\n\ndef svn_checkout(url, wc, *args):\n args = ['checkout', url, wc] + list(args)\n run_svn(args)\n\n#----------------------------------------------------------------------\ndef edit_file(path, pattern, replacement):\n print(\"Editing '%s'\" % (path,))\n print(\" pattern='%s'\" % (pattern,))\n print(\" replace='%s'\" % (replacement,))\n old_text = open(path, 'r').read()\n new_text = re.sub(pattern, replacement, old_text)\n assert new_text != old_text\n open(path, 'w').write(new_text)\n\ndef prepend_file(path, text):\n print(\"Prepending to '%s'\" % (path,))\n print(\" text='%s'\" % (text,))\n original = open(path, 'r').read()\n open(path, 'w').write(text + original)\n\n#----------------------------------------------------------------------\ndef make_release_branch(ver, revnum):\n svn_copy_branch(get_trunk_url() + '@' + (str(revnum) if revnum else ''),\n get_branch_url(ver),\n 'Create the ' + ver.branch + '.x release branch.')\n\n#----------------------------------------------------------------------\ndef update_minor_ver_in_trunk(ver, revnum):\n \"\"\"Change the minor version in trunk to the next (future) minor version.\n \"\"\"\n trunk_wc = get_trunk_wc_path()\n trunk_url = get_trunk_url()\n svn_checkout(trunk_url + '@' + (str(revnum) if revnum else ''),\n trunk_wc)\n\n prev_ver = Version('1.%d.0' % (ver.minor - 1,))\n next_ver = Version('1.%d.0' % (ver.minor + 1,))\n relpaths = []\n\n relpath = 'subversion/include/svn_version.h'\n relpaths.append(relpath)\n 
edit_file(get_trunk_wc_path(relpath),\n r'(#define SVN_VER_MINOR *)%s' % (ver.minor,),\n r'\\g<1>%s' % (next_ver.minor,))\n\n relpath = 'subversion/tests/cmdline/svntest/main.py'\n relpaths.append(relpath)\n edit_file(get_trunk_wc_path(relpath),\n r'(SVN_VER_MINOR = )%s' % (ver.minor,),\n r'\\g<1>%s' % (next_ver.minor,))\n\n relpath = 'subversion/bindings/javahl/src/org/apache/subversion/javahl/NativeResources.java'\n relpaths.append(relpath)\n try:\n # since r1817921 (just after branching 1.10)\n edit_file(get_trunk_wc_path(relpath),\n r'SVN_VER_MINOR = %s;' % (ver.minor,),\n r'SVN_VER_MINOR = %s;' % (next_ver.minor,))\n except:\n # before r1817921: two separate places\n edit_file(get_trunk_wc_path(relpath),\n r'version.isAtLeast\\(1, %s, 0\\)' % (ver.minor,),\n r'version.isAtLeast\\(1, %s, 0\\)' % (next_ver.minor,))\n edit_file(get_trunk_wc_path(relpath),\n r'1.%s.0, but' % (ver.minor,),\n r'1.%s.0, but' % (next_ver.minor,))\n\n relpath = 'CHANGES'\n relpaths.append(relpath)\n # insert at beginning of CHANGES file\n prepend_file(get_trunk_wc_path(relpath),\n 'Version ' + next_ver.base + '\\n'\n + '(?? ??? 20XX, from /branches/' + next_ver.branch + '.x)\\n'\n + get_tag_url(next_ver) + '\\n'\n + '\\n')\n\n log_msg = '''\\\nIncrement the trunk version number to %s, and introduce a new CHANGES\nsection, following the creation of the %s.x release branch.\n\n* subversion/include/svn_version.h,\n subversion/bindings/javahl/src/org/apache/subversion/javahl/NativeResources.java,\n subversion/tests/cmdline/svntest/main.py\n (SVN_VER_MINOR): Increment to %s.\n\n* CHANGES: New section for %s.0.\n''' % (next_ver.branch, ver.branch, next_ver.minor, next_ver.branch)\n commit_paths = [get_trunk_wc_path(p) for p in relpaths]\n svn_commit(commit_paths + ['-m', log_msg])\n\n#----------------------------------------------------------------------\ndef create_status_file_on_branch(ver):\n branch_wc = get_branch_wc_path(ver)\n branch_url = get_branch_url(ver)\n svn_checkout(branch_url, branch_wc, '--depth=immediates')\n\n status_local_path = os.path.join(branch_wc, 'STATUS')\n text='''\\\n * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n * *\n * THIS RELEASE STREAM IS OPEN FOR STABILIZATION. 
*\n * *\n * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\nThis file tracks the status of releases in the %s.x line.\n\nSee http://subversion.apache.org/docs/community-guide/releasing.html#release-stabilization\nfor details on how release lines and voting work, what kinds of bugs can\ndelay a release, etc.\n\nStatus of %s:\n\nCandidate changes:\n==================\n\n\nVeto-blocked changes:\n=====================\n\n\nApproved changes:\n=================\n''' % (ver.branch, ver.base)\n open(status_local_path, 'wx').write(text)\n run_svn(['add', status_local_path])\n svn_commit([status_local_path,\n '-m', '* branches/' + ver.branch + '.x/STATUS: New file.'])\n\n#----------------------------------------------------------------------\ndef update_backport_bot(ver):\n print(\"\"\"MANUAL STEP: Fork & edit & pull-request on GitHub:\nhttps://github.com/apache/infrastructure-puppet/blob/deployment/modules/svnqavm_pvm_asf/manifests/init.pp\n\"Add new %s.x branch to list of backport branches\"\n\"\"\" % (ver.branch,))\n print(\"\"\"Someone needs to run the 'svn checkout' manually.\nThe exact checkout command is documented in machines/svn-qavm2/notes.txt\nin the private repository (need to use a trunk client and the svn-master.a.o\nhostname).\n\"\"\")\n\n#----------------------------------------------------------------------\ndef update_buildbot_config(ver):\n \"\"\"Add the new branch to the list of branches monitored by the buildbot\n master.\n \"\"\"\n buildbot_wc = get_buildbot_wc_path()\n buildbot_url = get_buildbot_url()\n svn_checkout(buildbot_url, buildbot_wc)\n\n prev_ver = Version('1.%d.0' % (ver.minor - 1,))\n next_ver = Version('1.%d.0' % (ver.minor + 1,))\n\n relpath = 'master1/projects/subversion.conf'\n edit_file(get_buildbot_wc_path(relpath),\n r'(MINOR_LINES=\\[.*%s)(\\])' % (prev_ver.minor,),\n r'\\1, %s\\2' % (ver.minor,))\n\n log_msg = '''\\\nSubversion: start monitoring the %s branch.\n''' % (ver.branch)\n commit_paths = [get_buildbot_wc_path(relpath)]\n svn_commit(commit_paths + ['-m', log_msg])\n\n#----------------------------------------------------------------------\ndef create_release_branch(args):\n make_release_branch(args.version, args.revnum)\n update_minor_ver_in_trunk(args.version, args.revnum)\n create_status_file_on_branch(args.version)\n update_backport_bot(args.version)\n update_buildbot_config(args.version)\n\n\n#----------------------------------------------------------------------\n# Main entry point for argument parsing and handling\n\ndef main():\n 'Parse arguments, and drive the appropriate subcommand.'\n\n # Setup our main parser\n parser = argparse.ArgumentParser(\n description='Create an Apache Subversion release branch.')\n subparsers = parser.add_subparsers(title='subcommands')\n\n # Setup the parser for the create-release-branch subcommand\n subparser = subparsers.add_parser('create-release-branch',\n help='''Create a minor release branch: branch from trunk,\n update version numbers on trunk, create status\n file on branch, update backport bot,\n update buildbot config.''')\n subparser.set_defaults(func=create_release_branch)\n subparser.add_argument('version', type=Version,\n help='''A version number to indicate the branch, such as\n '1.7.0' (the '.0' is required).''')\n subparser.add_argument('revnum', type=lambda arg: int(arg.lstrip('r')),\n nargs='?', default=None,\n help='''The trunk revision number to base the branch on.\n Default is HEAD.''')\n subparser.add_argument('--dry-run', action='store_true', default=False,\n help='Avoid 
committing any changes to repositories.')\n subparser.add_argument('--verbose', action='store_true', default=False,\n help='Increase output verbosity')\n subparser.add_argument('--base-dir', default=os.getcwd(),\n help='''The directory in which to create needed files and\n folders. The default is the current working\n directory.''')\n\n # Parse the arguments\n args = parser.parse_args()\n\n global base_dir, dry_run\n base_dir = args.base_dir\n dry_run = args.dry_run\n\n # Set up logging\n logger = logging.getLogger()\n if args.verbose:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n\n # Make timestamps in tarballs independent of local timezone\n os.environ['TZ'] = 'UTC'\n\n # finally, run the subcommand, and give it the parsed arguments\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n", "id": "11400857", "language": "Python", "matching_score": 1.6725306510925293, "max_stars_count": 3, "path": "tools/dist/create-minor-release-branch.py" }, { "content": "#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n# coding:utf-8\nimport os\nimport traceback\nimport uuid\nfrom csv import DictReader, DictWriter\n\nfrom werkzeug.datastructures import ImmutableMultiDict\nfrom werkzeug.utils import secure_filename\n\nfrom mlpm.app import aidserver\nfrom mlpm.response import json_resp\nfrom mlpm.utility import str2bool\n\n\nasync def handle_post_solver_train_or_infer(request, upload_folder,\n request_type, target_folder):\n config = ImmutableMultiDict(await request.form)\n data = config.to_dict()\n results = {}\n req_files = await request.files\n if 'file' in req_files:\n uploaded_file = req_files['file']\n filename = secure_filename(uploaded_file.filename)\n print(filename)\n # make sure the UPLOAD_FOLDER exsits\n if not os.path.isdir(upload_folder):\n os.makedirs(upload_folder)\n file_abs_path = os.path.join(upload_folder, filename)\n await uploaded_file.save(file_abs_path)\n data['input_file_path'] = file_abs_path\n try:\n if request_type == \"infer\":\n results = aidserver.solver.infer(data)\n else:\n raise NotImplementedError\n if 'delete_after_process' in data:\n if str2bool(data['delete_after_process']):\n os.remove(file_abs_path)\n print(results)\n return json_resp(results, status=200)\n except Exception as e:\n traceback.print_exc()\n return json_resp({\"error\": str(e), \"code\": \"500\"}, status=500)\n\n\nasync def handle_batch_infer_request(request, upload_folder, target_folder):\n req_files = await request.files\n if 'file' in req_files:\n uploaded_file = req_files['file']\n filename = secure_filename(uploaded_file.filename)\n if not os.path.isdir(upload_folder):\n os.makedirs(upload_folder)\n file_abs_path = os.path.join(upload_folder, filename)\n uploaded_file.save(file_abs_path)\n try:\n with open(file_abs_path, 'r') as file_obj:\n csv_dict = list(DictReader(file_obj))\n results = [aidserver.solver.infer(row) for row in csv_dict]\n # merging results\n output = []\n for index, each in enumerate(csv_dict):\n each.update(results[index])\n output.append(each)\n if not os.path.isdir(target_folder):\n os.makedirs(target_folder)\n file_identifier = str(uuid.uuid4())\n output_file_path = os.path.join(target_folder,\n file_identifier + \".csv\")\n head = output[0].keys()\n with open(output_file_path, 'w') as file_obj:\n writer = DictWriter(file_obj, fieldnames=head)\n writer.writeheader()\n for each in output:\n writer.writerow(each)\n return json_resp({'filename': file_identifier + \".csv\"}, status=200)\n except 
Exception as e:\n traceback.print_exc()\n return json_resp({\"error\": str(e), \"code\": \"500\"}, status=500)\n", "id": "2896971", "language": "Python", "matching_score": 2.1092159748077393, "max_stars_count": 207, "path": "components/mlserve/mlpm/handler.py" }, { "content": "# \n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n# coding:utf-8\nfrom mlpm.solver import Solver\nfrom mlpm.metrics import Metrics\nfrom mlpm.server import aidserver\n\nclass ExampleSolver(Solver):\n def __init__(self,toml_file=None):\n super().__init__(toml_file)\n self.predictor = ''\n self.acc = Metrics()\n\n def infer(self, data):\n print(data)\n return {'data': [0, 1, 2, 3]}\n\n def train(self, data):\n epochs = int(data['epochs'])\n for i in range(epochs):\n print(i)\n print(data)\n return data\n\nsolver = ExampleSolver()\naidserver.solver = solver\n", "id": "9758882", "language": "Python", "matching_score": 2.0170812606811523, "max_stars_count": 207, "path": "components/mlserve/runner_example.py" }, { "content": "# Copyright (c) 2020 <NAME> & AICAMP.CO.,LTD\n#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\n\n# coding:utf-8\nclass Metrics(object):\n def __init__(self):\n self.data = []\n\n def add(self, value):\n # value is assumed to be a json object, rather than a plain text\n # e.g. {step: 0, acc:1}\n self.data.append(value)\n\n @property\n def output(self):\n return {}\n", "id": "6354816", "language": "Python", "matching_score": 0.24214811623096466, "max_stars_count": 207, "path": "components/mlserve/mlpm/metrics.py" }, { "content": "import base64 \n\ndef encode(b64_string):\n try:\n return base64.b64decode(b64_string).decode('ascii')\n except Exception as e:\n # print(e)\n b64_string += \"=\" * ((4 - len(b64_string) % 4) % 4)\n return base64.b64decode(b64_string).decode('ascii')\n\ndef floatStr(s):\n try:\n return float(s)\n except:\n return 0\n\ndef encodeQuery(data):\n res = {}\n res['version'] = encode(data[0])\n res['uptime'] = floatStr(encode(data[1]))\n res['sessions'] = encode(data[2])\n res['processes'] = encode(data[3])\n res['processes_array'] = encode(data[4])\n res['file_handles'] = encode(data[5])\n res['file_handles_limit'] = encode(data[6])\n res['os_kernel'] = encode(data[7])\n res['os_name'] = encode(data[8])\n res['os_arch'] = encode(data[9])\n res['cpu_name'] = encode(data[10])\n res['cpu_cores'] = encode(data[11])\n res['cpu_freq'] = encode(data[12])\n res['ram_total'] = floatStr(encode(data[13]))\n res['ram_usage'] = floatStr(encode(data[14]))\n res['swap_total'] = floatStr(encode(data[15]))\n res['swap_usage'] = floatStr(encode(data[16]))\n res['disk_array'] = encode(data[17])\n res['disk_total'] = floatStr(encode(data[18]))\n res['disk_usage'] = floatStr(encode(data[19]))\n res['connections'] = encode(data[20])\n res['nic'] = encode(data[21])\n res['ipv_4'] = encode(data[22])\n res['ipv_6'] = encode(data[23])\n res['rx'] = floatStr(encode(data[24]))\n res['tx'] = floatStr(encode(data[25]))\n res['rx_gap'] = floatStr(encode(data[26]))\n res['tx_gap'] = floatStr(encode(data[27]))\n res['loads'] = encode(data[28])\n res['load_system'] = round(floatStr(res['loads'].split(\" \")[1])*100, 2)\n res['load_cpu'] = floatStr(encode(data[29]))\n res['load_io'] = floatStr(encode(data[30]))\n res['ping_eu'] = floatStr(encode(data[31]))\n res['ping_us'] = floatStr(encode(data[32]))\n res['ping_as'] = floatStr(encode(data[33]))\n\n return res", "id": "8355009", "language": "Python", "matching_score": 
2.020110845565796, "max_stars_count": 81, "path": "libs/encode.py" }, { "content": "\nfrom rest_framework.decorators import api_view, renderer_classes \nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.renderers import JSONRenderer\nfrom django.core.mail import send_mail\n\nfrom urllib import parse\nfrom bson import ObjectId\n\nfrom libs.encode import encodeQuery\n\nfrom servers.models import Hosts\nfrom libs.mg.query import mgdb\n\nfrom datetime import datetime, timedelta\nimport calendar\nfrom servers.tasks import delete_records_by_host_task\n\n\ndef get_range_time():\n now = datetime.now()\n hours_ago = [(datetime.now() - timedelta(hours = 1)), now]\n days_ago = [(datetime.now() - timedelta(hours = 24)), now]\n month_ago = [(datetime.now() - timedelta(days=calendar.monthrange(datetime.now().year,datetime.now().month)[1])), now]\n year_ago = [(datetime.now() - timedelta(days = 365)), now]\n\n return {\n 'hours_ago':hours_ago,\n 'month_ago':month_ago,\n 'days_ago':days_ago,\n 'year_ago':year_ago,\n }\n\ndef recordsqeury(host_id):\n time_zone = get_range_time()\n # mdb = mongo_db()\n try:\n db = mgdb()\n collection = db['nqmonitor']['records']\n hours = {\n \"where\":{'host_id': host_id, \"updated_at\":{\"$gt\":time_zone['hours_ago'][0],\"$lt\":time_zone['hours_ago'][1] } }, \n \"fields\":{\n 'host_id':1,'rx':1, 'tx':1, 'rx_gap':1, 'tx_gap':1, '_id':0, 'updated_at':1, 'ping_us':1, 'ping_eu':1, 'ping_as':1,\n 'ram_usage':1, 'disk_usage':1,'loads':1,'load_cpu':1, 'load_io':1, 'swap_usage':1\n }\n }\n days =[\n { \n \"$match\":{\n \"host_id\":host_id,\n \"updated_at\":{\"$gt\":time_zone['days_ago'][0],\"$lt\":time_zone['days_ago'][1] }\n }\n },\n {\n \"$addFields\": {\n \"created_date\": { \"$dateToParts\": { \"date\": { \"$toDate\": { \"$toLong\": \"$updated_at\" } } } },\n \"system_load\": {\"$arrayElemAt\":[ { \"$split\": [\"$loads\" , \" \"]}, 1] } \n }\n }, \n { \"$group\": {\n \"_id\": {\n \"customerId\": \"$host_id\",\n \"hour\": \"$created_date.hour\",\n \"day\": \"$created_date.day\",\n \"month\": \"$created_date.month\",\n \"year\": \"$created_date.year\"\n },\n \"rx_gap\": { \"$avg\": \"$rx_gap\" },\n \"tx_gap\": { \"$avg\": \"$tx_gap\" },\n \"rx\": { \"$avg\": \"$rx\" },\n \"tx\": { \"$avg\": \"$tx\" },\n \"ram_usage\": { \"$avg\": \"$ram_usage\" },\n \"swap_usage\": { \"$avg\": \"$swap_usage\" },\n \"disk_usage\": { \"$avg\": \"$disk_usage\" },\n \"load_io\": { \"$avg\": \"$load_io\" },\n \"load_cpu\": { \"$avg\": \"$load_cpu\" },\n \"ping_eu\": { \"$avg\": \"$ping_eu\" },\n \"ping_us\": { \"$avg\": \"$ping_us\" },\n \"ping_as\": { \"$avg\": \"$ping_as\" },\n \"system_load\": {\"$avg\": {\"$toDouble\":\"$system_load\"} }\n }},\n { \"$group\": {\n \"_id\": {\n \"customerId\": \"$_id.host_id\", \n },\n \"hours\": { \n \"$push\": { \n \"day\": \"$_id.day\",\n \"hour\": \"$_id.hour\",\n \"updated_at\": { \"$concat\": [ { \"$toString\": \"$_id.year\" } , \"-\", { \"$toString\": \"$_id.month\" }, \"-\", { \"$toString\": \"$_id.day\" }, \" \", \n { \"$toString\": \"$_id.hour\" }, \":00:00\" ] },\n \"rx_gap\": \"$rx_gap\",\n \"tx_gap\": \"$tx_gap\",\n \"rx\": \"$rx\",\n \"tx\": \"$tx\",\n \"ram_usage\": \"$ram_usage\",\n \"swap_usage\": \"$swap_usage\",\n \"disk_usage\": \"$disk_usage\",\n \"load_io\": \"$load_io\",\n \"load_cpu\": \"$load_cpu\",\n \"ping_eu\": \"$ping_eu\",\n \"ping_us\": \"$ping_us\",\n \"ping_as\": \"$ping_as\",\n \"system_load\":\"$system_load\"\n }\n }\n }}, \n ]\n months = [\n { \n \"$match\":{\n 
\"host_id\":host_id,\n \"updated_at\":{\"$gt\":time_zone['month_ago'][0],\"$lt\":time_zone['month_ago'][1] }\n }\n },\n {\n \"$addFields\": {\n \"created_date\": { \"$dateToParts\": { \"date\": { \"$toDate\": { \"$toLong\": \"$updated_at\" } } } },\n \"system_load\": {\"$arrayElemAt\":[ { \"$split\": [\"$loads\" , \" \"]}, 1] } \n }\n }, \n { \"$group\": {\n \"_id\": {\n \"customerId\": \"$host_id\", \n \"day\": \"$created_date.day\",\n \"month\": \"$created_date.month\",\n \"year\": \"$created_date.year\"\n },\n \"rx_gap\": { \"$avg\":{\"$toDouble\":\"$rx_gap\"} },\n \"tx_gap\": { \"$avg\": \"$tx_gap\" },\n \"rx\": { \"$avg\": \"$rx\" },\n \"tx\": { \"$avg\": \"$tx\" },\n \"ram_usage\": { \"$avg\": \"$ram_usage\" },\n \"swap_usage\": { \"$avg\": \"$swap_usage\" },\n \"disk_usage\": { \"$avg\": \"$disk_usage\" },\n \"load_io\": { \"$avg\": \"$load_io\" },\n \"load_cpu\": { \"$avg\": \"$load_cpu\" },\n \"ping_eu\": { \"$avg\": \"$ping_eu\" },\n \"ping_us\": { \"$avg\": \"$ping_us\" },\n \"ping_as\": { \"$avg\": \"$ping_as\" },\n \"system_load\": {\"$avg\": {\"$toDouble\":\"$system_load\"} }\n }},\n { \"$group\": {\n \"_id\": {\n \"customerId\": \"$_id.host_id\", \n },\n \"months\": { \n \"$push\": { \n \"day\": \"$_id.day\",\n \"month\": \"$_id.month\",\n \"updated_at\": { \"$concat\": [ { \"$toString\": \"$_id.year\" } , \"-\", { \"$toString\": \"$_id.month\" }, \"-\", { \"$toString\": \"$_id.day\" } ] },\n \"rx_gap\": \"$rx_gap\",\n \"tx_gap\": \"$tx_gap\",\n \"rx\": \"$rx\",\n \"tx\": \"$tx\",\n \"ram_usage\": \"$ram_usage\",\n \"swap_usage\": \"$swap_usage\",\n \"disk_usage\": \"$disk_usage\",\n \"load_io\": \"$load_io\",\n \"load_cpu\": \"$load_cpu\",\n \"ping_eu\": \"$ping_eu\",\n \"ping_us\": \"$ping_us\",\n \"ping_as\": \"$ping_as\",\n \"system_load\": \"$system_load\",\n }\n }\n }}, \n ]\n hours_data = collection.find(hours['where'], hours['fields'])\n days_data = list(collection.aggregate(days))\n months_data = list(collection.aggregate(months))\n \n hours_res = []\n for i in hours_data:\n hours_res.append(i)\n \n days_res = days_data[0]['hours']\n months_res = months_data[0]['months'] \n return {\n 'hours':{\"count\":len(hours_res), \"results\":hours_res},\n 'days':{\"count\":len(days_res), \"results\":days_res},\n 'months':{\"count\":len(months_res), \"results\":months_res},\n }\n db.close()\n except:\n return {}\n db.close()\n\n@api_view(['GET', 'POST'])\n@renderer_classes([JSONRenderer])\ndef get_host_records(request, host_id):\n user = request.user\n if user.is_superuser:\n host = Hosts.objects.filter(id=host_id)\n else:\n host = Hosts.objects.filter(id=host_id, user_id=user.id)\n if not host:\n return Response('error :host not found or is not yours!',status=status.HTTP_404_NOT_FOUND)\n try:\n res = recordsqeury(host_id) \n return Response({'host':host[0].to_dict(), 'records':res})\n except Exception as e:\n print(e)\n return Response('error :please contact admin!',status=status.HTTP_404_NOT_FOUND)\n\n\n\n@api_view(['DELETE'])\n@renderer_classes([JSONRenderer])\ndef delete_records_by_host(request, host_id):\n res = delete_records_by_host_task(host_id)\n return Response('delete task is running')\n\n\n", "id": "6216413", "language": "Python", "matching_score": 2.8371708393096924, "max_stars_count": 81, "path": "api/records.py" }, { "content": "\nfrom rest_framework.decorators import api_view, renderer_classes, permission_classes, throttle_classes\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.renderers import 
JSONRenderer\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.throttling import UserRateThrottle, AnonRateThrottle\nfrom users.models import Accounts\nfrom django.db.models import Q\n\n@api_view(['POST'])\n@renderer_classes([JSONRenderer])\n@permission_classes([AllowAny])\n@throttle_classes([AnonRateThrottle, UserRateThrottle])\ndef sigup(request):\n data = request.data\n username = data['username'] if data['username'] else None\n password = data['password'] if data['password'] else None\n email = data['email'] if data['email'] else None\n print(username, email, password)\n # return Response({'msg':'ok' })\n if username and email:\n try:\n user_count = Accounts.objects.filter(username=username).count()\n email_count = Accounts.objects.filter(email=email).count()\n if user_count >= 1:\n return Response({'msg':'用户名已经存在.',}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n elif email_count >= 1:\n return Response({'msg':'邮箱已经存在.',}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n user = Accounts(username=username, email=email)\n user.set_password(password)\n user.save()\n return Response({'msg':'ok' })\n except Exception as e:\n print(e)\n return Response({'msg':'error'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n return Response({'msg':'username and email required'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n", "id": "6696059", "language": "Python", "matching_score": 2.67339825630188, "max_stars_count": 81, "path": "api/user.py" }, { "content": "\nimport json\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login as auth_login,logout as auth_logout\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\n\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom rest_framework.authtoken.views import ObtainAuthToken\n \n@csrf_exempt\ndef sigup(request):\n if request.user.is_authenticated:\n return HttpResponseRedirect('/dashboard/')\n return render(request, 'sigup.html')\n\n@csrf_exempt\ndef login(request):\n if request.method == 'GET':\n if request.user.is_authenticated:\n return HttpResponseRedirect('/dashboard/')\n return render(request, 'login.html')\n elif request.method == 'POST':\n username = request.POST.get('username', None)\n password = request.POST.get('password', None)\n user = authenticate(username=username,password=password)\n print(user)\n if user and user is not None:\n try:\n auth_login(request, user)\n return HttpResponseRedirect('/dashboard')\n except Exception as e:\n print(e)\n return render(request, 'login.html')\n\n\n@login_required\ndef logout(request):\n auth_logout(request)\n return HttpResponseRedirect('/login')\n\n\n\nclass AuthToken(ObtainAuthToken):\n \n def post(self, request, *args, **kwargs):\n try:\n serializer = self.serializer_class(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n return Response({\n 'token': token.key,\n 'uid': user.pk, \n 'username': user.username,\n 'user':user.to_dict\n })\n except Exception as e:\n print(e)\n return Response({'msg':'error'}, status=401)\n\n\n# @api_view\n# def refreshToken(request):\n# if 
request.method == 'POST':\n# username = request.user.username\n# try:\n# user = Accounts.objects.get(username=username)\n# token = user.refreshToken()\n# return Response({\n# 'token': token.key,\n# 'uuid': user.pk, \n# 'username': user.username, \n# })\n# except:\n# return HttpResponse(json.dumps({'msg':'user error'}), status=400, content_type=\"application/json\")\n# else:\n# msg = {'error':'post needed!'}\n# return HttpResponse(json.dumps(msg), status=400, content_type=\"application/json\")\n\n", "id": "7273856", "language": "Python", "matching_score": 2.299592971801758, "max_stars_count": 81, "path": "users/auth.py" }, { "content": "import requests\nfrom django.http import JsonResponse, HttpResponse\n\ndef get_ip_info(request):\n ip = request.GET.get('ip', None)\n if not ip:\n return JsonResponse({'msg':'ip needed'})\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36\"\n }\n url = 'http://ip-api.com/json/{ip}?lang=zh-CN'.format(ip=ip)\n try:\n r = requests.get(url, headers=headers)\n return HttpResponse(r.text)\n except Exception as e:\n return JsonResponse({'msg':'ok'})", "id": "998184", "language": "Python", "matching_score": 0.6989341378211975, "max_stars_count": 81, "path": "api/get_ip.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom .config import config\nimport requests\nfrom .wx_push import WechatPush\n\nwp = WechatPush()\nRET_OK = 0\nRET_ERROR = -1\n\n\nclass CheckConfig:\n\n def _check_template_id(self, openid):\n msg = {\n 'echo_type': '*** This is test ***',\n 'code': str(10000),\n 'price': str(101.1),\n 'total_deal_price': str(200),\n 'quantity': str(20000),\n 'time': str(123)\n }\n ret, msg = wp.send_template_msg(openid, msg)\n if ret != RET_OK:\n return ret, msg\n return RET_OK, msg\n\n def _check_test_nickname(self, test_nickname):\n nickname_openid = {}\n # nickname\n ret, all_user_openid = wp.get_user_openid_list()\n if ret != RET_OK:\n return ret, all_user_openid\n\n if all_user_openid:\n for user in all_user_openid:\n ret, nickname = wp.get_user_nickname(user)\n if ret != RET_OK:\n return ret, nickname\n nickname_openid.update({nickname: user})\n\n res_nickname_openid = {}\n for name in test_nickname:\n if name not in nickname_openid:\n return RET_ERROR, name + \", this nickname is wrong.\"\n res_nickname_openid.update({name: nickname_openid[name]})\n\n return RET_OK, res_nickname_openid\n\n def _send_test_msg_to_test_nickname(self):\n print(\"Trying to send test msg to your nickname...\")\n ret, nickname_openid = self._check_test_nickname(config.test_user_nickname)\n if ret != RET_OK:\n return ret, nickname_openid\n\n for name in nickname_openid:\n ret, msg = self._check_template_id(nickname_openid[name])\n if ret != RET_OK:\n return ret, msg\n return RET_OK, \"Send test template massage successfully.\"\n\n def _check_wechat(self):\n msg = ''\n if config.appid == '':\n msg += 'Wechat appid is null.'\n return RET_ERROR, msg\n if config.secrect == '':\n msg += 'Wechat secret is null.'\n return RET_ERROR, msg\n print('Wechat appid and secret checked.')\n ret, access_token = wp.get_access_token_from_wechat()\n if ret != RET_OK:\n return ret, access_token\n\n if config.test_user_nickname == '':\n return RET_ERROR, \"Please fill your nickname in config.py\"\n\n # ---- send test msg to tester.\n # else:\n # if len(config.test_user_list) == 0 and len():\n # return RET_ERROR, \"Please fill the test_user_list correctly.\"\n # else:\n # for test_openid in config.test_user_list:\n # 
ret, msg = self._check_template_id(test_openid)\n # if ret != RET_OK:\n # return ret, msg\n # print(\"Template_id is ready.\")\n\n return RET_OK, \"Connect wechat successfully.\"\n\n def _check_others(self):\n msg = ''\n if config.database == '':\n msg += 'Database name is null.\\n'\n\n if config.host == '':\n msg += 'Host is null.\\n'\n\n if config.port == '':\n msg += 'Port is null.\\n'\n\n if config.token == '':\n msg += 'Token is null.\\n'\n\n if config.template_id == '':\n msg += 'Template_id is null.\\n'\n\n if msg != '':\n return RET_ERROR, msg\n\n return RET_OK, \"Other parameter is ready.\"\n\n def check_all(self):\n print(\"+-------------------------------+\")\n ret, msg = self._check_wechat()\n if ret != RET_OK:\n return ret, msg\n print(msg)\n\n ret, msg = self._check_others()\n if ret != RET_OK:\n return ret, msg\n print(msg)\n print(\"Check config successfully.\")\n print(\"+-------------------------------+\")\n return RET_OK, \"\"\n\n\n# ---- test code\nif __name__ == '__main__':\n cc = CheckConfig()\n ret, msg = cc.check_all()\n if ret != RET_OK:\n print(ret, msg)\n\n\n", "id": "8822796", "language": "Python", "matching_score": 2.6334526538848877, "max_stars_count": 5, "path": "futuquant/examples/app/stock_alarm/check_config.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom futuquant import *\nfrom .data_acquisition import *\nfrom .config import config\nfrom .check_config import CheckConfig\nimport sys\n\ncc = CheckConfig()\nret, msg = cc.check_all()\nif ret != RET_OK:\n print(ret, msg)\n sys.exit(1)\n\nbig_sub_codes = ['HK.02318', 'HK.02828', 'HK.00939', 'HK.01093', 'HK.01299', 'HK.00175',\n 'HK.01299', 'HK.01833', 'HK.00005', 'HK.00883', 'HK.00388', 'HK.01398',\n 'HK.01114', 'HK.02800', 'HK.02018', 'HK.03988', 'HK.00386', 'HK.01211']\n\n\nret, msg = quote_test(big_sub_codes, config.host, config.port)\nif ret != RET_OK:\n print(ret, msg)\n sys.exit(1)\n", "id": "6971558", "language": "Python", "matching_score": 1.0956714153289795, "max_stars_count": 5, "path": "futuquant/examples/app/stock_alarm/main.py" }, { "content": "from django.apps import AppConfig\n\n\nclass HttpmntConfig(AppConfig):\n name = 'httpmnt'\n", "id": "1259775", "language": "Python", "matching_score": 0, "max_stars_count": 81, "path": "httpmnt/apps.py" }, { "content": "# https://www.jb51.net/article/142059.htm", "id": "4139586", "language": "Python", "matching_score": 0, "max_stars_count": 5, "path": "futuquant/examples/app/stock_alarm/receive_and_reply/__init__.py" }, { "content": "# Generated by Django 3.1.2 on 2021-01-06 14:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Hosts',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('user_id', models.IntegerField(blank=True, null=True)),\n ('code', models.CharField(max_length=180)),\n ('name', models.CharField(max_length=180)),\n ('version', models.CharField(blank=True, max_length=255, null=True)),\n ('uptime', models.FloatField(blank=True, max_length=255, null=True)),\n ('sessions', models.CharField(blank=True, max_length=255, null=True)),\n ('processes', models.CharField(blank=True, max_length=255, null=True)),\n ('processes_array', models.TextField(blank=True, null=True)),\n ('file_handles', models.CharField(blank=True, max_length=255, null=True)),\n ('file_handles_limit', models.CharField(blank=True, max_length=255, null=True)),\n ('os_kernel', 
models.CharField(blank=True, max_length=255, null=True)),\n ('os_name', models.CharField(blank=True, max_length=255, null=True)),\n ('os_arch', models.CharField(blank=True, max_length=255, null=True)),\n ('cpu_name', models.CharField(blank=True, max_length=255, null=True)),\n ('cpu_cores', models.CharField(blank=True, max_length=255, null=True)),\n ('cpu_freq', models.CharField(blank=True, max_length=255, null=True)),\n ('ram_total', models.FloatField(default=0, max_length=255, null=True)),\n ('ram_usage', models.FloatField(default=0, max_length=255, null=True)),\n ('swap_total', models.FloatField(default=0, max_length=255, null=True)),\n ('swap_usage', models.FloatField(default=0, max_length=255, null=True)),\n ('disk_array', models.TextField(blank=True, null=True)),\n ('disk_total', models.FloatField(default=0, max_length=255, null=True)),\n ('disk_usage', models.FloatField(default=0, max_length=255, null=True)),\n ('connections', models.CharField(blank=True, max_length=255, null=True)),\n ('nic', models.CharField(blank=True, max_length=255, null=True)),\n ('ipv_4', models.CharField(blank=True, max_length=255, null=True)),\n ('ipv_6', models.CharField(blank=True, max_length=255, null=True)),\n ('rx', models.FloatField(default=0, max_length=255, null=True)),\n ('tx', models.FloatField(default=0, max_length=255, null=True)),\n ('rx_gap', models.FloatField(default=0, max_length=255, null=True)),\n ('tx_gap', models.FloatField(default=0, max_length=255, null=True)),\n ('loads', models.CharField(default=0, max_length=255, null=True)),\n ('load_cpu', models.FloatField(blank=True, default=0, max_length=255, null=True)),\n ('load_io', models.FloatField(blank=True, default=0, max_length=255, null=True)),\n ('load_system', models.FloatField(blank=True, default=0, max_length=255, null=True)),\n ('ping_eu', models.FloatField(default=0, max_length=255, null=True)),\n ('ping_us', models.FloatField(default=0, max_length=255, null=True)),\n ('ping_as', models.FloatField(default=0, max_length=255, null=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('is_notice', models.BooleanField(default=False)),\n ('is_check', models.BooleanField(default=False)),\n ('notice_load', models.IntegerField(default=80)),\n ('notice_ram', models.IntegerField(default=80)),\n ('notice_disk', models.IntegerField(default=80)),\n ('have_notice', models.BooleanField(default=False)),\n ('last_notice_at', models.DateTimeField(null=True)),\n ],\n options={\n 'db_table': 'hosts',\n 'ordering': ['-id'],\n 'default_permissions': (),\n },\n ),\n ]\n", "id": "11278436", "language": "Python", "matching_score": 4.371465682983398, "max_stars_count": 81, "path": "servers/migrations/0001_initial.py" }, { "content": "from django.db import models\nfrom django.forms.models import model_to_dict\n# Create your models here.\nfrom users.models import Accounts\n\nclass Hosts(models.Model):\n user_id = models.IntegerField(blank=True, null=True)\n code = models.CharField(max_length=180)\n name = models.CharField(max_length=180)\n version = models.CharField(max_length=255, blank=True, null=True)\n uptime = models.FloatField(max_length=255, blank=True, null=True)\n sessions = models.CharField(max_length=255, blank=True, null=True)\n processes = models.CharField(max_length=255, blank=True, null=True)\n processes_array = models.TextField(blank=True, null=True)\n file_handles = models.CharField(max_length=255, blank=True, null=True)\n file_handles_limit = 
models.CharField(max_length=255, blank=True, null=True)\n os_kernel = models.CharField(max_length=255, blank=True, null=True)\n os_name = models.CharField(max_length=255, blank=True, null=True)\n os_arch = models.CharField(max_length=255, blank=True, null=True)\n cpu_name = models.CharField(max_length=255, blank=True, null=True)\n cpu_cores = models.CharField(max_length=255, blank=True, null=True)\n cpu_freq = models.CharField(max_length=255, blank=True, null=True)\n ram_total = models.FloatField(max_length=255, default=0, null=True)\n ram_usage = models.FloatField(max_length=255, default=0, null=True)\n swap_total = models.FloatField(max_length=255, default=0, null=True)\n swap_usage = models.FloatField(max_length=255, default=0, null=True)\n disk_array = models.TextField(blank=True, null=True)\n disk_total = models.FloatField(max_length=255, default=0, null=True)\n disk_usage = models.FloatField(max_length=255, default=0, null=True)\n connections = models.CharField(max_length=255, blank=True, null=True)\n nic = models.CharField(max_length=255, blank=True, null=True)\n ipv_4 = models.CharField(max_length=255, blank=True, null=True)\n ipv_6 = models.CharField(max_length=255, blank=True, null=True)\n rx = models.FloatField(max_length=255, default=0, null=True)\n tx = models.FloatField(max_length=255, default=0, null=True)\n rx_gap =models.FloatField(max_length=255, default=0, null=True)\n tx_gap = models.FloatField(max_length=255, default=0, null=True)\n loads = models.CharField(max_length=255, default=0, null=True)\n load_cpu = models.FloatField(max_length=255, default=0, blank=True, null=True)\n load_io = models.FloatField(max_length=255, default=0, blank=True, null=True)\n load_system = models.FloatField(max_length=255, default=0, blank=True, null=True)\n ping_eu = models.FloatField(max_length=255, default=0, null=True)\n ping_us = models.FloatField(max_length=255, default=0, null=True)\n ping_as = models.FloatField(max_length=255, default=0, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n is_notice = models.BooleanField(default=False)\n is_check = models.BooleanField(default=False)\n notice_load = models.IntegerField(default=80)\n notice_ram = models.IntegerField(default=80)\n notice_disk = models.IntegerField(default=80)\n\n have_notice = models.BooleanField(default=False)\n last_notice_at = models.DateTimeField(null=True)\n\n @property\n def notice_email(self):\n try:\n user = Accounts.objects.get(id=self.user_id)\n return user.email\n except Exception as e:\n print(e)\n return None\n\n def to_dict(self):\n host = model_to_dict(self)\n host['updated_at'] = self.updated_at.strftime('%Y-%m-%d %H:%M:%S')\n host['notice_email'] = self.notice_email\n return host\n \n \n class Meta:\n # managed = False\n db_table = 'hosts'\n default_permissions = ()\n ordering = ['-id']\n", "id": "1992447", "language": "Python", "matching_score": 3.1537513732910156, "max_stars_count": 81, "path": "servers/models.py" }, { "content": "from datetime import datetime\nimport json\n\nfrom django.db import models\nfrom django.contrib.auth.models import User, Group, AbstractUser, Permission\nfrom django.contrib.sessions.models import Session\nfrom django.conf import settings\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom rest_framework.authtoken.models import Token\nfrom django.forms.models import model_to_dict\n\n\nclass Accounts(AbstractUser):\n phone = models.CharField(max_length=20, null=True, 
blank=True)\n name = models.CharField(max_length=200, null=True, blank=True)\n address = models.TextField(null=True, blank=True)\n email = models.CharField(max_length=100, unique=True)\n tel = models.CharField(max_length=100, null=True, blank=True)\n qq = models.CharField(max_length=100, null=True, blank=True)\n wechat = models.CharField(max_length=100, null=True, blank=True)\n company = models.CharField(max_length=100, null=True, blank=True)\n address = models.CharField(max_length=255, null=True, blank=True)\n city = models.CharField(max_length=100, null=True, blank=True)\n level = models.IntegerField(default=0)\n\n\n def __str__(self):\n return self.username\n\n \n\n @property\n def get_group(self):\n group_name = self.groups.all()\n groups = [i.name for i in group_name]\n return groups\n\n @property\n def list_permissions(self):\n permissions = list(self.get_all_permissions())\n return permissions\n\n @property\n def get_token(self):\n token = Token.objects.get(user_id=self.id)\n return 'Token ' + token.key\n\n @property\n def to_dict(self):\n return model_to_dict(self, exclude=['password'])\n\n\n def refreshToken(self):\n Token.objects.get(user_id=self.id).delete()\n token = Token.objects.create(user=self)\n return token.key\n\n\n class Meta:\n default_permissions = ()\n\n\n@receiver(post_save, sender=settings.AUTH_USER_MODEL)\ndef create_auth_token(sender, instance=None, created=False, **kwargs):\n if created:\n Token.objects.create(user=instance)\n\n\n\n\nclass VerifyCode(models.Model):\n \"\"\"\n 短信验证码\n \"\"\"\n code = models.CharField(max_length=10, verbose_name=\"验证码\")\n mobile = models.CharField(max_length=11, verbose_name=\"电话\")\n add_time = models.DateTimeField(default=datetime.now, verbose_name=\"添加时间\")\n status = models.BooleanField(default=True)\n\n class Meta:\n default_permissions = ()\n\n def __str__(self):\n return self.code\n\n\n\n\n\n\n\n\n\n", "id": "12310416", "language": "Python", "matching_score": 2.104444742202759, "max_stars_count": 81, "path": "users/models.py" }, { "content": "\"\"\"\nDjango settings for nqmonitor project.\n\nGenerated by 'django-admin startproject' using Django 3.1.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nfrom pathlib import Path\nimport os\nimport sys\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\nSTATIC_DIR = os.path.join(BASE_DIR, 'static')\nMEDIA_DIR = os.path.join(BASE_DIR, 'media')\nTEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')\n\nsys.path.insert(0, os.path.join(BASE_DIR, 'appcation'))\nsys.path.insert(0, os.path.join(BASE_DIR, 'libs'))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '&_)c%u^3)y6*4)fh%%@*$01aiv7#%8bhhkw-7x#0-c=whd4hpr'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = ['*', ]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django_filters',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'rest_framework_filters',\n 'corsheaders',\n 
'django_celery_results',\n 'users',\n 'servers',\n]\n\nMIDDLEWARE = [\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n\n 'middleware.csrf.DisableCSRFMiddleware',\n\n # 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'nqmonitor.urls'\n\nCSRF_COOKIE_SECURE = True\nCORS_ORIGIN_ALLOW_ALL = True\n\nAUTH_USER_MODEL = 'users.Accounts'\n\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n # 'rest_framework.authentication.BasicAuthentication',\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework.authentication.TokenAuthentication',\n # 'users.auth.QueryStringBasedTokenAuthentication'\n ),\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.IsAuthenticated',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': 100,\n 'DEFAULT_FILTER_BACKENDS': ['rest_framework_filters.backends.RestFrameworkFilterBackend'],\n # 'DEFAULT_THROTTLE_CLASSES': [\n # 'rest_framework.throttling.AnonRateThrottle',\n # 'rest_framework.throttling.UserRateThrottle'\n # ],\n # 'DEFAULT_THROTTLE_RATES': {\n # 'anon': '100/day',\n # 'user': '1000/day'\n # }\n\n}\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATES_DIR, ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'nqmonitor.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.environ.get('MYSQL_DB', 'nqmonitor'),\n 'HOST': os.environ.get('MYSQL_HOST', 'mysql'),\n 'USER': os.environ.get('MYSQL_USER', 'root'),\n 'PASSWORD': os.environ.get('MYSQL_PASSWORD', '<PASSWORD>'),\n 'PORT': os.environ.get('MYSQL_PORT', 3306),\n 'OPTIONS': {\n \"init_command\": \"SET foreign_key_checks = 0;\",\n # \"charset\": \"utf8mb4\",\n },\n },\n\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = False\n\nUSE_TZ = False\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(os.path.join(BASE_DIR, 'static')),\n)\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\n\nLOGIN_URL = 
'/login'\n\n\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_USE_TLS = False # 是否使用TLS安全传输协议(用于在两个通信应用程序之间提供保密性和数据完整性。)\nEMAIL_USE_SSL = True # 是否使用SSL加密,qq企业邮箱要求使用\nEMAIL_HOST = 'smtp.qq.com' # 如果是 163 改成 smtp.163.com\nEMAIL_PORT = 465\nEMAIL_HOST_USER = '<EMAIL>' # 帐号\nEMAIL_HOST_PASSWORD = 'xx' # 密码\nDEFAULT_FROM_EMAIL = 'MonitorX <<EMAIL>>'\n\nMONGO_HOST = os.environ.get('MONGO_HOST', 'mongo')\nMONGO_USER = os.environ.get('MONGO_USER', 'root')\nMONGO_PASSWORD = os.environ.get('MONGO_PASSWORD', '<PASSWORD>')\nMONGO_PORT = os.environ.get('MONGO_PORT', 27017)\nMONGO_DB = os.environ.get('MONGO_DB', 'nqmonitor')\n\n\nMONGO_BROKER_URL = 'mongodb://{user}:{password}@{host}:{port}/{db}'.format(\n host=MONGO_HOST, user=MONGO_USER, password=MONGO_PASSWORD, port=MONGO_PORT, db=MONGO_DB)\n\nCELERY_TASK_TRACK_STARTED = True\nCELERY_TASK_TIME_LIMIT = 30 * 60\nCELERY_BROKER_URL = MONGO_BROKER_URL\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_BACKEND = 'django-db'\nCELERY_TIMEZONE = 'Asia/Shanghai'\n", "id": "8436861", "language": "Python", "matching_score": 2.3031630516052246, "max_stars_count": 81, "path": "nqmonitor/settings.py" }, { "content": "\n\nimport pymongo\nfrom django.conf import settings\n\ndef mgdb():\n try:\n client = pymongo.MongoClient(settings.MONGO_BROKER_URL)\n return client\n except Exception as e:\n client.close()\n raise e\n finally:\n client.close()\n\n\n ", "id": "7582046", "language": "Python", "matching_score": 0.09091578423976898, "max_stars_count": 81, "path": "libs/mg/query.py" }, { "content": "# Copyright (c) 2020 <NAME> & AICAMP.CO.,LTD\n#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n\nfrom mlpm.server import aidserver\n\napp = aidserver\n\n\ndef test_index_returns_200():\n request, response = app.test_client.get('/')\n assert response.status == 200\n\n\ndef test_index_put_not_allowed():\n request, response = app.test_client.put('/')\n assert response.status == 405\n\n\nif __name__ == '__main__':\n test_index_returns_200()\n test_index_put_not_allowed()\n", "id": "443781", "language": "Python", "matching_score": 2.398456335067749, "max_stars_count": 207, "path": "components/mlserve/tests/server.py" }, { "content": "# Copyright (c) 2020 <NAME> & AICAMP.CO.,LTD\n#\n# This software is released under the MIT License.\n# https://opensource.org/licenses/MIT\n#coding:utf-8\n\nfrom quart import Quart\n\naidserver = Quart(__name__)\n# Explicitly declare solver in the aidserver\naidserver.solver = None\n", "id": "11963428", "language": "Python", "matching_score": 0.03211892768740654, "max_stars_count": 207, "path": "components/mlserve/mlpm/app.py" }, { "content": "from django.db.models.constants import LOOKUP_SEP\nfrom django.db.models.expressions import Expression\nfrom django.db.models.lookups import Transform\n\n\ndef lookups_for_field(model_field):\n \"\"\"\n Generates a list of all possible lookup expressions for a model field.\n \"\"\"\n lookups = []\n\n for expr, lookup in model_field.get_lookups().items():\n if issubclass(lookup, Transform):\n transform = lookup(Expression(model_field))\n lookups += [\n LOOKUP_SEP.join([expr, sub_expr]) for sub_expr\n in lookups_for_transform(transform)\n ]\n\n else:\n lookups.append(expr)\n\n return lookups\n\n\ndef lookups_for_transform(transform):\n \"\"\"\n Generates a list of subsequent lookup expressions for a transform.\n\n Note:\n Infinite transform recursion is only prevented when the subsequent and\n passed in transforms are 
the same class. For example, the ``Unaccent``\n transform from ``django.contrib.postgres``.\n There is no cycle detection across multiple transforms. For example,\n ``a__b__a__b`` would continue to recurse. However, this is not currently\n a problem (no builtin transforms exhibit this behavior).\n\n \"\"\"\n lookups = []\n\n for expr, lookup in transform.output_field.get_lookups().items():\n if issubclass(lookup, Transform):\n\n # type match indicates recursion.\n if type(transform) == lookup:\n continue\n\n sub_transform = lookup(transform)\n lookups += [\n LOOKUP_SEP.join([expr, sub_expr]) for sub_expr\n in lookups_for_transform(sub_transform)\n ]\n\n else:\n lookups.append(expr)\n\n return lookups\n\n\ndef lookahead(iterable):\n it = iter(iterable)\n try:\n current = next(it)\n except StopIteration:\n return\n\n for value in it:\n yield current, True\n current = value\n yield current, False\n", "id": "12474290", "language": "Python", "matching_score": 0.6149647235870361, "max_stars_count": 81, "path": "libs/rest_framework_filters/utils.py" }, { "content": "\nimport django\n\n\ndef set_many(instance, field, value):\n if django.VERSION < (1, 10):\n setattr(instance, field, value)\n else:\n field = getattr(instance, field)\n field.set(value)\n", "id": "9353329", "language": "Python", "matching_score": 0.1824643909931183, "max_stars_count": 81, "path": "libs/rest_framework_filters/compat.py" }, { "content": "\nfrom rest_framework import status, generics, viewsets\nimport django_filters.rest_framework\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom django_filters import Filter, DateRangeFilter, DateFilter, FilterSet\nimport rest_framework_filters as filters\nfrom .serializers import AccountsSerializer, GroupSerializer, PermissionSerializer\nfrom django.contrib.auth.models import Group, Permission\n\nfrom .models import Accounts\n\n\nclass AccountFilter(filters.FilterSet):\n\n username = filters.CharFilter(field_name='username', lookup_expr=\"icontains\")\n level = filters.CharFilter(field_name='level', lookup_expr=\"icontains\")\n # q = filters.CharFilter(field_name='cus_coding', lookup_expr=\"icontains\" )\n\n class Meta:\n model = Accounts\n fields = ['is_active']\n\nclass AccountsViewSet(viewsets.ModelViewSet):\n\n permission_classes = [IsAdminUser]\n\n queryset = Accounts.objects.all()\n serializer_class = AccountsSerializer\n # filter_backends = [django_filters.rest_framework.DjangoFilterBackend]\n # filter_fields = ('username', 'level','is_active')\n filter_class = AccountFilter\n\nclass GroupViewSet(viewsets.ModelViewSet):\n permission_classes = [IsAdminUser]\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n # filter_backends = [django_filters.rest_framework.DjangoFilterBackend]\n filter_fields = ('name',)\n\nclass PermissionViewSet(viewsets.ModelViewSet):\n permission_classes = [IsAdminUser]\n queryset = Permission.objects.all()\n serializer_class = PermissionSerializer\n # filter_backends = [django_filters.rest_framework.DjangoFilterBackend]\n filter_fields = ('name',)", "id": "8257079", "language": "Python", "matching_score": 4.055592060089111, "max_stars_count": 81, "path": "users/viewsets.py" }, { "content": "from django.http import Http404\nfrom django.db.models import Q\n\nfrom rest_framework.response import Response\nfrom rest_framework import status, generics, viewsets\nfrom django_filters.rest_framework import DjangoFilterBackend \n# from rest_framework_filters.backends import DjangoFilterBackend as rsf_DjangoFilterBackend\n# 
from django_filters import DateRangeFilter,DateFilter,FilterSet\nfrom rest_framework.filters import SearchFilter, OrderingFilter \nfrom django.db.models import Q\n\nimport rest_framework_filters as filters\n\n\nfrom .models import Hosts\nfrom .serializers import HostsSerializer\n\n\nclass HostsViewSet(viewsets.ModelViewSet):\n\tqueryset = Hosts.objects.all()\n\tserializer_class = HostsSerializer\n\tfilter_backends = ( DjangoFilterBackend, SearchFilter, OrderingFilter)\n\tfilter_fields = ['user_id', 'code', 'name']\n\n\tdef get_queryset(self):\n\t\tif self.request.user.is_superuser:\n\t\t\tqueryset = Hosts.objects.all()\n\t\telse:\n\t\t\tqueryset = self.queryset.filter(user_id=self.request.user.id)\n\t\treturn queryset\n\n\tdef create(self, request, *args, **kwargs):\n\t\torder = request.data.copy()\n\t\t# ex_hosts = self.objects.filter(user_id=request.user.id).count()\n\t\t# if ex_hosts >= 100 and not request.user.is_superuser:\n\t\t# \treturn Response({'msg':'最多只能创建100台服务器'}, status=status.HTTP_403_FORBIDDEN, headers=headers)\n\t\t#This QueryDict instance is immutable(意思是无法被更改),所以COPY一份,不管通过接口提交人是谁,全部设置为提交TOKEN的用户\n\t\torder.update({'user_id': request.user.id })\n\t\tserializer = self.get_serializer(data=order)\n\t\tserializer.is_valid(raise_exception=True)\n\t\tself.perform_create(serializer)\n\t\theaders = self.get_success_headers(serializer.data)\n\t\treturn Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n\t\n", "id": "12836177", "language": "Python", "matching_score": 2.2729082107543945, "max_stars_count": 81, "path": "servers/viewset.py" }, { "content": "from .filters import * # noqa\nfrom .filterset import FilterSet # noqa\n", "id": "6479408", "language": "Python", "matching_score": 0.5849322080612183, "max_stars_count": 743, "path": "libs/rest_framework_filters/__init__.py" }, { "content": "from django import template\n\nregister = template.Library()\n\n\[email protected]\ndef label(filterset, relationship):\n f = filterset\n f = f.filters[relationship]\n return f.label\n", "id": "1399581", "language": "Python", "matching_score": 0.005857182200998068, "max_stars_count": 743, "path": "libs/rest_framework_filters/templatetags/rest_framework_filters.py" }, { "content": "# Generated by the protocol buffer compiler. 
DO NOT EDIT!\n# source: Qot_GetOrderBook.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport Common_pb2 as Common__pb2\nimport Qot_Common_pb2 as Qot__Common__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='Qot_GetOrderBook.proto',\n package='Qot_GetOrderBook',\n syntax='proto2',\n serialized_pb=_b('\\n\\x16Qot_GetOrderBook.proto\\x12\\x10Qot_GetOrderBook\\x1a\\x0c\\x43ommon.proto\\x1a\\x10Qot_Common.proto\\\":\\n\\x03\\x43\\x32S\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12\\x0b\\n\\x03num\\x18\\x02 \\x02(\\x05\\\"\\x8f\\x01\\n\\x03S2C\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12/\\n\\x10orderBookAskList\\x18\\x02 \\x03(\\x0b\\x32\\x15.Qot_Common.OrderBook\\x12/\\n\\x10orderBookBidList\\x18\\x03 \\x03(\\x0b\\x32\\x15.Qot_Common.OrderBook\\\"-\\n\\x07Request\\x12\\\"\\n\\x03\\x63\\x32s\\x18\\x01 \\x02(\\x0b\\x32\\x15.Qot_GetOrderBook.C2S\\\"f\\n\\x08Response\\x12\\x15\\n\\x07retType\\x18\\x01 \\x02(\\x05:\\x04-400\\x12\\x0e\\n\\x06retMsg\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07\\x65rrCode\\x18\\x03 \\x01(\\x05\\x12\\\"\\n\\x03s2c\\x18\\x04 \\x01(\\x0b\\x32\\x15.Qot_GetOrderBook.S2C')\n ,\n dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])\n\n\n\n\n_C2S = _descriptor.Descriptor(\n name='C2S',\n full_name='Qot_GetOrderBook.C2S',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetOrderBook.C2S.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='num', full_name='Qot_GetOrderBook.C2S.num', index=1,\n number=2, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=76,\n serialized_end=134,\n)\n\n\n_S2C = _descriptor.Descriptor(\n name='S2C',\n full_name='Qot_GetOrderBook.S2C',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetOrderBook.S2C.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='orderBookAskList', full_name='Qot_GetOrderBook.S2C.orderBookAskList', index=1,\n number=2, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='orderBookBidList', 
full_name='Qot_GetOrderBook.S2C.orderBookBidList', index=2,\n number=3, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=137,\n serialized_end=280,\n)\n\n\n_REQUEST = _descriptor.Descriptor(\n name='Request',\n full_name='Qot_GetOrderBook.Request',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='c2s', full_name='Qot_GetOrderBook.Request.c2s', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=282,\n serialized_end=327,\n)\n\n\n_RESPONSE = _descriptor.Descriptor(\n name='Response',\n full_name='Qot_GetOrderBook.Response',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='retType', full_name='Qot_GetOrderBook.Response.retType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=True, default_value=-400,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='retMsg', full_name='Qot_GetOrderBook.Response.retMsg', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='errCode', full_name='Qot_GetOrderBook.Response.errCode', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='s2c', full_name='Qot_GetOrderBook.Response.s2c', index=3,\n number=4, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=329,\n serialized_end=431,\n)\n\n_C2S.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['orderBookAskList'].message_type = Qot__Common__pb2._ORDERBOOK\n_S2C.fields_by_name['orderBookBidList'].message_type = Qot__Common__pb2._ORDERBOOK\n_REQUEST.fields_by_name['c2s'].message_type = _C2S\n_RESPONSE.fields_by_name['s2c'].message_type = _S2C\nDESCRIPTOR.message_types_by_name['C2S'] = _C2S\nDESCRIPTOR.message_types_by_name['S2C'] = _S2C\nDESCRIPTOR.message_types_by_name['Request'] = _REQUEST\nDESCRIPTOR.message_types_by_name['Response'] = 
_RESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nC2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(\n DESCRIPTOR = _C2S,\n __module__ = 'Qot_GetOrderBook_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetOrderBook.C2S)\n ))\n_sym_db.RegisterMessage(C2S)\n\nS2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(\n DESCRIPTOR = _S2C,\n __module__ = 'Qot_GetOrderBook_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetOrderBook.S2C)\n ))\n_sym_db.RegisterMessage(S2C)\n\nRequest = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(\n DESCRIPTOR = _REQUEST,\n __module__ = 'Qot_GetOrderBook_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetOrderBook.Request)\n ))\n_sym_db.RegisterMessage(Request)\n\nResponse = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(\n DESCRIPTOR = _RESPONSE,\n __module__ = 'Qot_GetOrderBook_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetOrderBook.Response)\n ))\n_sym_db.RegisterMessage(Response)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "3765213", "language": "Python", "matching_score": 6.261250972747803, "max_stars_count": 5, "path": "futuquant/common/pb/Qot_GetOrderBook_pb2.py" }, { "content": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: Qot_GetBroker.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport Common_pb2 as Common__pb2\nimport Qot_Common_pb2 as Qot__Common__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='Qot_GetBroker.proto',\n package='Qot_GetBroker',\n syntax='proto2',\n serialized_pb=_b('\\n\\x13Qot_GetBroker.proto\\x12\\rQot_GetBroker\\x1a\\x0c\\x43ommon.proto\\x1a\\x10Qot_Common.proto\\\"-\\n\\x03\\x43\\x32S\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\\"\\x83\\x01\\n\\x03S2C\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12)\\n\\rbrokerAskList\\x18\\x02 \\x03(\\x0b\\x32\\x12.Qot_Common.Broker\\x12)\\n\\rbrokerBidList\\x18\\x03 \\x03(\\x0b\\x32\\x12.Qot_Common.Broker\\\"*\\n\\x07Request\\x12\\x1f\\n\\x03\\x63\\x32s\\x18\\x01 \\x02(\\x0b\\x32\\x12.Qot_GetBroker.C2S\\\"c\\n\\x08Response\\x12\\x15\\n\\x07retType\\x18\\x01 \\x02(\\x05:\\x04-400\\x12\\x0e\\n\\x06retMsg\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07\\x65rrCode\\x18\\x03 \\x01(\\x05\\x12\\x1f\\n\\x03s2c\\x18\\x04 \\x01(\\x0b\\x32\\x12.Qot_GetBroker.S2C')\n ,\n dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])\n\n\n\n\n_C2S = _descriptor.Descriptor(\n name='C2S',\n full_name='Qot_GetBroker.C2S',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetBroker.C2S.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n 
serialized_start=70,\n serialized_end=115,\n)\n\n\n_S2C = _descriptor.Descriptor(\n name='S2C',\n full_name='Qot_GetBroker.S2C',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetBroker.S2C.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='brokerAskList', full_name='Qot_GetBroker.S2C.brokerAskList', index=1,\n number=2, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='brokerBidList', full_name='Qot_GetBroker.S2C.brokerBidList', index=2,\n number=3, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=118,\n serialized_end=249,\n)\n\n\n_REQUEST = _descriptor.Descriptor(\n name='Request',\n full_name='Qot_GetBroker.Request',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='c2s', full_name='Qot_GetBroker.Request.c2s', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=251,\n serialized_end=293,\n)\n\n\n_RESPONSE = _descriptor.Descriptor(\n name='Response',\n full_name='Qot_GetBroker.Response',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='retType', full_name='Qot_GetBroker.Response.retType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=True, default_value=-400,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='retMsg', full_name='Qot_GetBroker.Response.retMsg', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='errCode', full_name='Qot_GetBroker.Response.errCode', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='s2c', full_name='Qot_GetBroker.Response.s2c', index=3,\n number=4, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n 
options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=295,\n serialized_end=394,\n)\n\n_C2S.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['brokerAskList'].message_type = Qot__Common__pb2._BROKER\n_S2C.fields_by_name['brokerBidList'].message_type = Qot__Common__pb2._BROKER\n_REQUEST.fields_by_name['c2s'].message_type = _C2S\n_RESPONSE.fields_by_name['s2c'].message_type = _S2C\nDESCRIPTOR.message_types_by_name['C2S'] = _C2S\nDESCRIPTOR.message_types_by_name['S2C'] = _S2C\nDESCRIPTOR.message_types_by_name['Request'] = _REQUEST\nDESCRIPTOR.message_types_by_name['Response'] = _RESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nC2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(\n DESCRIPTOR = _C2S,\n __module__ = 'Qot_GetBroker_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetBroker.C2S)\n ))\n_sym_db.RegisterMessage(C2S)\n\nS2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(\n DESCRIPTOR = _S2C,\n __module__ = 'Qot_GetBroker_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetBroker.S2C)\n ))\n_sym_db.RegisterMessage(S2C)\n\nRequest = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(\n DESCRIPTOR = _REQUEST,\n __module__ = 'Qot_GetBroker_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetBroker.Request)\n ))\n_sym_db.RegisterMessage(Request)\n\nResponse = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(\n DESCRIPTOR = _RESPONSE,\n __module__ = 'Qot_GetBroker_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetBroker.Response)\n ))\n_sym_db.RegisterMessage(Response)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "9988851", "language": "Python", "matching_score": 5.068221092224121, "max_stars_count": 5, "path": "futuquant/common/pb/Qot_GetBroker_pb2.py" }, { "content": "# Generated by the protocol buffer compiler. 
DO NOT EDIT!\n# source: Qot_GetHoldingChangeList.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport Common_pb2 as Common__pb2\nimport Qot_Common_pb2 as Qot__Common__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='Qot_GetHoldingChangeList.proto',\n package='Qot_GetHoldingChangeList',\n syntax='proto2',\n serialized_pb=_b('\\n\\x1eQot_GetHoldingChangeList.proto\\x12\\x18Qot_GetHoldingChangeList\\x1a\\x0c\\x43ommon.proto\\x1a\\x10Qot_Common.proto\\\"i\\n\\x03\\x43\\x32S\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12\\x16\\n\\x0eholderCategory\\x18\\x02 \\x02(\\x05\\x12\\x11\\n\\tbeginTime\\x18\\x03 \\x01(\\t\\x12\\x0f\\n\\x07\\x65ndTime\\x18\\x04 \\x01(\\t\\\"h\\n\\x03S2C\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12\\x39\\n\\x11holdingChangeList\\x18\\x02 \\x03(\\x0b\\x32\\x1e.Qot_Common.ShareHoldingChange\\\"5\\n\\x07Request\\x12*\\n\\x03\\x63\\x32s\\x18\\x01 \\x02(\\x0b\\x32\\x1d.Qot_GetHoldingChangeList.C2S\\\"n\\n\\x08Response\\x12\\x15\\n\\x07retType\\x18\\x01 \\x02(\\x05:\\x04-400\\x12\\x0e\\n\\x06retMsg\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07\\x65rrCode\\x18\\x03 \\x01(\\x05\\x12*\\n\\x03s2c\\x18\\x04 \\x01(\\x0b\\x32\\x1d.Qot_GetHoldingChangeList.S2C')\n ,\n dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])\n\n\n\n\n_C2S = _descriptor.Descriptor(\n name='C2S',\n full_name='Qot_GetHoldingChangeList.C2S',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetHoldingChangeList.C2S.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='holderCategory', full_name='Qot_GetHoldingChangeList.C2S.holderCategory', index=1,\n number=2, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='beginTime', full_name='Qot_GetHoldingChangeList.C2S.beginTime', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='endTime', full_name='Qot_GetHoldingChangeList.C2S.endTime', index=3,\n number=4, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=92,\n serialized_end=197,\n)\n\n\n_S2C = _descriptor.Descriptor(\n name='S2C',\n 
full_name='Qot_GetHoldingChangeList.S2C',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetHoldingChangeList.S2C.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='holdingChangeList', full_name='Qot_GetHoldingChangeList.S2C.holdingChangeList', index=1,\n number=2, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=199,\n serialized_end=303,\n)\n\n\n_REQUEST = _descriptor.Descriptor(\n name='Request',\n full_name='Qot_GetHoldingChangeList.Request',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='c2s', full_name='Qot_GetHoldingChangeList.Request.c2s', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=305,\n serialized_end=358,\n)\n\n\n_RESPONSE = _descriptor.Descriptor(\n name='Response',\n full_name='Qot_GetHoldingChangeList.Response',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='retType', full_name='Qot_GetHoldingChangeList.Response.retType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=True, default_value=-400,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='retMsg', full_name='Qot_GetHoldingChangeList.Response.retMsg', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='errCode', full_name='Qot_GetHoldingChangeList.Response.errCode', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='s2c', full_name='Qot_GetHoldingChangeList.Response.s2c', index=3,\n number=4, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=360,\n serialized_end=470,\n)\n\n_C2S.fields_by_name['security'].message_type = 
Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['holdingChangeList'].message_type = Qot__Common__pb2._SHAREHOLDINGCHANGE\n_REQUEST.fields_by_name['c2s'].message_type = _C2S\n_RESPONSE.fields_by_name['s2c'].message_type = _S2C\nDESCRIPTOR.message_types_by_name['C2S'] = _C2S\nDESCRIPTOR.message_types_by_name['S2C'] = _S2C\nDESCRIPTOR.message_types_by_name['Request'] = _REQUEST\nDESCRIPTOR.message_types_by_name['Response'] = _RESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nC2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(\n DESCRIPTOR = _C2S,\n __module__ = 'Qot_GetHoldingChangeList_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHoldingChangeList.C2S)\n ))\n_sym_db.RegisterMessage(C2S)\n\nS2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(\n DESCRIPTOR = _S2C,\n __module__ = 'Qot_GetHoldingChangeList_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHoldingChangeList.S2C)\n ))\n_sym_db.RegisterMessage(S2C)\n\nRequest = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(\n DESCRIPTOR = _REQUEST,\n __module__ = 'Qot_GetHoldingChangeList_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHoldingChangeList.Request)\n ))\n_sym_db.RegisterMessage(Request)\n\nResponse = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(\n DESCRIPTOR = _RESPONSE,\n __module__ = 'Qot_GetHoldingChangeList_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHoldingChangeList.Response)\n ))\n_sym_db.RegisterMessage(Response)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "4481036", "language": "Python", "matching_score": 5.80934476852417, "max_stars_count": 5, "path": "futuquant/common/pb/Qot_GetHoldingChangeList_pb2.py" }, { "content": "# Generated by the protocol buffer compiler. 
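# [Editor's note] Illustrative sketch only - the decoding side of the same pattern,
# using the Qot_GetHoldingChangeList messages defined above. retType is compared
# against RetType_Succeed (0) from Common.proto, which appears later in this dump;
# treat the import path as an assumption.
def _example_parse_holding_change_response(raw_bytes):
    from futuquant.common.pb import Qot_GetHoldingChangeList_pb2  # assumed package path
    rsp = Qot_GetHoldingChangeList_pb2.Response()
    rsp.ParseFromString(raw_bytes)      # raises DecodeError on malformed input
    if rsp.retType != 0:                # 0 == Common.RetType_Succeed
        raise RuntimeError("errCode=%s retMsg=%s" % (rsp.errCode, rsp.retMsg))
    # repeated field of Qot_Common.ShareHoldingChange messages
    return list(rsp.s2c.holdingChangeList)
# [End editor's note]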
DO NOT EDIT!\n# source: Qot_GetHistoryKL.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport Common_pb2 as Common__pb2\nimport Qot_Common_pb2 as Qot__Common__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='Qot_GetHistoryKL.proto',\n package='Qot_GetHistoryKL',\n syntax='proto2',\n serialized_pb=_b('\\n\\x16Qot_GetHistoryKL.proto\\x12\\x10Qot_GetHistoryKL\\x1a\\x0c\\x43ommon.proto\\x1a\\x10Qot_Common.proto\\\"\\xa3\\x01\\n\\x03\\x43\\x32S\\x12\\x11\\n\\trehabType\\x18\\x01 \\x02(\\x05\\x12\\x0e\\n\\x06klType\\x18\\x02 \\x02(\\x05\\x12&\\n\\x08security\\x18\\x03 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12\\x11\\n\\tbeginTime\\x18\\x04 \\x02(\\t\\x12\\x0f\\n\\x07\\x65ndTime\\x18\\x05 \\x02(\\t\\x12\\x13\\n\\x0bmaxAckKLNum\\x18\\x06 \\x01(\\x05\\x12\\x18\\n\\x10needKLFieldsFlag\\x18\\x07 \\x01(\\x03\\\"d\\n\\x03S2C\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12!\\n\\x06klList\\x18\\x02 \\x03(\\x0b\\x32\\x11.Qot_Common.KLine\\x12\\x12\\n\\nnextKLTime\\x18\\x03 \\x01(\\t\\\"-\\n\\x07Request\\x12\\\"\\n\\x03\\x63\\x32s\\x18\\x01 \\x02(\\x0b\\x32\\x15.Qot_GetHistoryKL.C2S\\\"f\\n\\x08Response\\x12\\x15\\n\\x07retType\\x18\\x01 \\x02(\\x05:\\x04-400\\x12\\x0e\\n\\x06retMsg\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07\\x65rrCode\\x18\\x03 \\x01(\\x05\\x12\\\"\\n\\x03s2c\\x18\\x04 \\x01(\\x0b\\x32\\x15.Qot_GetHistoryKL.S2C')\n ,\n dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])\n\n\n\n\n_C2S = _descriptor.Descriptor(\n name='C2S',\n full_name='Qot_GetHistoryKL.C2S',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='rehabType', full_name='Qot_GetHistoryKL.C2S.rehabType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='klType', full_name='Qot_GetHistoryKL.C2S.klType', index=1,\n number=2, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetHistoryKL.C2S.security', index=2,\n number=3, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='beginTime', full_name='Qot_GetHistoryKL.C2S.beginTime', index=3,\n number=4, type=9, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='endTime', full_name='Qot_GetHistoryKL.C2S.endTime', index=4,\n number=5, type=9, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n 
message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='maxAckKLNum', full_name='Qot_GetHistoryKL.C2S.maxAckKLNum', index=5,\n number=6, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='needKLFieldsFlag', full_name='Qot_GetHistoryKL.C2S.needKLFieldsFlag', index=6,\n number=7, type=3, cpp_type=2, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=77,\n serialized_end=240,\n)\n\n\n_S2C = _descriptor.Descriptor(\n name='S2C',\n full_name='Qot_GetHistoryKL.S2C',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetHistoryKL.S2C.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='klList', full_name='Qot_GetHistoryKL.S2C.klList', index=1,\n number=2, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='nextKLTime', full_name='Qot_GetHistoryKL.S2C.nextKLTime', index=2,\n number=3, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=242,\n serialized_end=342,\n)\n\n\n_REQUEST = _descriptor.Descriptor(\n name='Request',\n full_name='Qot_GetHistoryKL.Request',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='c2s', full_name='Qot_GetHistoryKL.Request.c2s', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=344,\n serialized_end=389,\n)\n\n\n_RESPONSE = _descriptor.Descriptor(\n name='Response',\n full_name='Qot_GetHistoryKL.Response',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='retType', full_name='Qot_GetHistoryKL.Response.retType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=True, default_value=-400,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n 
options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='retMsg', full_name='Qot_GetHistoryKL.Response.retMsg', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='errCode', full_name='Qot_GetHistoryKL.Response.errCode', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='s2c', full_name='Qot_GetHistoryKL.Response.s2c', index=3,\n number=4, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=391,\n serialized_end=493,\n)\n\n_C2S.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['klList'].message_type = Qot__Common__pb2._KLINE\n_REQUEST.fields_by_name['c2s'].message_type = _C2S\n_RESPONSE.fields_by_name['s2c'].message_type = _S2C\nDESCRIPTOR.message_types_by_name['C2S'] = _C2S\nDESCRIPTOR.message_types_by_name['S2C'] = _S2C\nDESCRIPTOR.message_types_by_name['Request'] = _REQUEST\nDESCRIPTOR.message_types_by_name['Response'] = _RESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nC2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(\n DESCRIPTOR = _C2S,\n __module__ = 'Qot_GetHistoryKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHistoryKL.C2S)\n ))\n_sym_db.RegisterMessage(C2S)\n\nS2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(\n DESCRIPTOR = _S2C,\n __module__ = 'Qot_GetHistoryKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHistoryKL.S2C)\n ))\n_sym_db.RegisterMessage(S2C)\n\nRequest = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(\n DESCRIPTOR = _REQUEST,\n __module__ = 'Qot_GetHistoryKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHistoryKL.Request)\n ))\n_sym_db.RegisterMessage(Request)\n\nResponse = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(\n DESCRIPTOR = _RESPONSE,\n __module__ = 'Qot_GetHistoryKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetHistoryKL.Response)\n ))\n_sym_db.RegisterMessage(Response)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "2016584", "language": "Python", "matching_score": 6.828049182891846, "max_stars_count": 5, "path": "futuquant/common/pb/Qot_GetHistoryKL_pb2.py" }, { "content": "# Generated by the protocol buffer compiler. 
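# [Editor's note] Illustrative sketch only - converting a parsed Qot_GetHistoryKL
# Response to JSON with google.protobuf.json_format.MessageToJson, mirroring what
# binary2str() does further down in this dump; the import path is an assumption.
def _example_history_kl_response_to_json(raw_bytes):
    from google.protobuf.json_format import MessageToJson
    from futuquant.common.pb import Qot_GetHistoryKL_pb2  # assumed package path
    rsp = Qot_GetHistoryKL_pb2.Response()
    rsp.ParseFromString(raw_bytes)
    return MessageToJson(rsp)   # JSON string, e.g. the repeated "klList" field
# [End editor's note]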
DO NOT EDIT!\n# source: Qot_GetKL.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport Common_pb2 as Common__pb2\nimport Qot_Common_pb2 as Qot__Common__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='Qot_GetKL.proto',\n package='Qot_GetKL',\n syntax='proto2',\n serialized_pb=_b('\\n\\x0fQot_GetKL.proto\\x12\\tQot_GetKL\\x1a\\x0c\\x43ommon.proto\\x1a\\x10Qot_Common.proto\\\"`\\n\\x03\\x43\\x32S\\x12\\x11\\n\\trehabType\\x18\\x01 \\x02(\\x05\\x12\\x0e\\n\\x06klType\\x18\\x02 \\x02(\\x05\\x12&\\n\\x08security\\x18\\x03 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12\\x0e\\n\\x06reqNum\\x18\\x04 \\x02(\\x05\\\"P\\n\\x03S2C\\x12&\\n\\x08security\\x18\\x01 \\x02(\\x0b\\x32\\x14.Qot_Common.Security\\x12!\\n\\x06klList\\x18\\x02 \\x03(\\x0b\\x32\\x11.Qot_Common.KLine\\\"&\\n\\x07Request\\x12\\x1b\\n\\x03\\x63\\x32s\\x18\\x01 \\x02(\\x0b\\x32\\x0e.Qot_GetKL.C2S\\\"_\\n\\x08Response\\x12\\x15\\n\\x07retType\\x18\\x01 \\x02(\\x05:\\x04-400\\x12\\x0e\\n\\x06retMsg\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07\\x65rrCode\\x18\\x03 \\x01(\\x05\\x12\\x1b\\n\\x03s2c\\x18\\x04 \\x01(\\x0b\\x32\\x0e.Qot_GetKL.S2C')\n ,\n dependencies=[Common__pb2.DESCRIPTOR,Qot__Common__pb2.DESCRIPTOR,])\n\n\n\n\n_C2S = _descriptor.Descriptor(\n name='C2S',\n full_name='Qot_GetKL.C2S',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='rehabType', full_name='Qot_GetKL.C2S.rehabType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='klType', full_name='Qot_GetKL.C2S.klType', index=1,\n number=2, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetKL.C2S.security', index=2,\n number=3, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='reqNum', full_name='Qot_GetKL.C2S.reqNum', index=3,\n number=4, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=62,\n serialized_end=158,\n)\n\n\n_S2C = _descriptor.Descriptor(\n name='S2C',\n full_name='Qot_GetKL.S2C',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='security', full_name='Qot_GetKL.S2C.security', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, 
default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='klList', full_name='Qot_GetKL.S2C.klList', index=1,\n number=2, type=11, cpp_type=10, label=3,\n has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=160,\n serialized_end=240,\n)\n\n\n_REQUEST = _descriptor.Descriptor(\n name='Request',\n full_name='Qot_GetKL.Request',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='c2s', full_name='Qot_GetKL.Request.c2s', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=242,\n serialized_end=280,\n)\n\n\n_RESPONSE = _descriptor.Descriptor(\n name='Response',\n full_name='Qot_GetKL.Response',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='retType', full_name='Qot_GetKL.Response.retType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=True, default_value=-400,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='retMsg', full_name='Qot_GetKL.Response.retMsg', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='errCode', full_name='Qot_GetKL.Response.errCode', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='s2c', full_name='Qot_GetKL.Response.s2c', index=3,\n number=4, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=282,\n serialized_end=377,\n)\n\n_C2S.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['security'].message_type = Qot__Common__pb2._SECURITY\n_S2C.fields_by_name['klList'].message_type = Qot__Common__pb2._KLINE\n_REQUEST.fields_by_name['c2s'].message_type = _C2S\n_RESPONSE.fields_by_name['s2c'].message_type = _S2C\nDESCRIPTOR.message_types_by_name['C2S'] = _C2S\nDESCRIPTOR.message_types_by_name['S2C'] = _S2C\nDESCRIPTOR.message_types_by_name['Request'] = 
_REQUEST\nDESCRIPTOR.message_types_by_name['Response'] = _RESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nC2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(\n DESCRIPTOR = _C2S,\n __module__ = 'Qot_GetKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetKL.C2S)\n ))\n_sym_db.RegisterMessage(C2S)\n\nS2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(\n DESCRIPTOR = _S2C,\n __module__ = 'Qot_GetKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetKL.S2C)\n ))\n_sym_db.RegisterMessage(S2C)\n\nRequest = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(\n DESCRIPTOR = _REQUEST,\n __module__ = 'Qot_GetKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetKL.Request)\n ))\n_sym_db.RegisterMessage(Request)\n\nResponse = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(\n DESCRIPTOR = _RESPONSE,\n __module__ = 'Qot_GetKL_pb2'\n # @@protoc_insertion_point(class_scope:Qot_GetKL.Response)\n ))\n_sym_db.RegisterMessage(Response)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "2646048", "language": "Python", "matching_score": 4.507417678833008, "max_stars_count": 5, "path": "futuquant/common/pb/Qot_GetKL_pb2.py" }, { "content": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: Trd_PlaceOrder.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport Common_pb2 as Common__pb2\nimport Trd_Common_pb2 as Trd__Common__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='Trd_PlaceOrder.proto',\n package='Trd_PlaceOrder',\n syntax='proto2',\n serialized_pb=_b('\\n\\x14Trd_PlaceOrder.proto\\x12\\x0eTrd_PlaceOrder\\x1a\\x0c\\x43ommon.proto\\x1a\\x10Trd_Common.proto\\\"\\xe2\\x01\\n\\x03\\x43\\x32S\\x12\\\"\\n\\x08packetID\\x18\\x01 \\x02(\\x0b\\x32\\x10.Common.PacketID\\x12%\\n\\x06header\\x18\\x02 \\x02(\\x0b\\x32\\x15.Trd_Common.TrdHeader\\x12\\x0f\\n\\x07trdSide\\x18\\x03 \\x02(\\x05\\x12\\x11\\n\\torderType\\x18\\x04 \\x02(\\x05\\x12\\x0c\\n\\x04\\x63ode\\x18\\x05 \\x02(\\t\\x12\\x0b\\n\\x03qty\\x18\\x06 \\x02(\\x01\\x12\\r\\n\\x05price\\x18\\x07 \\x01(\\x01\\x12\\x13\\n\\x0b\\x61\\x64justPrice\\x18\\x08 \\x01(\\x08\\x12\\x1a\\n\\x12\\x61\\x64justSideAndLimit\\x18\\t \\x01(\\x01\\x12\\x11\\n\\tsecMarket\\x18\\n \\x01(\\x05\\\"=\\n\\x03S2C\\x12%\\n\\x06header\\x18\\x01 \\x02(\\x0b\\x32\\x15.Trd_Common.TrdHeader\\x12\\x0f\\n\\x07orderID\\x18\\x02 \\x01(\\x04\\\"+\\n\\x07Request\\x12 \\n\\x03\\x63\\x32s\\x18\\x01 \\x02(\\x0b\\x32\\x13.Trd_PlaceOrder.C2S\\\"d\\n\\x08Response\\x12\\x15\\n\\x07retType\\x18\\x01 \\x02(\\x05:\\x04-400\\x12\\x0e\\n\\x06retMsg\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07\\x65rrCode\\x18\\x03 \\x01(\\x05\\x12 \\n\\x03s2c\\x18\\x04 \\x01(\\x0b\\x32\\x13.Trd_PlaceOrder.S2C')\n ,\n dependencies=[Common__pb2.DESCRIPTOR,Trd__Common__pb2.DESCRIPTOR,])\n\n\n\n\n_C2S = _descriptor.Descriptor(\n name='C2S',\n full_name='Trd_PlaceOrder.C2S',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='packetID', full_name='Trd_PlaceOrder.C2S.packetID', index=0,\n number=1, type=11, cpp_type=10, 
label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='header', full_name='Trd_PlaceOrder.C2S.header', index=1,\n number=2, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='trdSide', full_name='Trd_PlaceOrder.C2S.trdSide', index=2,\n number=3, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='orderType', full_name='Trd_PlaceOrder.C2S.orderType', index=3,\n number=4, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='code', full_name='Trd_PlaceOrder.C2S.code', index=4,\n number=5, type=9, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='qty', full_name='Trd_PlaceOrder.C2S.qty', index=5,\n number=6, type=1, cpp_type=5, label=2,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='price', full_name='Trd_PlaceOrder.C2S.price', index=6,\n number=7, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='adjustPrice', full_name='Trd_PlaceOrder.C2S.adjustPrice', index=7,\n number=8, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='adjustSideAndLimit', full_name='Trd_PlaceOrder.C2S.adjustSideAndLimit', index=8,\n number=9, type=1, cpp_type=5, label=1,\n has_default_value=False, default_value=float(0),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='secMarket', full_name='Trd_PlaceOrder.C2S.secMarket', index=9,\n number=10, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=73,\n serialized_end=299,\n)\n\n\n_S2C = _descriptor.Descriptor(\n name='S2C',\n full_name='Trd_PlaceOrder.S2C',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n 
name='header', full_name='Trd_PlaceOrder.S2C.header', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='orderID', full_name='Trd_PlaceOrder.S2C.orderID', index=1,\n number=2, type=4, cpp_type=4, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=301,\n serialized_end=362,\n)\n\n\n_REQUEST = _descriptor.Descriptor(\n name='Request',\n full_name='Trd_PlaceOrder.Request',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='c2s', full_name='Trd_PlaceOrder.Request.c2s', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=364,\n serialized_end=407,\n)\n\n\n_RESPONSE = _descriptor.Descriptor(\n name='Response',\n full_name='Trd_PlaceOrder.Response',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='retType', full_name='Trd_PlaceOrder.Response.retType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=True, default_value=-400,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='retMsg', full_name='Trd_PlaceOrder.Response.retMsg', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='errCode', full_name='Trd_PlaceOrder.Response.errCode', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='s2c', full_name='Trd_PlaceOrder.Response.s2c', index=3,\n number=4, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=409,\n serialized_end=509,\n)\n\n_C2S.fields_by_name['packetID'].message_type = Common__pb2._PACKETID\n_C2S.fields_by_name['header'].message_type = Trd__Common__pb2._TRDHEADER\n_S2C.fields_by_name['header'].message_type = Trd__Common__pb2._TRDHEADER\n_REQUEST.fields_by_name['c2s'].message_type = _C2S\n_RESPONSE.fields_by_name['s2c'].message_type = 
_S2C\nDESCRIPTOR.message_types_by_name['C2S'] = _C2S\nDESCRIPTOR.message_types_by_name['S2C'] = _S2C\nDESCRIPTOR.message_types_by_name['Request'] = _REQUEST\nDESCRIPTOR.message_types_by_name['Response'] = _RESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nC2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(\n DESCRIPTOR = _C2S,\n __module__ = 'Trd_PlaceOrder_pb2'\n # @@protoc_insertion_point(class_scope:Trd_PlaceOrder.C2S)\n ))\n_sym_db.RegisterMessage(C2S)\n\nS2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(\n DESCRIPTOR = _S2C,\n __module__ = 'Trd_PlaceOrder_pb2'\n # @@protoc_insertion_point(class_scope:Trd_PlaceOrder.S2C)\n ))\n_sym_db.RegisterMessage(S2C)\n\nRequest = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(\n DESCRIPTOR = _REQUEST,\n __module__ = 'Trd_PlaceOrder_pb2'\n # @@protoc_insertion_point(class_scope:Trd_PlaceOrder.Request)\n ))\n_sym_db.RegisterMessage(Request)\n\nResponse = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(\n DESCRIPTOR = _RESPONSE,\n __module__ = 'Trd_PlaceOrder_pb2'\n # @@protoc_insertion_point(class_scope:Trd_PlaceOrder.Response)\n ))\n_sym_db.RegisterMessage(Response)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "4888758", "language": "Python", "matching_score": 6.832123756408691, "max_stars_count": 5, "path": "futuquant/common/pb/Trd_PlaceOrder_pb2.py" }, { "content": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: Common.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf.internal import enum_type_wrapper\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='Common.proto',\n package='Common',\n syntax='proto2',\n serialized_pb=_b('\\n\\x0c\\x43ommon.proto\\x12\\x06\\x43ommon\\\",\\n\\x08PacketID\\x12\\x0e\\n\\x06\\x63onnID\\x18\\x01 \\x02(\\x04\\x12\\x10\\n\\x08serialNo\\x18\\x02 \\x02(\\r*w\\n\\x07RetType\\x12\\x13\\n\\x0fRetType_Succeed\\x10\\x00\\x12\\x1b\\n\\x0eRetType_Failed\\x10\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x01\\x12\\x1c\\n\\x0fRetType_TimeOut\\x10\\x9c\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x01\\x12\\x1c\\n\\x0fRetType_Unknown\\x10\\xf0\\xfc\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x01')\n)\n\n_RETTYPE = _descriptor.EnumDescriptor(\n name='RetType',\n full_name='Common.RetType',\n filename=None,\n file=DESCRIPTOR,\n values=[\n _descriptor.EnumValueDescriptor(\n name='RetType_Succeed', index=0, number=0,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(\n name='RetType_Failed', index=1, number=-1,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(\n name='RetType_TimeOut', index=2, number=-100,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(\n name='RetType_Unknown', index=3, number=-400,\n options=None,\n type=None),\n ],\n containing_type=None,\n options=None,\n serialized_start=70,\n serialized_end=189,\n)\n_sym_db.RegisterEnumDescriptor(_RETTYPE)\n\nRetType = enum_type_wrapper.EnumTypeWrapper(_RETTYPE)\nRetType_Succeed = 0\nRetType_Failed = -1\nRetType_TimeOut = -100\nRetType_Unknown = -400\n\n\n\n_PACKETID = 
_descriptor.Descriptor(\n name='PacketID',\n full_name='Common.PacketID',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='connID', full_name='Common.PacketID.connID', index=0,\n number=1, type=4, cpp_type=4, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='serialNo', full_name='Common.PacketID.serialNo', index=1,\n number=2, type=13, cpp_type=3, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=24,\n serialized_end=68,\n)\n\nDESCRIPTOR.message_types_by_name['PacketID'] = _PACKETID\nDESCRIPTOR.enum_types_by_name['RetType'] = _RETTYPE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nPacketID = _reflection.GeneratedProtocolMessageType('PacketID', (_message.Message,), dict(\n DESCRIPTOR = _PACKETID,\n __module__ = 'Common_pb2'\n # @@protoc_insertion_point(class_scope:Common.PacketID)\n ))\n_sym_db.RegisterMessage(PacketID)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "6380458", "language": "Python", "matching_score": 4.649496555328369, "max_stars_count": 5, "path": "futuquant/common/pb/Common_pb2.py" }, { "content": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: InitConnect.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nimport Common_pb2 as Common__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='InitConnect.proto',\n package='InitConnect',\n syntax='proto2',\n serialized_pb=_b('\\n\\x11InitConnect.proto\\x12\\x0bInitConnect\\x1a\\x0c\\x43ommon.proto\\\">\\n\\x03\\x43\\x32S\\x12\\x11\\n\\tclientVer\\x18\\x01 \\x02(\\x05\\x12\\x10\\n\\x08\\x63lientID\\x18\\x02 \\x02(\\t\\x12\\x12\\n\\nrecvNotify\\x18\\x03 \\x01(\\x08\\\"l\\n\\x03S2C\\x12\\x11\\n\\tserverVer\\x18\\x01 \\x02(\\x05\\x12\\x13\\n\\x0bloginUserID\\x18\\x02 \\x02(\\x04\\x12\\x0e\\n\\x06\\x63onnID\\x18\\x03 \\x02(\\x04\\x12\\x12\\n\\nconnAESKey\\x18\\x04 \\x02(\\t\\x12\\x19\\n\\x11keepAliveInterval\\x18\\x05 \\x02(\\x05\\\"(\\n\\x07Request\\x12\\x1d\\n\\x03\\x63\\x32s\\x18\\x01 \\x02(\\x0b\\x32\\x10.InitConnect.C2S\\\"a\\n\\x08Response\\x12\\x15\\n\\x07retType\\x18\\x01 \\x02(\\x05:\\x04-400\\x12\\x0e\\n\\x06retMsg\\x18\\x02 \\x01(\\t\\x12\\x0f\\n\\x07\\x65rrCode\\x18\\x03 \\x01(\\x05\\x12\\x1d\\n\\x03s2c\\x18\\x04 \\x01(\\x0b\\x32\\x10.InitConnect.S2C')\n ,\n dependencies=[Common__pb2.DESCRIPTOR,])\n\n\n\n\n_C2S = _descriptor.Descriptor(\n name='C2S',\n full_name='InitConnect.C2S',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='clientVer', full_name='InitConnect.C2S.clientVer', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, 
enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='clientID', full_name='InitConnect.C2S.clientID', index=1,\n number=2, type=9, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='recvNotify', full_name='InitConnect.C2S.recvNotify', index=2,\n number=3, type=8, cpp_type=7, label=1,\n has_default_value=False, default_value=False,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=48,\n serialized_end=110,\n)\n\n\n_S2C = _descriptor.Descriptor(\n name='S2C',\n full_name='InitConnect.S2C',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='serverVer', full_name='InitConnect.S2C.serverVer', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='loginUserID', full_name='InitConnect.S2C.loginUserID', index=1,\n number=2, type=4, cpp_type=4, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='connID', full_name='InitConnect.S2C.connID', index=2,\n number=3, type=4, cpp_type=4, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='connAESKey', full_name='InitConnect.S2C.connAESKey', index=3,\n number=4, type=9, cpp_type=9, label=2,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='keepAliveInterval', full_name='InitConnect.S2C.keepAliveInterval', index=4,\n number=5, type=5, cpp_type=1, label=2,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=112,\n serialized_end=220,\n)\n\n\n_REQUEST = _descriptor.Descriptor(\n name='Request',\n full_name='InitConnect.Request',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='c2s', full_name='InitConnect.Request.c2s', index=0,\n number=1, type=11, cpp_type=10, label=2,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n 
options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=222,\n serialized_end=262,\n)\n\n\n_RESPONSE = _descriptor.Descriptor(\n name='Response',\n full_name='InitConnect.Response',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='retType', full_name='InitConnect.Response.retType', index=0,\n number=1, type=5, cpp_type=1, label=2,\n has_default_value=True, default_value=-400,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='retMsg', full_name='InitConnect.Response.retMsg', index=1,\n number=2, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='errCode', full_name='InitConnect.Response.errCode', index=2,\n number=3, type=5, cpp_type=1, label=1,\n has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n _descriptor.FieldDescriptor(\n name='s2c', full_name='InitConnect.Response.s2c', index=3,\n number=4, type=11, cpp_type=10, label=1,\n has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n options=None,\n is_extendable=False,\n syntax='proto2',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=264,\n serialized_end=361,\n)\n\n_REQUEST.fields_by_name['c2s'].message_type = _C2S\n_RESPONSE.fields_by_name['s2c'].message_type = _S2C\nDESCRIPTOR.message_types_by_name['C2S'] = _C2S\nDESCRIPTOR.message_types_by_name['S2C'] = _S2C\nDESCRIPTOR.message_types_by_name['Request'] = _REQUEST\nDESCRIPTOR.message_types_by_name['Response'] = _RESPONSE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nC2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(\n DESCRIPTOR = _C2S,\n __module__ = 'InitConnect_pb2'\n # @@protoc_insertion_point(class_scope:InitConnect.C2S)\n ))\n_sym_db.RegisterMessage(C2S)\n\nS2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(\n DESCRIPTOR = _S2C,\n __module__ = 'InitConnect_pb2'\n # @@protoc_insertion_point(class_scope:InitConnect.S2C)\n ))\n_sym_db.RegisterMessage(S2C)\n\nRequest = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(\n DESCRIPTOR = _REQUEST,\n __module__ = 'InitConnect_pb2'\n # @@protoc_insertion_point(class_scope:InitConnect.Request)\n ))\n_sym_db.RegisterMessage(Request)\n\nResponse = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(\n DESCRIPTOR = _RESPONSE,\n __module__ = 'InitConnect_pb2'\n # @@protoc_insertion_point(class_scope:InitConnect.Response)\n ))\n_sym_db.RegisterMessage(Response)\n\n\n# @@protoc_insertion_point(module_scope)\n", "id": "7395433", "language": "Python", "matching_score": 1.4377036094665527, "max_stars_count": 5, "path": "futuquant/common/pb/InitConnect_pb2.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport hashlib, json, os, sys, socket, traceback, time, struct, collections\nfrom datetime import datetime, timedelta\nfrom struct import calcsize\nfrom 
google.protobuf.json_format import MessageToJson\nfrom threading import RLock\nfrom futu.common.conn_mng import *\nfrom futu.common.sys_config import *\nfrom futu.common.pbjson import json2pb\n\n\nProtoInfo = collections.namedtuple('ProtoInfo', ['proto_id', 'serial_no'])\n\n\ndef get_message_head_len():\n return calcsize(MESSAGE_HEAD_FMT)\n\n\ndef check_date_str_format(s, default_time=\"00:00:00\"):\n \"\"\"Check the format of date string\"\"\"\n try:\n str_fmt = s\n if \":\" not in s:\n str_fmt = '{} {}'.format(s, default_time)\n\n dt_obj = datetime.strptime(str_fmt, \"%Y-%m-%d %H:%M:%S\")\n\n return RET_OK, dt_obj\n\n except ValueError:\n error_str = ERROR_STR_PREFIX + \"wrong time or time format\"\n return RET_ERROR, error_str\n\n\ndef normalize_date_format(date_str, default_time=\"00:00:00\"):\n \"\"\"normalize the format of data\"\"\"\n ret_code, ret_data = check_date_str_format(date_str, default_time)\n if ret_code != RET_OK:\n return ret_code, ret_data\n\n return RET_OK, ret_data.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\ndef normalize_start_end_date(start, end, delta_days=0, default_time_start=\"00:00:00\", default_time_end=\"23:59:59\", prefer_end_now=True):\n \"\"\"\n\n :param start:\n :param end:\n :param delta_days:\n :param default_time_start:\n :param default_time_end:\n :param prefer_end_now: 为True时,当start和end都为None时,end设为当前时间,为False则start设为当前时间\n :return:\n \"\"\"\n if start is not None and is_str(start) is False:\n error_str = ERROR_STR_PREFIX + \"the type of start param is wrong\"\n return RET_ERROR, error_str, None, None\n\n if end is not None and is_str(end) is False:\n error_str = ERROR_STR_PREFIX + \"the type of end param is wrong\"\n return RET_ERROR, error_str, None, None\n\n dt_start = None\n dt_end = None\n delta = timedelta(days=delta_days)\n hour_end, min_end, sec_end = [int(x) for x in default_time_end.split(':')]\n hour_start, min_start, sec_start = [int(x) for x in default_time_start.split(':')]\n\n if start:\n ret_code, ret_data = check_date_str_format(start, default_time_start)\n if ret_code != RET_OK:\n return ret_code, ret_data, start, end\n dt_start = ret_data\n\n if end:\n ret_code, ret_data = check_date_str_format(end, default_time_end)\n if ret_code != RET_OK:\n return ret_code, ret_data, start, end\n dt_end = ret_data\n\n if end and not start:\n dt_tmp = dt_end - delta\n dt_start = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_start, minute=min_start, second=sec_start)\n\n if start and not end:\n dt_tmp = dt_start + delta\n dt_end = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_end, minute=min_end, second=sec_end)\n\n if not start and not end:\n if prefer_end_now:\n dt_now = datetime.now()\n dt_end = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=hour_end, minute=min_end, second=sec_end)\n dt_tmp = dt_end - delta\n dt_start = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_start, minute=min_start, second=sec_start)\n else:\n dt_now = datetime.now()\n dt_start = datetime(year=dt_now.year, month=dt_now.month, day=dt_now.day, hour=hour_start, minute=min_start,\n second=sec_start)\n dt_tmp = dt_start + delta\n dt_end = datetime(year=dt_tmp.year, month=dt_tmp.month, day=dt_tmp.day, hour=hour_end, minute=min_end,\n second=sec_end)\n\n start = dt_start.strftime(\"%Y-%m-%d %H:%M:%S\")\n end = dt_end.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n return RET_OK, '', start, end\n\n\ndef is_str(s):\n if IS_PY2:\n return isinstance(s, str) or isinstance(s, unicode)\n else:\n return 
isinstance(s, str)\n\n\ndef extract_pls_rsp(rsp_str):\n \"\"\"Extract the response of PLS\"\"\"\n try:\n rsp = json.loads(rsp_str)\n except ValueError:\n traceback.print_exc()\n err = sys.exc_info()[1]\n err_str = ERROR_STR_PREFIX + str(err)\n return RET_ERROR, err_str, None\n\n error_code = int(rsp['retType'])\n\n if error_code != 1:\n error_str = ERROR_STR_PREFIX + rsp['retMsg']\n return RET_ERROR, error_str, None\n\n return RET_OK, \"\", rsp\n\n\ndef split_stock_str(stock_str_param):\n \"\"\"split the stock string\"\"\"\n stock_str = str(stock_str_param)\n\n split_loc = stock_str.find(\".\")\n '''do not use the built-in split function in python.\n The built-in function cannot handle some stock strings correctly.\n for instance, US..DJI, where the dot . itself is a part of original code'''\n if 0 <= split_loc < len(stock_str) - 1 and Market.if_has_key(stock_str[0:split_loc]):\n market_str = stock_str[0:split_loc]\n _, market_code = Market.to_number(market_str)\n partial_stock_str = stock_str[split_loc + 1:]\n return RET_OK, (market_code, partial_stock_str)\n\n else:\n error_str = ERROR_STR_PREFIX + \"format of code %s is wrong. (US.AAPL, HK.00700, SZ.000001)\" % stock_str\n return RET_ERROR, error_str\n\n\ndef merge_qot_mkt_stock_str(qot_mkt, partial_stock_str):\n \"\"\"\n Merge the string of stocks\n :param market: market code\n :param partial_stock_str: original stock code string. i.e. \"AAPL\",\"00700\", \"000001\"\n :return: unified representation of a stock code. i.e. \"US.AAPL\", \"HK.00700\", \"SZ.000001\"\n\n \"\"\"\n market_str = Market.to_string2(qot_mkt)\n stock_str = '.'.join([market_str, partial_stock_str])\n return stock_str\n\n\ndef merge_trd_mkt_stock_str(trd_sec_mkt, partial_stock_str):\n \"\"\"\n Merge the string of stocks\n :param market: market code\n :param partial_stock_str: original stock code string. i.e. \"AAPL\",\"00700\", \"000001\"\n :return: unified representation of a stock code. i.e. 
\"US.AAPL\", \"HK.00700\", \"SZ.000001\"\n\n \"\"\"\n mkt_qot = Market.NONE\n if trd_sec_mkt == Trd_Common_pb2.TrdSecMarket_HK:\n mkt_qot = Market.HK\n elif trd_sec_mkt == Trd_Common_pb2.TrdSecMarket_CN_SH:\n mkt_qot = Market.SH\n elif trd_sec_mkt == Trd_Common_pb2.TrdSecMarket_CN_SZ:\n mkt_qot = Market.SZ\n elif trd_sec_mkt == Trd_Common_pb2.TrdSecMarket_US:\n mkt_qot = Market.US\n elif trd_sec_mkt == Trd_Common_pb2.TrdSecMarket_SG:\n mkt_qot = Market.SG\n elif trd_sec_mkt == Trd_Common_pb2.TrdSecMarket_JP:\n mkt_qot = Market.JP\n _, mkt = Market.to_number(mkt_qot)\n return merge_qot_mkt_stock_str(mkt, partial_stock_str)\n\n\ndef str2binary(s):\n \"\"\"\n Transfer string to binary\n :param s: string content to be transformed to binary\n :return: binary\n \"\"\"\n return s.encode('utf-8')\n\n\ndef is_str(obj):\n if sys.version_info.major == 3:\n return isinstance(obj, str) or isinstance(obj, bytes)\n else:\n return isinstance(obj, basestring)\n\n\ndef price_to_str_int1000(price):\n return str(int(round(float(price) * 1000,\n 0))) if str(price) != '' else ''\n\n\n# 1000*int price to float val\ndef int1000_price_to_float(price):\n return round(float(price) / 1000.0,\n 3) if str(price) != '' else float(0)\n\n\n# 10^9 int price to float val\ndef int10_9_price_to_float(price):\n return round(float(price) / float(10**9),\n 3) if str(price) != '' else float(0)\n\n\n# list 参数除重及规整\ndef unique_and_normalize_list(lst):\n ret = []\n if not lst:\n return ret\n tmp = lst if isinstance(lst, list) else [lst]\n [ret.append(x) for x in tmp if x not in ret]\n return ret\n\n\ndef md5_transform(raw_str):\n h1 = hashlib.md5()\n h1.update(raw_str.encode(encoding='utf-8'))\n return h1.hexdigest()\n\n\ng_unique_id = int(time.time() % 10000)\ng_unique_lock = RLock()\ndef get_unique_id32():\n global g_unique_id\n with g_unique_lock:\n g_unique_id += 1\n if g_unique_id >= 4294967295:\n g_unique_id = int(time.time() % 10000)\n ret_id = g_unique_id\n return ret_id\n\n\nclass ProtobufMap(dict):\n created_protobuf_map = {}\n\n def __init__(self):\n\n \"\"\" InitConnect = 1001 # 初始化连接 \"\"\"\n from futu.common.pb.InitConnect_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.InitConnect] = Response()\n\n \"\"\" GetGlobalState = 1002 # 获取全局状态 \"\"\"\n from futu.common.pb.GetGlobalState_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.GetGlobalState] = Response()\n\n \"\"\" Notify = 1003 # 通知推送 \"\"\"\n from futu.common.pb.Notify_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Notify] = Response()\n\n \"\"\" KeepAlive = 1004 # 通知推送 \"\"\"\n from futu.common.pb.KeepAlive_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.KeepAlive] = Response()\n\n \"\"\" GetUserInfo = 1005 # 获取全局状态 \"\"\"\n from futu.common.pb.GetUserInfo_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.GetUserInfo] = Response()\n\n \"\"\" GetUserInfo = 1006 # 获取用户信息 \"\"\"\n from futu.common.pb.Verification_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Verification] = Response()\n\n \"\"\" GetUserInfo = 1007 # 获取延迟统计 \"\"\"\n from futu.common.pb.GetDelayStatistics_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.GetDelayStatistics] = Response()\n\n \"\"\" TestCmd = 1008 # 测试命令 \"\"\"\n from futu.common.pb.TestCmd_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.TestCmd] = Response()\n\n \"\"\" Trd_GetAccList = 2001 # 获取业务账户列表 \"\"\"\n from futu.common.pb.Trd_GetAccList_pb2 import Response\n 
ProtobufMap.created_protobuf_map[ProtoId.Trd_GetAccList] = Response()\n\n \"\"\" Trd_UnlockTrade = 2005 # 解锁或锁定交易 \"\"\"\n from futu.common.pb.Trd_UnlockTrade_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_UnlockTrade] = Response()\n\n \"\"\" Trd_SubAccPush = 2008 # 订阅业务账户的交易推送数据 \"\"\"\n from futu.common.pb.Trd_SubAccPush_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_SubAccPush] = Response()\n\n \"\"\" Trd_GetFunds = 2101 # 获取账户资金 \"\"\"\n from futu.common.pb.Trd_GetFunds_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetFunds] = Response()\n\n \"\"\" Trd_GetPositionList = 2102 # 获取账户持仓 \"\"\"\n from futu.common.pb.Trd_GetPositionList_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetPositionList] = Response()\n\n \"\"\" Trd_GetOrderList = 2201 # 获取订单列表 \"\"\"\n from futu.common.pb.Trd_GetOrderList_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetOrderList] = Response()\n\n \"\"\" Trd_PlaceOrder = 2202 # 下单 \"\"\"\n from futu.common.pb.Trd_PlaceOrder_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_PlaceOrder] = Response()\n\n \"\"\" Trd_ModifyOrder = 2205 # 修改订单 \"\"\"\n from futu.common.pb.Trd_ModifyOrder_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_ModifyOrder] = Response()\n\n \"\"\" Trd_UpdateOrder = 2208 # 订单状态变动通知(推送) \"\"\"\n from futu.common.pb.Trd_UpdateOrder_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_UpdateOrder] = Response()\n\n \"\"\" Trd_GetOrderFillList = 2211 # 获取成交列表 \"\"\"\n from futu.common.pb.Trd_GetOrderFillList_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetOrderFillList] = Response()\n\n \"\"\" Trd_UpdateOrderFill = 2218 # 成交通知(推送) \"\"\"\n from futu.common.pb.Trd_UpdateOrderFill_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_UpdateOrderFill] = Response()\n\n \"\"\" Trd_GetHistoryOrderList = 2221 # 获取历史订单列表 \"\"\"\n from futu.common.pb.Trd_GetHistoryOrderList_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetHistoryOrderList] = Response()\n\n \"\"\" Trd_GetHistoryOrderFillList = 2222 # 获取历史成交列表 \"\"\"\n from futu.common.pb.Trd_GetHistoryOrderFillList_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetHistoryOrderFillList] = Response()\n\n \"\"\" Qot_GetReference = 2223 获取正股相关股票,暂时只有窝轮\"\"\"\n from futu.common.pb.Trd_GetMarginRatio_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetMarginRatio] = Response()\n\n \"\"\" Qot_Sub = 3001 # 订阅或者反订阅 \"\"\"\n from futu.common.pb.Qot_Sub_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_Sub] = Response()\n\n \"\"\" Qot_RegQotPush = 3002 # 注册推送 \"\"\"\n from futu.common.pb.Qot_RegQotPush_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_RegQotPush] = Response()\n\n \"\"\" Qot_GetSubInfo = 3003 # 获取订阅信息 \"\"\"\n from futu.common.pb.Qot_GetSubInfo_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSubInfo] = Response()\n\n \"\"\" Qot_GetBasicQot = 3004 # 获取股票基本行情 \"\"\"\n from futu.common.pb.Qot_GetBasicQot_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetBasicQot] = Response()\n\n \"\"\" Qot_UpdateBasicQot = 3005 # 推送股票基本行情 \"\"\"\n from futu.common.pb.Qot_UpdateBasicQot_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateBasicQot] = Response()\n\n \"\"\" Qot_GetKL = 3006 # 获取K线 \"\"\"\n from futu.common.pb.Qot_GetKL_pb2 import Response\n 
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetKL] = Response()\n\n \"\"\" Qot_UpdateKL = 3007 # 推送K线 \"\"\"\n from futu.common.pb.Qot_UpdateKL_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateKL] = Response()\n\n \"\"\" Qot_GetRT = 3008 # 获取分时 \"\"\"\n from futu.common.pb.Qot_GetRT_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetRT] = Response()\n\n \"\"\" Qot_UpdateRT = 3009 # 推送分时 \"\"\"\n from futu.common.pb.Qot_UpdateRT_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateRT] = Response()\n\n \"\"\" Qot_GetTicker = 3010 # 获取逐笔 \"\"\"\n from futu.common.pb.Qot_GetTicker_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetTicker] = Response()\n\n \"\"\" Qot_UpdateTicker = 3011 # 推送逐笔 \"\"\"\n from futu.common.pb.Qot_UpdateTicker_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateTicker] = Response()\n\n \"\"\" Qot_GetOrderBook = 3012 # 获取买卖盘 \"\"\"\n from futu.common.pb.Qot_GetOrderBook_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOrderBook] = Response()\n\n \"\"\" Qot_UpdateOrderBook = 3013 # 推送买卖盘 \"\"\"\n from futu.common.pb.Qot_UpdateOrderBook_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateOrderBook] = Response()\n\n \"\"\" Qot_GetBroker = 3014 # 获取经纪队列 \"\"\"\n from futu.common.pb.Qot_GetBroker_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetBroker] = Response()\n\n \"\"\" Qot_UpdateBroker = 3015 # 推送经纪队列 \"\"\"\n from futu.common.pb.Qot_UpdateBroker_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdateBroker] = Response()\n\n \"\"\" Qot_UpdatePriceReminder = 3019 # 推送到价提醒 \"\"\"\n from futu.common.pb.Qot_UpdatePriceReminder_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_UpdatePriceReminder] = Response()\n\n \"\"\" Qot_GetSuspend = 3201 # 获取股票停牌信息 \"\"\"\n from futu.common.pb.Qot_GetSuspend_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSuspend] = Response()\n\n \"\"\" Qot_GetStaticInfo = 3202 # 获取股票列表 \"\"\"\n from futu.common.pb.Qot_GetStaticInfo_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetStaticInfo] = Response()\n\n \"\"\" Qot_GetSecuritySnapshot = 3203 # 获取股票快照 \"\"\"\n from futu.common.pb.Qot_GetSecuritySnapshot_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetSecuritySnapshot] = Response()\n\n \"\"\" Qot_GetPlateSet = 3204 # 获取板块集合下的板块 \"\"\"\n from futu.common.pb.Qot_GetPlateSet_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetPlateSet] = Response()\n\n \"\"\" Qot_GetPlateSecurity = 3205 # 获取板块下的股票 \"\"\"\n from futu.common.pb.Qot_GetPlateSecurity_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetPlateSecurity] = Response()\n\n \"\"\" Trd_GetMaxTrdQtys = 2111 查询最大买卖数量 \"\"\"\n from futu.common.pb.Trd_GetMaxTrdQtys_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Trd_GetMaxTrdQtys] = Response()\n\n \"\"\" Qot_GetReference = 3206 获取正股相关股票,暂时只有窝轮\"\"\"\n from futu.common.pb.Qot_GetReference_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetReference] = Response()\n\n \"\"\" Qot_GetOwnerPlate = 3207 获取股票所属板块\"\"\"\n from futu.common.pb.Qot_GetOwnerPlate_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOwnerPlate] = Response()\n\n \"\"\" Qot_GetOwnerPlate = 3208 获取高管持股变动\"\"\"\n from futu.common.pb.Qot_GetHoldingChangeList_pb2 import Response\n 
ProtobufMap.created_protobuf_map[ProtoId.Qot_GetHoldingChangeList] = Response()\n\n from futu.common.pb.Qot_RequestHistoryKL_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestHistoryKL] = Response()\n\n from futu.common.pb.Qot_GetOptionChain_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOptionChain] = Response()\n\n \"\"\" Qot_GetWarrantData = 3210 获取窝轮 \"\"\"\n from futu.common.pb.Qot_GetWarrant_pb2 import Response as GetWarrantPBResponse\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetWarrant] = GetWarrantPBResponse()\n\n \"\"\" Qot_GetOrderDetail = 3104 已使用过的额度 \"\"\"\n from futu.common.pb.Qot_RequestHistoryKLQuota_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestHistoryKLQuota] = Response()\n\n \"\"\"获取除权信息\"\"\"\n from futu.common.pb.Qot_RequestRehab_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestRehab] = Response()\n\n from futu.common.pb.Qot_GetCapitalDistribution_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetCapitalDistribution] = Response()\n\n from futu.common.pb.Qot_GetCapitalFlow_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetCapitalFlow] = Response()\n\n from futu.common.pb.Qot_GetUserSecurity_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetUserSecurity] = Response()\n\n from futu.common.pb.Qot_ModifyUserSecurity_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_ModifyUserSecurity] = Response()\n\n from futu.common.pb.Qot_StockFilter_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_StockFilter] = Response()\n\n from futu.common.pb.Qot_GetCodeChange_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetCodeChange] = Response()\n\n from futu.common.pb.Qot_GetIpoList_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetIpoList] = Response()\n \n from futu.common.pb.Qot_GetFutureInfo_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetFutureInfo] = Response()\n\n from futu.common.pb.Qot_RequestTradeDate_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_RequestTradeDate] = Response()\n\n from futu.common.pb.Qot_SetPriceReminder_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_SetPriceReminder] = Response()\n\n from futu.common.pb.Qot_GetPriceReminder_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetPriceReminder] = Response()\n\n from futu.common.pb.Qot_GetUserSecurityGroup_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetUserSecurityGroup] = Response()\n\n from futu.common.pb.Qot_GetMarketState_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetMarketState] = Response()\n\n from futu.common.pb.Qot_GetOptionExpirationDate_pb2 import Response\n ProtobufMap.created_protobuf_map[ProtoId.Qot_GetOptionExpirationDate] = Response()\n\n def __getitem__(self, key):\n return ProtobufMap.created_protobuf_map[key] if key in ProtobufMap.created_protobuf_map else None\n\n\npb_map = ProtobufMap()\n\ndef binary2str(b, proto_id, proto_fmt_type):\n \"\"\"\n Transfer binary to string\n :param b: binary content to be transformed to string\n :return: string\n \"\"\"\n if proto_fmt_type == ProtoFMT.Json:\n return b.decode('utf-8')\n elif proto_fmt_type == ProtoFMT.Protobuf:\n rsp = pb_map[proto_id]\n if IS_PY2:\n rsp.ParseFromString(str(b))\n else:\n rsp.ParseFromString(b)\n return MessageToJson(rsp)\n else:\n raise Exception(\"binary2str: unknown proto 
format.\")\n\n\ndef binary2pb(b, proto_id, proto_fmt_type):\n \"\"\"\n Transfer binary to pb message\n :param b: binary content to be transformed to pb message\n :return: pb message\n \"\"\"\n rsp = pb_map[proto_id]\n if rsp is None:\n return None\n if proto_fmt_type == ProtoFMT.Json:\n return json2pb(type(rsp), b.decode('utf-8'))\n elif proto_fmt_type == ProtoFMT.Protobuf:\n try:\n rsp = type(rsp)()\n # logger.debug((proto_id))\n if IS_PY2:\n rsp.ParseFromString(str(b))\n else:\n rsp.ParseFromString(b)\n except Exception as e:\n print(e)\n return rsp\n else:\n raise Exception(\"binary2str: unknown proto format.\")\n\n\ndef pack_pb_req(pb_req, proto_id, conn_id, serial_no=0):\n proto_fmt = SysConfig.get_proto_fmt()\n serial_no = serial_no if serial_no else get_unique_id32()\n is_encrypt = FutuConnMng.is_conn_encrypt(conn_id)\n\n if proto_fmt == ProtoFMT.Json:\n req_json = MessageToJson(pb_req)\n ret, msg, req = _joint_head(proto_id, proto_fmt, len(req_json),\n req_json.encode(), conn_id, serial_no, is_encrypt)\n return ret, msg, req\n\n elif proto_fmt == ProtoFMT.Protobuf:\n ret, msg, req = _joint_head(proto_id, proto_fmt, pb_req.ByteSize(), pb_req, conn_id, serial_no, is_encrypt)\n return ret, msg, req\n else:\n error_str = ERROR_STR_PREFIX + 'unknown protocol format, %d' % proto_fmt\n return RET_ERROR, error_str, None\n\n\ndef _joint_head(proto_id, proto_fmt_type, body_len, str_body, conn_id, serial_no, is_encrypt):\n\n # sha20 = b'00000000000000000000'\n reserve8 = b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\n if proto_fmt_type == ProtoFMT.Protobuf:\n str_body = str_body.SerializeToString()\n\n if type(str_body) is not bytes:\n str_body = bytes_utf8(str_body)\n sha20 = hashlib.sha1(str_body).digest()\n\n # init connect 需要用rsa加密\n try:\n if proto_id == ProtoId.InitConnect:\n if SysConfig.INIT_RSA_FILE != '':\n str_body = RsaCrypt.encrypt(str_body)\n body_len = len(str_body)\n else:\n if is_encrypt:\n ret, msg, str_body = FutuConnMng.encrypt_conn_data(conn_id, str_body)\n body_len = len(str_body)\n if ret != RET_OK:\n return ret, msg, str_body\n except Exception as e:\n return RET_ERROR, str(e), None\n\n fmt = \"%s%ds\" % (MESSAGE_HEAD_FMT, body_len)\n\n bin_head = struct.pack(fmt, b'F', b'T', proto_id, proto_fmt_type,\n API_PROTO_VER, serial_no, body_len, sha20, reserve8, str_body)\n\n return RET_OK, \"\", bin_head\n\n\ndef parse_head(head_bytes):\n head_dict = {}\n head_dict['head_1'], head_dict['head_2'], head_dict['proto_id'], \\\n head_dict['proto_fmt_type'], head_dict['proto_ver'], \\\n head_dict['serial_no'], head_dict['body_len'], head_dict['sha20'], \\\n head_dict['reserve8'], = struct.unpack(MESSAGE_HEAD_FMT, head_bytes)\n return head_dict\n\n\ndef parse_proto_info(head_bytes):\n unpacked = struct.unpack(MESSAGE_HEAD_FMT, head_bytes)\n return ProtoInfo(unpacked[2], unpacked[5])\n\n\ndef decrypt_rsp_body(rsp_body, head_dict, conn_id, is_encrypt):\n ret_code = RET_OK\n msg = ''\n sha20 = head_dict['sha20']\n proto_id = head_dict['proto_id']\n\n if is_encrypt:\n try:\n if proto_id == ProtoId.InitConnect:\n rsp_body = RsaCrypt.decrypt(rsp_body)\n else:\n ret_code, msg, decrypt_data = FutuConnMng.decrypt_conn_data(conn_id, rsp_body)\n rsp_body = decrypt_data\n\n except Exception as e:\n msg = sys.exc_info()[1]\n ret_code = RET_ERROR\n\n # check sha20\n if ret_code == RET_OK:\n sha20_check = hashlib.sha1(rsp_body).digest()\n if sha20_check != sha20:\n ret_code = RET_ERROR\n msg = \"proto_id:{} conn_id:{} check sha error!\".format(proto_id, conn_id)\n\n return ret_code, msg, 
rsp_body\n\n\ndef make_from_namedtuple(t, **kwargs):\n \"\"\"\n t是namedtuple,复制一份t,但其中部分字段更新为kwargs的值\n :param t:\n :param kwargs:\n :return:\n \"\"\"\n d = t._asdict()\n d.update(kwargs)\n cls = type(t)\n return cls(**d)\n\n\ndef get_pb_value(pb, field):\n if pb.HasField(field):\n return getattr(pb, field)\n return 'N/A'\n\n\ndef get_pb_enum(pb, field, enum_cls, enum_default):\n if pb.HasField(field):\n return enum_cls.to_string2(getattr(pb, field))\n return enum_default\n", "id": "8114328", "language": "Python", "matching_score": 5.909056663513184, "max_stars_count": 858, "path": "futu/common/utils.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n Trade query\n\"\"\"\nimport datetime as dt\nfrom futu.common.utils import *\nfrom futu.quote.quote_query import pack_pb_req\n\n# 无数据时的值\nNoneDataValue = 'N/A'\n\ndef is_HKTrade_order_status_finish(status):\n val = int(status)\n if val == 3 or val == 5 or val == 6 or val == 7:\n return True\n return False\n\n\ndef is_USTrade_order_status_finish(status):\n val = int(status)\n if val == 3 or val == 5 or val == 6 or val == 7:\n return True\n return False\n\n\nclass GetAccountList:\n \"\"\"Get the trade account list\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, user_id, conn_id, trd_category, need_general_sec_acc):\n from futu.common.pb.Trd_GetAccList_pb2 import Request\n\n req = Request()\n req.c2s.userID = user_id\n _, req.c2s.trdCategory = TrdCategory.to_number(trd_category)\n req.c2s.needGeneralSecAccount = need_general_sec_acc\n return pack_pb_req(req, ProtoId.Trd_GetAccList, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n raw_acc_list = rsp_pb.s2c.accList\n acc_list = [{\n 'acc_id': record.accID,\n 'trd_env': TrdEnv.to_string2(record.trdEnv) if record.HasField('trdEnv') else 'N/A',# 初始化枚举类型\n 'trdMarket_list': [TrdMarket.to_string2(trdMkt) for trdMkt in record.trdMarketAuthList],\n 'acc_type': TrdAccType.to_string2(record.accType) if record.HasField(\"accType\") else TrdAccType.NONE,# 初始化枚举类型\n 'card_num': record.cardNum if record.HasField(\"cardNum\") else \"N/A\",\n 'security_firm': SecurityFirm.to_string2(record.securityFirm) if record.HasField('securityFirm') else SecurityFirm.NONE,# 初始化枚举类型\n 'sim_acc_type': SimAccType.to_string2(record.simAccType) if record.HasField('simAccType') else SimAccType.NONE,# 初始化枚举类型\n 'trdmarket_auth': list(record.trdMarketAuthList),\n } for record in raw_acc_list]\n\n return RET_OK, \"\", acc_list\n\n\nclass UnlockTrade:\n \"\"\"Unlock trade limitation lock\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, is_unlock, password_md5, conn_id, security_firm):\n \"\"\"Convert from user request for trading days to PLS request\"\"\"\n from futu.common.pb.Trd_UnlockTrade_pb2 import Request\n req = Request()\n req.c2s.unlock = is_unlock\n req.c2s.pwdMD5 = <PASSWORD>\n _, req.c2s.securityFirm = SecurityFirm.to_number(security_firm)\n\n return pack_pb_req(req, ProtoId.Trd_UnlockTrade, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n if rsp_pb.HasField('retMsg'):\n return RET_OK, rsp_pb.retMsg, None\n return RET_OK, \"\", None\n\n\nclass SubAccPush:\n \"\"\"sub acc push\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, acc_id_list, conn_id):\n from 
futu.common.pb.Trd_SubAccPush_pb2 import Request\n req = Request()\n for x in acc_id_list:\n req.c2s.accIDList.append(x)\n\n return pack_pb_req(req, ProtoId.Trd_SubAccPush, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n return RET_OK, \"\", None\n\n\nclass AccInfoQuery:\n \"\"\"Class for querying information of account\"\"\"\n\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, acc_id, trd_market, trd_env, conn_id, refresh_cache, currency):\n from futu.common.pb.Trd_GetFunds_pb2 import Request\n req = Request()\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_market)\n if refresh_cache:\n req.c2s.refreshCache = refresh_cache\n req.c2s.currency = Currency.to_number(currency)[1]\n return pack_pb_req(req, ProtoId.Trd_GetFunds, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n raw_funds = rsp_pb.s2c.funds\n accinfo_list = [{\n 'power': raw_funds.power,\n 'max_power_short': raw_funds.maxPowerShort if raw_funds.HasField('maxPowerShort') else NoneDataValue,\n 'net_cash_power': raw_funds.netCashPower if raw_funds.HasField('netCashPower') else NoneDataValue,\n 'total_assets': raw_funds.totalAssets,\n 'cash': raw_funds.cash,\n 'market_val': raw_funds.marketVal,\n 'long_mv': raw_funds.longMv if raw_funds.HasField('longMv') else NoneDataValue,\n 'short_mv': raw_funds.shortMv if raw_funds.HasField('shortMv') else NoneDataValue,\n 'pending_asset': raw_funds.pendingAsset if raw_funds.HasField('pendingAsset') else NoneDataValue,\n 'interest_charged_amount': raw_funds.debtCash if raw_funds.HasField('debtCash') else NoneDataValue,\n 'frozen_cash': raw_funds.frozenCash,\n 'avl_withdrawal_cash': raw_funds.avlWithdrawalCash if raw_funds.HasField('avlWithdrawalCash') else NoneDataValue,\n 'max_withdrawal': raw_funds.maxWithdrawal if raw_funds.HasField('maxWithdrawal') else NoneDataValue,\n 'currency': Currency.to_string2(raw_funds.currency) if raw_funds.HasField('currency') else Currency.NONE,# 初始化枚举类型\n 'available_funds': raw_funds.availableFunds if raw_funds.HasField('availableFunds') else NoneDataValue,\n 'unrealized_pl': raw_funds.unrealizedPL if raw_funds.HasField('unrealizedPL') else NoneDataValue,\n 'realized_pl': raw_funds.realizedPL if raw_funds.HasField('realizedPL') else NoneDataValue,\n 'risk_level': CltRiskLevel.to_string2(raw_funds.riskLevel) if raw_funds.HasField('riskLevel') else CltRiskLevel.NONE,# 初始化枚举类型\n 'risk_status': CltRiskStatus.to_string2(raw_funds.riskStatus) if raw_funds.HasField('riskStatus') else CltRiskStatus.NONE,# 初始化枚举类型\n 'initial_margin': raw_funds.initialMargin if raw_funds.HasField('initialMargin') else NoneDataValue,\n 'margin_call_margin': raw_funds.marginCallMargin if raw_funds.HasField('marginCallMargin') else NoneDataValue,\n 'maintenance_margin': raw_funds.maintenanceMargin if raw_funds.HasField('maintenanceMargin') else NoneDataValue,\n 'hk_cash': NoneDataValue,\n 'hk_avl_withdrawal_cash': NoneDataValue,\n 'us_cash': NoneDataValue,\n 'us_avl_withdrawal_cash': NoneDataValue,\n 'cn_cash': NoneDataValue,\n 'cn_avl_withdrawal_cash': NoneDataValue,\n 'jp_cash': NoneDataValue,\n 'jp_avl_withdrawal_cash': NoneDataValue,\n 'sg_cash': NoneDataValue,\n 'sg_avl_withdrawal_cash': NoneDataValue,\n 
'is_pdt': get_pb_value(raw_funds, 'isPdt'),\n 'pdt_seq': get_pb_value(raw_funds, 'pdtSeq'),\n 'beginning_dtbp': get_pb_value(raw_funds, 'beginningDTBP'),\n 'remaining_dtbp': get_pb_value(raw_funds, 'remainingDTBP'),\n 'dt_call_amount': get_pb_value(raw_funds, 'dtCallAmount'),\n 'dt_status': get_pb_enum(raw_funds, 'dtStatus', DTStatus, DTStatus.NONE)\n }]\n for cashInfo in raw_funds.cashInfoList:\n if cashInfo.currency == Trd_Common_pb2.Currency_HKD:\n accinfo_list[0]['hk_cash'] = cashInfo.cash\n accinfo_list[0]['hk_avl_withdrawal_cash'] = cashInfo.availableBalance\n elif cashInfo.currency == Trd_Common_pb2.Currency_USD:\n accinfo_list[0]['us_cash'] = cashInfo.cash\n accinfo_list[0]['us_avl_withdrawal_cash'] = cashInfo.availableBalance\n elif cashInfo.currency == Trd_Common_pb2.Currency_CNH:\n accinfo_list[0]['cn_cash'] = cashInfo.cash\n accinfo_list[0]['cn_avl_withdrawal_cash'] = cashInfo.availableBalance\n elif cashInfo.currency == Trd_Common_pb2.Currency_JPY:\n accinfo_list[0]['jp_cash'] = cashInfo.cash\n accinfo_list[0]['jp_avl_withdrawal_cash'] = cashInfo.availableBalance\n elif cashInfo.currency == Trd_Common_pb2.Currency_SGD:\n accinfo_list[0]['sg_cash'] = cashInfo.cash\n accinfo_list[0]['sg_avl_withdrawal_cash'] = cashInfo.availableBalance\n return RET_OK, \"\", accinfo_list\n\n\nclass PositionListQuery:\n \"\"\"Class for querying position list\"\"\"\n\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, code, pl_ratio_min,\n pl_ratio_max, trd_env, acc_id, trd_mkt, conn_id, refresh_cache):\n \"\"\"Convert from user request for trading days to PLS request\"\"\"\n from futu.common.pb.Trd_GetPositionList_pb2 import Request\n req = Request()\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n if code:\n req.c2s.filterConditions.codeList.append(code)\n if pl_ratio_min is not None:\n req.c2s.filterPLRatioMin = float(pl_ratio_min) / 100.0\n if pl_ratio_max is not None:\n req.c2s.filterPLRatioMax = float(pl_ratio_max) / 100.0\n if refresh_cache:\n req.c2s.refreshCache = refresh_cache\n\n return pack_pb_req(req, ProtoId.Trd_GetPositionList, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n raw_position_list = rsp_pb.s2c.positionList\n\n position_list = [{\n \"code\": merge_trd_mkt_stock_str(position.secMarket, position.code),\n \"stock_name\": position.name,\n \"qty\": position.qty,\n \"can_sell_qty\": position.canSellQty,\n \"cost_price\": position.costPrice if position.HasField('costPrice') else NoneDataValue,\n \"cost_price_valid\": position.HasField('costPrice'),\n \"market_val\": position.val,\n \"nominal_price\": position.price,\n \"pl_ratio\": 100 * position.plRatio if position.HasField('plRatio') else NoneDataValue,\n \"pl_ratio_valid\": position.HasField('plRatio'),\n \"pl_val\": position.plVal,\n \"pl_val_valid\": position.HasField('plVal'),\n \"today_buy_qty\": position.td_buyQty if position.HasField('td_buyQty') else NoneDataValue,\n \"today_buy_val\": position.td_buyVal if position.HasField('td_buyVal') else NoneDataValue,\n \"today_pl_val\": position.td_plVal if position.HasField('td_plVal') else NoneDataValue,\n \"today_trd_val\": position.td_plVal if position.HasField('td_trdVal') else NoneDataValue,\n \"today_sell_qty\": position.td_sellQty if position.HasField('td_sellQty') else NoneDataValue,\n \"today_sell_val\": 
position.td_sellVal if position.HasField('td_sellVal') else NoneDataValue,\n \"position_side\": PositionSide.to_string2(position.positionSide) if position.HasField('positionSide') else 'N/A',# 初始化枚举类型\n \"unrealized_pl\": position.unrealizedPL if position.HasField('unrealizedPL') else NoneDataValue,\n \"realized_pl\": position.realizedPL if position.HasField('realizedPL') else NoneDataValue,\n \"currency\": Currency.to_string2(position.currency) if position.HasField('currency') else NoneDataValue,\n } for position in raw_position_list]\n return RET_OK, \"\", position_list\n\n\nclass OrderListQuery:\n \"\"\"Class for querying list queue\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, order_id, status_filter_list, code, start, end,\n trd_env, acc_id, trd_mkt, conn_id, refresh_cache):\n \"\"\"Convert from user request for trading days to PLS request\"\"\"\n from futu.common.pb.Trd_GetOrderList_pb2 import Request\n req = Request()\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n\n if code:\n req.c2s.filterConditions.codeList.append(code)\n if order_id:\n req.c2s.filterConditions.idList.append(int(order_id))\n\n if start:\n req.c2s.filterConditions.beginTime = start\n if end:\n req.c2s.filterConditions.endTime = end\n if refresh_cache:\n req.c2s.refreshCache = refresh_cache\n\n if len(status_filter_list):\n for order_status in status_filter_list:\n r, v = OrderStatus.to_number(order_status)\n if r:\n req.c2s.filterStatusList.append(v)\n\n return pack_pb_req(req, ProtoId.Trd_GetOrderList, conn_id)\n\n @classmethod\n def parse_order(cls, rsp_pb, order):\n order_dict = {\n \"code\": merge_trd_mkt_stock_str(order.secMarket, order.code),\n \"stock_name\": order.name,\n \"trd_side\": TrdSide.to_string2(order.trdSide) if order.HasField('trdSide') else 'N/A',# 初始化枚举类型\n \"order_type\": OrderType.to_string2(order.orderType) if order.HasField('orderType') else 'N/A',# 初始化枚举类型\n \"order_status\": OrderStatus.to_string2(order.orderStatus) if order.HasField('orderStatus') else 'N/A',# 初始化枚举类型\n \"order_id\": str(order.orderID),\n \"qty\": order.qty,\n \"price\": order.price,\n \"create_time\": order.createTime,\n \"updated_time\": order.updateTime,\n \"dealt_qty\": order.fillQty,\n \"dealt_avg_price\": order.fillAvgPrice,\n \"last_err_msg\": order.lastErrMsg,\n \"remark\": order.remark if order.HasField(\"remark\") else \"\",\n \"time_in_force\": TimeInForce.to_string2(order.timeInForce) if order.HasField('timeInForce') else 'N/A',# 初始化枚举类型\n \"fill_outside_rth\": order.fillOutsideRTH if order.HasField(\"fillOutsideRTH\") else 'N/A',\n \"aux_price\": order.auxPrice if order.HasField(\"auxPrice\") else 'N/A',\n \"trail_type\": TrailType.to_string2(order.trailType) if order.HasField(\"trailType\") else 'N/A',\n \"trail_value\": order.trailValue if order.HasField(\"trailValue\") else 'N/A',\n \"trail_spread\": order.trailSpread if order.HasField(\"trailSpread\") else 'N/A',\n \"currency\": Currency.to_string2(order.currency) if order.HasField(\"currency\") else 'N/A',\n }\n return order_dict\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n raw_order_list = rsp_pb.s2c.orderList\n order_list = [OrderListQuery.parse_order(rsp_pb, order) for order in raw_order_list]\n return RET_OK, \"\", order_list\n\n\nclass PlaceOrder:\n \"\"\"Palce order class\"\"\"\n def 
__init__(self):\n pass\n\n @classmethod\n def pack_req(cls, trd_side, order_type, price, qty,\n code, adjust_limit, trd_env, sec_mkt_str, acc_id, trd_mkt, conn_id, remark,\n time_in_force, fill_outside_rth, aux_price, trail_type ,trail_value ,trail_spread):\n \"\"\"Convert from user request for place order to PLS request\"\"\"\n from futu.common.pb.Trd_PlaceOrder_pb2 import Request\n req = Request()\n serial_no = get_unique_id32()\n req.c2s.packetID.serialNo = serial_no\n req.c2s.packetID.connID = conn_id\n\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n\n _, req.c2s.trdSide = TrdSide.to_number(trd_side)\n _, req.c2s.orderType = OrderType.to_number(order_type)\n req.c2s.code = code\n req.c2s.qty = qty\n req.c2s.price = price\n req.c2s.adjustPrice = adjust_limit != 0\n req.c2s.adjustSideAndLimit = adjust_limit\n if remark is not None:\n req.c2s.remark = remark\n r, proto_qot_mkt = Market.to_number(sec_mkt_str)\n if not r:\n proto_qot_mkt = Qot_Common_pb2.QotMarket_Unknown\n proto_trd_sec_mkt = QOT_MARKET_TO_TRD_SEC_MARKET_MAP.get(proto_qot_mkt, Trd_Common_pb2.TrdSecMarket_Unknown)\n req.c2s.secMarket = proto_trd_sec_mkt\n ret, val = TimeInForce.to_number(time_in_force)\n if not ret:\n return RET_ERROR, val, None\n else:\n req.c2s.timeInForce = val\n if aux_price is not None:\n req.c2s.auxPrice = aux_price\n if trail_type is not None:\n ret, val = TrailType.to_number(trail_type)\n if not ret:\n return RET_ERROR, val, None\n else:\n req.c2s.trailType = val\n if trail_value is not None:\n req.c2s.trailValue = trail_value\n if trail_spread is not None:\n req.c2s.trailSpread = trail_spread\n\n req.c2s.fillOutsideRTH = fill_outside_rth\n\n return pack_pb_req(req, ProtoId.Trd_PlaceOrder, conn_id, serial_no)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n order_id = str(rsp_pb.s2c.orderID)\n\n return RET_OK, \"\", order_id\n\n\nclass ModifyOrder:\n \"\"\"modify order class\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, modify_order_op, order_id, price, qty,\n adjust_limit, trd_env, acc_id, trd_mkt, conn_id,\n aux_price, trail_type, trail_value, trail_spread):\n \"\"\"Convert from user request for place order to PLS request\"\"\"\n from futu.common.pb.Trd_ModifyOrder_pb2 import Request\n req = Request()\n serial_no = get_unique_id32()\n req.c2s.packetID.serialNo = serial_no\n req.c2s.packetID.connID = conn_id\n\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n\n req.c2s.orderID = int(order_id)\n _, req.c2s.modifyOrderOp = ModifyOrderOp.to_number(modify_order_op)\n req.c2s.forAll = False\n\n if modify_order_op == ModifyOrderOp.NORMAL:\n req.c2s.qty = qty\n req.c2s.price = price\n req.c2s.adjustPrice = adjust_limit != 0\n req.c2s.adjustSideAndLimit = adjust_limit\n if aux_price is not None:\n req.c2s.auxPrice = float(aux_price)\n if trail_type is not None:\n _, req.c2s.trailType = TrailType.to_number(trail_type)\n if trail_value is not None:\n req.c2s.trailValue = float(trail_value)\n if trail_spread is not None:\n req.c2s.trailSpread = float(trail_spread)\n\n return pack_pb_req(req, ProtoId.Trd_ModifyOrder, conn_id, serial_no)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if 
rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n order_id = str(rsp_pb.s2c.orderID)\n modify_order_list = [{\n 'trd_env': TrdEnv.to_string2(rsp_pb.s2c.header.trdEnv) if rsp_pb.s2c.header.HasField('trdEnv') else 'N/A',# 初始化枚举类型\n 'order_id': order_id\n }]\n\n return RET_OK, \"\", modify_order_list\n\n\nclass CancelOrder:\n \"\"\"modify order class\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, trd_env, acc_id, trd_mkt, conn_id, trdmarket):\n \"\"\"Convert from user request for place order to PLS request\"\"\"\n from futu.common.pb.Trd_ModifyOrder_pb2 import Request\n req = Request()\n serial_no = get_unique_id32()\n req.c2s.packetID.serialNo = serial_no\n req.c2s.packetID.connID = conn_id\n\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n _, req.c2s.trdMarket = TrdMarket.to_number(trdmarket)\n\n req.c2s.orderID = 0\n req.c2s.modifyOrderOp = Trd_Common_pb2.ModifyOrderOp_Cancel\n req.c2s.forAll = True\n return pack_pb_req(req, ProtoId.Trd_ModifyOrder, conn_id, serial_no)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n return RET_OK, \"success\", None\n\n\nclass DealListQuery:\n \"\"\"Class for \"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, code, trd_env, acc_id, trd_mkt, conn_id, refresh_cache):\n \"\"\"Convert from user request for place order to PLS request\"\"\"\n from futu.common.pb.Trd_GetOrderFillList_pb2 import Request\n req = Request()\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n\n if code:\n req.c2s.filterConditions.codeList.append(code)\n\n if refresh_cache:\n req.c2s.refreshCache = refresh_cache\n\n return pack_pb_req(req, ProtoId.Trd_GetOrderFillList, conn_id)\n\n @classmethod\n def parse_deal(cls, rsp_pb, deal):\n deal_dict = {\n \"code\": merge_trd_mkt_stock_str(deal.secMarket, deal.code),\n \"stock_name\": deal.name,\n \"deal_id\": deal.fillID,\n \"order_id\": str(deal.orderID) if deal.HasField('orderID') else NoneDataValue,\n \"qty\": deal.qty,\n \"price\": deal.price,\n \"trd_side\": TrdSide.to_string2(deal.trdSide) if deal.HasField('trdSide') else 'N/A',# 初始化枚举类型\n \"create_time\": deal.createTime,\n \"counter_broker_id\": deal.counterBrokerID if deal.HasField('counterBrokerID') else NoneDataValue,\n \"counter_broker_name\": deal.counterBrokerName if deal.HasField('counterBrokerName') else NoneDataValue,\n \"status\": DealStatus.to_string2(deal.status) if deal.HasField(\"status\") else NoneDataValue\n }\n return deal_dict\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n raw_deal_list = rsp_pb.s2c.orderFillList\n deal_list = [DealListQuery.parse_deal(rsp_pb, deal) for deal in raw_deal_list]\n\n return RET_OK, \"\", deal_list\n\n\nclass HistoryOrderListQuery:\n \"\"\"Class for querying Histroy Order\"\"\"\n\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, status_filter_list, code, start, end,\n trd_env, acc_id, trd_mkt, conn_id):\n\n from futu.common.pb.Trd_GetHistoryOrderList_pb2 import Request\n req = Request()\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = 
TrdMarket.to_number(trd_mkt)\n\n if code:\n req.c2s.filterConditions.codeList.append(code)\n\n req.c2s.filterConditions.beginTime = start\n req.c2s.filterConditions.endTime = end\n\n if status_filter_list:\n for order_status in status_filter_list:\n r, v = OrderStatus.to_number(order_status)\n if r:\n req.c2s.filterStatusList.append(v)\n\n return pack_pb_req(req, ProtoId.Trd_GetHistoryOrderList, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n raw_order_list = rsp_pb.s2c.orderList\n order_list = [{\n \"code\": merge_trd_mkt_stock_str(order.secMarket, order.code),\n \"stock_name\": order.name,\n \"trd_side\": TrdSide.to_string2(order.trdSide) if order.HasField('trdSide') else 'N/A',# 初始化枚举类型\n \"order_type\": OrderType.to_string2(order.orderType) if order.HasField('orderType') else 'N/A',# 初始化枚举类型\n \"order_status\": OrderStatus.to_string2(order.orderStatus) if order.HasField('orderStatus') else 'N/A',# 初始化枚举类型\n \"order_id\": str(order.orderID),\n \"qty\": order.qty,\n \"price\": order.price,\n \"create_time\": order.createTime,\n \"updated_time\": order.updateTime,\n \"dealt_qty\": order.fillQty,\n \"dealt_avg_price\": order.fillAvgPrice,\n \"last_err_msg\": order.lastErrMsg,\n \"remark\": order.remark if order.HasField(\"remark\") else \"\",\n \"time_in_force\": TimeInForce.to_string2(order.timeInForce) if order.HasField('timeInForce') else 'N/A',# 初始化枚举类型\n \"fill_outside_rth\": order.fillOutsideRTH if order.HasField(\"fillOutsideRTH\") else 'N/A',\n \"aux_price\": order.auxPrice if order.HasField(\"auxPrice\") else 'N/A',\n \"trail_type\": order.trailType if order.HasField(\"trailType\") else 'N/A',\n \"trail_value\": order.trailValue if order.HasField(\"trailValue\") else 'N/A',\n \"trail_spread\": order.trailSpread if order.HasField(\"trailSpread\") else 'N/A',\n \"currency\": Currency.to_string2(order.currency) if order.HasField('currency') else NoneDataValue,\n } for order in raw_order_list]\n return RET_OK, \"\", order_list\n\n\nclass HistoryDealListQuery:\n \"\"\"Class for \"\"\"\n\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, code, start, end, trd_env, acc_id, trd_mkt, conn_id):\n\n from futu.common.pb.Trd_GetHistoryOrderFillList_pb2 import Request\n req = Request()\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n\n if code:\n req.c2s.filterConditions.codeList.append(code)\n\n req.c2s.filterConditions.beginTime = start\n req.c2s.filterConditions.endTime = end\n\n return pack_pb_req(req, ProtoId.Trd_GetHistoryOrderFillList, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n raw_deal_list = rsp_pb.s2c.orderFillList\n deal_list = [{\n \"code\": merge_trd_mkt_stock_str(deal.secMarket, deal.code),\n \"stock_name\": deal.name,\n \"deal_id\": deal.fillID,\n \"order_id\": str(deal.orderID) if deal.HasField('orderID') else \"\",\n \"qty\": deal.qty,\n \"price\": deal.price,\n \"trd_side\": TrdSide.to_string2(deal.trdSide) if deal.HasField('trdSide') else 'N/A',# 初始化枚举类型\n \"create_time\": deal.createTime,\n \"counter_broker_id\": deal.counterBrokerID if deal.HasField('counterBrokerID') else \"\",\n \"counter_broker_name\": deal.counterBrokerName,\n \"status\": DealStatus.to_string2(deal.status) if deal.HasField('status') else 'N/A'# 初始化枚举类型\n } for deal in raw_deal_list]\n\n return RET_OK, \"\", 
deal_list\n\n\nclass UpdateOrderPush:\n \"\"\"Class for order update push\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg\n\n order_dict = OrderListQuery.parse_order(rsp_pb, rsp_pb.s2c.order)\n order_dict['trd_env'] = TrdEnv.to_string2(rsp_pb.s2c.header.trdEnv)\n order_dict['trd_market'] = TrdMarket.to_string2(rsp_pb.s2c.order.trdMarket)\n\n return RET_OK, order_dict\n\n\nclass UpdateDealPush:\n \"\"\"Class for order update push\"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg\n\n deal_dict = DealListQuery.parse_deal(rsp_pb, rsp_pb.s2c.orderFill)\n deal_dict['trd_env'] = TrdEnv.to_string2(rsp_pb.s2c.header.trdEnv)\n deal_dict['trd_market'] = TrdMarket.to_string2(rsp_pb.s2c.header.trdMarket)\n\n return RET_OK, deal_dict\n\n\nclass AccTradingInfoQuery:\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, order_type, code, price, order_id, adjust_limit, sec_mkt_str, trd_env, acc_id, trd_mkt, conn_id):\n\n from futu.common.pb.Trd_GetMaxTrdQtys_pb2 import Request\n req = Request()\n _, req.c2s.header.trdEnv = TrdEnv.to_number(trd_env)\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n\n _, req.c2s.orderType = OrderType.to_number(order_type)\n req.c2s.code = code\n req.c2s.price = price\n if order_id is not None:\n req.c2s.orderID = int(order_id)\n if adjust_limit == 0:\n req.c2s.adjustPrice = False\n else:\n req.c2s.adjustPrice = True\n req.c2s.adjustSideAndLimit = adjust_limit\n\n r, proto_qot_mkt = Market.to_number(sec_mkt_str)\n if not r:\n proto_qot_mkt = Qot_Common_pb2.QotMarket_Unknown\n\n proto_trd_sec_mkt = QOT_MARKET_TO_TRD_SEC_MARKET_MAP.get(proto_qot_mkt, Trd_Common_pb2.TrdSecMarket_Unknown)\n req.c2s.secMarket = proto_trd_sec_mkt\n\n return pack_pb_req(req, ProtoId.Trd_GetMaxTrdQtys, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n from futu.common.pb.Trd_Common_pb2 import MaxTrdQtys\n\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n info = rsp_pb.s2c.maxTrdQtys # type: MaxTrdQtys\n data = [{\n 'max_cash_buy': info.maxCashBuy,\n 'max_cash_and_margin_buy': info.maxCashAndMarginBuy if info.HasField('maxCashAndMarginBuy') else NoneDataValue,\n 'max_position_sell': info.maxPositionSell,\n 'max_sell_short': info.maxSellShort if info.HasField('maxSellShort') else NoneDataValue,\n 'max_buy_back': info.maxBuyBack if info.HasField('maxBuyBack') else NoneDataValue,\n 'long_required_im': info.longRequiredIM if info.HasField('longRequiredIM') else NoneDataValue,\n 'short_required_im': info.shortRequiredIM if info.HasField('shortRequiredIM') else NoneDataValue\n }]\n\n return RET_OK, \"\", data\n\n\nclass MarginRatio:\n \"\"\"Class for \"\"\"\n def __init__(self):\n pass\n\n @classmethod\n def pack_req(cls, code_list, conn_id, acc_id, trd_mkt):\n \"\"\"Convert from user request for place order to PLS request\"\"\"\n stock_tuple_list = []\n failure_tuple_list = []\n for stock_str in code_list:\n ret_code, content = split_stock_str(stock_str)\n if ret_code != RET_OK:\n error_str = content\n failure_tuple_list.append((ret_code, error_str))\n continue\n\n market_code, stock_code = content\n stock_tuple_list.append((market_code, stock_code))\n\n if len(failure_tuple_list) > 0:\n error_str = '\\n'.join([x[1] for x in failure_tuple_list])\n return RET_ERROR, error_str, None\n\n from 
futu.common.pb.Trd_GetMarginRatio_pb2 import Request\n req = Request()\n\n req.c2s.header.trdEnv = 1\n req.c2s.header.accID = acc_id\n _, req.c2s.header.trdMarket = TrdMarket.to_number(trd_mkt)\n\n for market, code in stock_tuple_list:\n stock_inst = req.c2s.securityList.add()\n stock_inst.market = market\n stock_inst.code = code\n\n return pack_pb_req(req, ProtoId.Trd_GetMarginRatio, conn_id)\n\n @classmethod\n def unpack_rsp(cls, rsp_pb):\n \"\"\"Convert from PLS response to user response\"\"\"\n if rsp_pb.retType != RET_OK:\n return RET_ERROR, rsp_pb.retMsg, None\n\n margin_ratio_list = rsp_pb.s2c.marginRatioInfoList\n ret_margin_ratio_list = []\n for margin_info in margin_ratio_list:\n margin_ratio_tmp = {}\n margin_ratio_tmp['code'] = merge_qot_mkt_stock_str(\n int(margin_info.security.market), margin_info.security.code)\n margin_ratio_tmp['is_long_permit'] = margin_info.isLongPermit if margin_info.HasField('isLongPermit') else 'N/A' # 是否允许融资\n margin_ratio_tmp['is_short_permit'] = margin_info.isShortPermit if margin_info.HasField('isShortPermit') else 'N/A' # 是否允许融券\n margin_ratio_tmp['short_pool_remain'] = margin_info.shortPoolRemain if margin_info.HasField('shortPoolRemain') else 'N/A' # 卖空池剩余量\n margin_ratio_tmp['short_fee_rate'] = margin_info.shortFeeRate if margin_info.HasField('shortFeeRate') else 'N/A' # 融券参考利率\n margin_ratio_tmp['alert_long_ratio'] = margin_info.alertLongRatio if margin_info.HasField('alertLongRatio') else 'N/A' # 融资预警比率\n margin_ratio_tmp['alert_short_ratio'] = margin_info.alertShortRatio if margin_info.HasField('alertShortRatio') else 'N/A' # 融券预警比率\n margin_ratio_tmp['im_long_ratio'] = margin_info.imLongRatio if margin_info.HasField('imLongRatio') else 'N/A' # 融资初始保证金率\n margin_ratio_tmp['im_short_ratio'] = margin_info.imShortRatio if margin_info.HasField('imShortRatio') else 'N/A' # 融券初始保证金率\n margin_ratio_tmp['mcm_long_ratio'] = margin_info.mcmLongRatio if margin_info.HasField('mcmLongRatio') else 'N/A' # 融资 margin call 保证金率\n margin_ratio_tmp['mcm_short_ratio'] = margin_info.mcmShortRatio if margin_info.HasField('mcmShortRatio') else 'N/A' # 融券 margin call 保证金率\n margin_ratio_tmp['mm_long_ratio'] = margin_info.mmLongRatio if margin_info.HasField('mmLongRatio') else 'N/A' # 融资维持保证金率\n margin_ratio_tmp['mm_short_ratio'] = margin_info.mmShortRatio if margin_info.HasField('mmShortRatio') else 'N/A' # 融券维持保证金率\n ret_margin_ratio_list.append(margin_ratio_tmp)\n\n return RET_OK, \"\", ret_margin_ratio_list\n", "id": "7782816", "language": "Python", "matching_score": 3.5204014778137207, "max_stars_count": 858, "path": "futu/trade/trade_query.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n Market quote and trade context setting\n\"\"\"\nfrom futu import *\nfrom futu.common.constant import *\nfrom futu.common.utils import *\n\n\nclass SimpleFilter(object):\n def __init__(self):\n self.stock_field = StockField.NONE # StockField 简单属性\n self.filter_min = None # 区间下限,闭区间\n self.filter_max = None # 区间上限,闭区间\n self.sort = None # SortDir 排序方向 SortDir\n self.is_no_filter = None # 如果这个字段不需要筛选,指定该字段为ture。当该字段为true时,以上三个字段无效。\n\n def fill_request_pb(self, filter_req):\n if self.stock_field == StockField.NONE:\n return RET_ERROR, 'Missing nessary parameters: stock_field'\n r, v = StockField.to_number(self.stock_field)\n if not r:\n return RET_ERROR, 'Stock_field is wrong. 
It must be StockField'\n filter_req.fieldName = v - StockField.simple_enum_begin\n \"\"\"有了这个字段,别的字段都可以不要了\"\"\"\n if self.is_no_filter is False:\n filter_req.isNoFilter = False\n if self.filter_min is not None:\n filter_req.filterMin = self.filter_min\n if self.filter_max is not None:\n filter_req.filterMax = self.filter_max\n\n if self.sort is not None:\n r, v = SortDir.to_number(self.sort)\n if not r:\n return RET_ERROR, 'Sort is wrong. It must be SortDir'\n filter_req.sortDir = v\n return RET_OK, \"\"\n\n # 简单 (stock_field) 作为筛选的key\n @property\n def query_key(self):\n return self.stock_field.lower()\n\n\nclass AccumulateFilter(object):\n def __init__(self):\n self.stock_field = StockField.NONE # StockField 累计属性\n self.filter_min = None # 区间下限,闭区间\n self.filter_max = None # 区间上限,闭区间\n self.sort = None # SortDir 排序方向 SortDir\n self.is_no_filter = None # 如果这个字段不需要筛选,指定该字段为ture。当该字段为true时,以上三个字段无效。\n self.days = 1 # 所筛选的数据的累计天数\n\n def fill_request_pb(self, filter_req):\n if self.stock_field == StockField.NONE:\n return RET_ERROR, 'Missing nessary parameters: stock_field'\n r, v = StockField.to_number(self.stock_field)\n if not r:\n return RET_ERROR, 'Stock_field is wrong. It must be StockField'\n filter_req.fieldName = v - StockField.acc_enum_begin\n filter_req.days = self.days\n \"\"\"有了这个字段,别的字段都可以不要了\"\"\"\n if self.is_no_filter is False:\n filter_req.isNoFilter = False\n if self.filter_min is not None:\n filter_req.filterMin = self.filter_min\n if self.filter_max is not None:\n filter_req.filterMax = self.filter_max\n\n if self.sort is not None:\n r, v = SortDir.to_number(self.sort)\n if not r:\n return RET_ERROR, 'Sort is wrong. It must be SortDir'\n filter_req.sortDir = v\n return RET_OK, \"\"\n\n # 累积 (stock_field + days) 作为筛选的key\n @property\n def query_key(self):\n return (self.stock_field.lower(), self.days)\n\n\nclass FinancialFilter(object):\n def __init__(self):\n self.stock_field = StockField.NONE # StockField 财务属性\n self.filter_min = None # 区间下限,闭区间\n self.filter_max = None # 区间上限,闭区间\n self.sort = None # SortDir 排序方向 SortDir\n self.is_no_filter = None # 如果这个字段不需要筛选,指定该字段为ture。当该字段为true时,以上三个字段无效。\n self.quarter = FinancialQuarter.ANNUAL # 财报累积时间\n\n def fill_request_pb(self, filter_req):\n if self.stock_field == StockField.NONE:\n return RET_ERROR, 'Missing nessary parameters: stock_field'\n r, v = StockField.to_number(self.stock_field)\n if not r:\n return RET_ERROR, 'Stock_field is wrong. It must be StockField'\n filter_req.fieldName = v - StockField.financial_enum_begin\n\n r, v = FinancialQuarter.to_number(self.quarter)\n if not r:\n return RET_ERROR, 'Quarter is wrong. It must be FinancialQuarter'\n filter_req.quarter = v\n\n \"\"\"有了这个字段,别的字段都可以不要了\"\"\"\n if self.is_no_filter is False:\n filter_req.isNoFilter = False\n if self.filter_min is not None:\n filter_req.filterMin = self.filter_min\n if self.filter_max is not None:\n filter_req.filterMax = self.filter_max\n\n if self.sort is not None:\n r, v = SortDir.to_number(self.sort)\n if not r:\n return RET_ERROR, 'Sort is wrong. 
It must be SortDir'\n filter_req.sortDir = v\n\n return RET_OK, \"\"\n\n # 财务 (stock_field + quarter) 作为筛选的key\n @property\n def query_key(self):\n return self.stock_field.lower(), self.quarter.lower()\n\n\nclass CustomIndicatorFilter(object):\n stock_field1 = StockField.NONE # StockField 指标属性\n stock_field2 = StockField.NONE # StockField 指标属性\n relative_position = None # RelativePosition 相对位置,主要用于MA,EMA,RSI指标做比较\n value = None # 自定义数值,用于与RSI进行比较\n ktype = KLType.NONE # KLType, K线类型,仅支持K_60M,K_DAY,K_WEEK,K_MON 四种时间周期\n is_no_filter = None # 如果这个字段不需要筛选\n\n def __init__(self):\n self.stock_field1 = StockField.NONE\n self.stock_field2 = StockField.NONE\n self.relative_position = RelativePosition.NONE\n self.ktype = KLType.NONE\n self.is_no_filter = None\n\n def fill_request_pb(self, filter_req):\n if self.stock_field1 == StockField.NONE:\n return RET_ERROR, 'Missing nessary parameters: stock_field1'\n r, v = StockField.to_number(self.stock_field1)\n if not r:\n return RET_ERROR, 'Stock_field1 is wrong. It must be StockField'\n filter_req.firstFieldName = v - StockField.indicator_enum_begin\n\n if self.stock_field2 == StockField.NONE:\n return RET_ERROR, 'Missing nessary parameters: stock_field2'\n r, v = StockField.to_number(self.stock_field2)\n if not r:\n return RET_ERROR, 'Stock_field2 is wrong. It must be StockField'\n filter_req.secondFieldName = v - StockField.indicator_enum_begin\n\n if self.relative_position == RelativePosition.NONE:\n return RET_ERROR, 'Missing nessary parameters: relative_position'\n r, v = RelativePosition.to_number(self.relative_position)\n if not r:\n return RET_ERROR, 'Relative_position is wrong. It must be RelativePosition'\n filter_req.relativePosition = v\n\n if self.value is not None:\n filter_req.fieldValue = self.value\n\n if self.ktype == KLType.NONE:\n return RET_ERROR, 'Missing nessary parameters: ktype'\n r2, v2 = KLType.to_number(self.ktype)\n if not r2:\n return RET_ERROR, 'Ktype is wrong. It must be KLType' + 'Wrong'\n filter_req.klType = v2\n\n if self.is_no_filter is False:\n filter_req.isNoFilter = False\n\n return RET_OK, \"\"\n\n # 自定义 (stock_field + ktype) 作为筛选的key\n @property\n def query_key1(self):\n return self.stock_field1.lower(), self.ktype.lower()\n\n @property\n def query_key2(self):\n return self.stock_field2.lower(), self.ktype.lower()\n\n\nclass PatternFilter(object):\n stock_field = StockField.NONE # StockField 指标形态属性\n ktype = None # KLType, K线类型,仅支持K_60M,K_DAY,K_WEEK,K_MON 四种时间周期\n is_no_filter = None # 如果这个字段不需要筛选\n\n def __init__(self):\n self.stock_field = StockField.NONE\n self.ktype = KLType.NONE\n self.is_no_filter = None\n\n def fill_request_pb(self, filter_req):\n if self.stock_field == StockField.NONE:\n return RET_ERROR, 'Missing nessary parameters: stock_field'\n r, v = StockField.to_number(self.stock_field)\n if not r:\n return RET_ERROR, 'Stock_field is wrong. It must be StockField'\n filter_req.fieldName = v - StockField.pattern_enum_begin\n\n if self.ktype == KLType.NONE:\n return RET_ERROR, 'Missing nessary parameters: ktype'\n r2, v2 = KLType.to_number(self.ktype)\n if not r2:\n return RET_ERROR, 'Ktype is wrong. 
It must be KLType'\n filter_req.klType = v2\n\n if self.is_no_filter is False:\n filter_req.isNoFilter = False\n\n return RET_OK, \"\"\n\n\nclass FilterStockData(object):\n # 以下是简单数据过滤所支持的字段\n # cur_price = None # 最新价\n # cur_price_to_highest_52weeks_ratio = None # (现价 - 52周最高) / 52周最高,对应pc端离52周高点百分比\n # cur_price_to_lowest_52weeks_ratio = None # (现价 - 52周最低) / 52周最低,对应pc端离52周低点百分比\n # high_price_to_highest_52weeks_ratio = None # (今日最高 - 52周最高) / 52周最高,对应pc端52周新高\n # low_price_to_lowest_52weeks_ratio = None # (今日最低 - 52周最低) / 52周最低 对应pc端52周新低\n # volume_ratio = None # 量比\n # bid_ask_ratio = None # 委比\n # lot_price = None # 每手价格\n # market_val = None # 市值\n # pe_annual = None # 年化(静态) 市盈率\n # pe_ttm = None # 市盈率ttm\n # pb_rate = None # 市净率\n # change_rate_5min = None # 五分钟价格涨跌幅\n # change_rate_begin_year = None # 年初至今价格涨跌幅\n # ps_ttm # 市销率(ttm) 例如填写 [100, 500] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # pcf_ttm # 市现率(ttm) 例如填写 [100, 1000] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # total_share # 总股数 例如填写 [1000000000,1000000000] 值区间 (单位:股)\n # float_share # 流通股数 例如填写 [1000000000,1000000000] 值区间 (单位:股)\n # float_market_val # 流通市值 例如填写 [1000000000,1000000000] 值区间 (单位:元)\n\n # 以下是累积数据过滤所支持的字段\n # change_rate = None # 涨跌幅\n # amplitude = None # 振幅\n # volume = None # 成交量\n # turnover = None # 成交额\n # turnover_rate = None # 换手率\n\n # 以下是财务数据过滤所支持的字段\n # net_profit = None # 净利润\n # net_profix_growth = None # 净利润增长率\n # sum_of_business = None # 营业收入\n # sum_of_business_growth = None # 营业同比增长率\n # net_profit_rate = None # 净利率\n # gross_profit_rate = None # 毛利率\n # debt_asset_rate = None # 资产负债率\n # return_on_equity_rate = None # 净资产收益率\n # roic # 盈利能力属性投入资本回报率 例如填写 [1.0,10.0] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # roa_ttm # 资产回报率(ttm) 例如填写 [1.0,10.0] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%。仅适用于年报。)\n # ebit_ttm # 息税前利润(ttm) 例如填写 [1000000000,1000000000] 值区间(单位:元。仅适用于年报。)\n # ebitda # 税息折旧及摊销前利润 例如填写 [1000000000,1000000000] 值区间(单位:元)\n # operating_margin_ttm # 营业利润率(ttm) 例如填写 [1.0,10.0] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%。仅适用于年报。)\n # ebit_margin # ebit利润率 例如填写 [1.0,10.0] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # ebitda_margin # ebitda利润率 例如填写 [1.0,10.0] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # financial_cost_rate # 财务成本率 例如填写 [1.0,10.0] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # operating_profit_ttm # 营业利润(ttm) 例如填写 [1000000000,1000000000] 值区间 (单位:元。仅适用于年报。)\n # shareholder_net_profit_ttm # 归属于母公司的净利润 例如填写 [1000000000,1000000000] 值区间 (单位:元。仅适用于年报。)\n # net_profit_cash_cover_ttm # 盈利中的现金收入比例 例如填写 [1.0,60.0] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%。仅适用于年报。)\n # current_ratio # 偿债能力属性流动比率 例如填写 [100,250] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # quick_ratio # 速动比率 例如填写 [100,250] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # current_asset_ratio # 清债能力属性流动资产率 例如填写 [10,100] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # current_debt_ratio # 流动负债率 例如填写 [10,100] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # equity_multiplier # 权益乘数 例如填写 [100,180] 值区间\n # property_ratio # 产权比率 例如填写 [50,100] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # cash_and_cash_equivalents # 现金和现金等价 例如填写 [1000000000,1000000000] 值区间(单位:元)\n # total_asset_turnover # 运营能力属性总资产周转率 例如填写 [50,100] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # fixed_asset_turnover # 固定资产周转率 例如填写 [50,100] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # inventory_turnover # 存货周转率 例如填写 [50,100] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # operating_cash_flow_ttm # 经营活动现金流(ttm) 例如填写 [1000000000,1000000000] 值区间(单位:元。仅适用于年报。)\n # accounts_receivable # 应收账款净额 例如填写 [1000000000,1000000000] 值区间 例如填写 [1000000000,1000000000] 值区间 (单位:元)\n # ebit_growth_rate # 成长能力属性ebit同比增长率 例如填写 [1.0,10.0] 值区间 
(该字段为百分比字段,默认省略%,如20实际对应20%)\n # operating_profit_growth_rate # 营业利润同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # total_assets_growth_rate # 总资产同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # profit_to_shareholders_growth_rate # 归母净利润同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # profit_before_tax_growth_rate # 总利润同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # eps_growth_rate # eps同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # roe_growth_rate # roe同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # roic_growth_rate # roic同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # nocf_growth_rate # 经营现金流同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # nocf_per_share_growth_rate # 每股经营现金流同比增长率 例如填写 [1.0,10.0] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # operating_revenue_cash_cover # 现金流属性经营现金收入比 例如填写 [10,100] 值区间(该字段为百分比字段,默认省略%,如20实际对应20%)\n # operating_profit_to_total_profit # 营业利润占比 例如填写 [10,100] 值区间 (该字段为百分比字段,默认省略%,如20实际对应20%)\n # basic_eps # 市场表现属性基本每股收益 例如填写 [0.1,10] 值区间 (单位:元)\n # diluted_eps # 稀释每股收益 例如填写 [0.1,10] 值区间 (单位:元)\n # nocf_per_share # 每股经营现金净流量 例如填写 [0.1,10] 值区间 (单位:元)\n\n # 以下是技术指标过滤所支持的枚举\n # price # 最新价格\n # ma5 # 5日简单均线\n # ma10 # 10日简单均线\n # ma20 # 20日简单均线\n # ma30 # 30日简单均线\n # ma60 # 60日简单均线\n # ma120 # 120日简单均线\n # ma250 # 250日简单均线\n # rsi # 动态rsi\n # ema5 # 5日指数移动均线\n # ema10 # 10日指数移动均线\n # ema20 # 20日指数移动均线\n # ema30 # 30日指数移动均线\n # ema60 # 60日指数移动均线\n # ema120 # 120日指数移动均线\n # ema250 # 250日指数移动均线\n\n def __init__(self, rsp_item):\n self.stock_code = None\n self.stock_name = None\n\n from futu.common.pb.Qot_StockFilter_pb2 import StockData\n self.stock_code = merge_qot_mkt_stock_str(rsp_item.security.market, rsp_item.security.code)\n # 名称 type = string\n self.stock_name = rsp_item.name\n\n # ls = StockField.get_all_key_list()\n # for key in ls:\n # attr = key.lower()\n # if attr not in self.__dict__:\n # \"\"\"增加一个属性\"\"\"\n # self.__dict__[attr] = None\n\n # 筛选后的简单属性数据 type = Qot_StockFilter.BaseData\n base_data_list = rsp_item.baseDataList\n for sub_item in base_data_list:\n ret, field = StockField.to_string(sub_item.fieldName + StockField.simple_enum_begin)\n if ret:\n self.__dict__[field.lower()] = sub_item.value\n\n # 筛选后的累计属性数据 type = Qot_StockFilter.AccumulateData\n base_data_list = rsp_item.accumulateDataList\n for sub_item in base_data_list:\n ret, field = StockField.to_string(sub_item.fieldName + StockField.acc_enum_begin)\n if ret:\n self.__dict__[(field.lower(), sub_item.days)] = sub_item.value\n\n # 筛选后的财务属性数据 type = Qot_StockFilter.FinancialData\n base_data_list = rsp_item.financialDataList\n for sub_item in base_data_list:\n ret1, field = StockField.to_string(sub_item.fieldName + StockField.financial_enum_begin)\n ret2, quarter = FinancialQuarter.to_string(sub_item.quarter)\n if ret1 and ret2:\n self.__dict__[(field.lower(), quarter.lower())] = sub_item.value\n\n # 筛选后的指标属性数据 type = Qot_StockFilter.CustomIndicatorData\n base_data_list = rsp_item.customIndicatorDataList\n for sub_item in base_data_list:\n ret1, field = StockField.to_string(sub_item.fieldName + StockField.indicator_enum_begin)\n ret2, klType = KLType.to_string(sub_item.klType)\n if ret1 and ret2:\n self.__dict__[(field.lower(), klType.lower())] = sub_item.value\n\n def __repr__(self):\n ls = StockField.get_all_key_list()\n s = \"\"\n for key in self.__dict__:\n value = self.__dict__[key]\n if value is not None:\n if isinstance(key, tuple):\n s += (\" {}({}):{} \".format(key[0], key[1], value))\n else:\n s += (\" 
{}:{} \".format(key, value))\n return s\n\n # 获取筛选条件的某字段,比如FinancialFilter的筛选字段。\n def __getitem__(self, key):\n if isinstance(key, SimpleFilter) or isinstance(key, FinancialFilter) or isinstance(key, AccumulateFilter):\n return self.__dict__[key.query_key]\n if isinstance(key, CustomIndicatorFilter):\n key1 = key.stock_field1.lower()\n value1 = self.__dict__[key.query_key1]\n if key.stock_field2 != StockField.VALUE:\n key2 = key.stock_field2.lower()\n value2 = self.__dict__[key.query_key2]\n return {key1: value1, key2: value2}\n else:\n return {key1: value1}\n raise KeyError('Unknown key: {}'.format(key))\n", "id": "5598689", "language": "Python", "matching_score": 2.969423294067383, "max_stars_count": 858, "path": "futu/quote/quote_stockfilter_info.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\n query_history_change_stocks\n 指定涨跌幅,查询本地下载的历史日k数据,返回符合条件的股票\n\"\"\"\n\nfrom datetime import datetime\n\nfrom futuquant import *\n\n\n\ndef query_history_change_stocks(quote_context=None, markets=[Market.HK], start='2017-01-05', end='2017-1-10', change_min=5.0,\n change_max=None, stock_type=SecurityType.STOCK, ascend=True):\n '''\n :param quote_context: api 行情对象\n :param markets: 要查询的市场列表, 可以只传单个市场如'HK'字符串\n :param start: 开始时间\n :param end: 截止时间\n :param change_min: 涨跌幅最小值 eg: 1.0% 传值 1.0, None表示忽略\n :param change_max: 涨跌幅最大值\n :param stock_type: 要查询的股票类型, 见 SEC_TYPE_MAP - 'STOCK','IDX','ETF','WARRANT','BOND'\n :param ascend: 结果是否升序排列\n :return: (ret, data), ret == 0返回pd dataframe, 表头为 'code'(股票代码), 'change_rate'(涨跌率*100), 'real_times'(起止真实交易时间字符串)\n ret != 0 data 为错误字符串\n '''\n if not markets or (not is_str(markets) and not isinstance(markets, list)):\n error_str = \"the type of markets param is wrong\"\n return RET_ERROR, error_str\n req_markets = markets if isinstance(markets, list) else [markets]\n\n if change_min is None and change_max is None:\n return RET_ERROR, \"param change is wrong\"\n\n # float 比较有偏差 比如 a = 1.0 , b = 1.1, c = (b-a)/a * 100, d = 10 , c<=d 结果为False\n if change_min is not None:\n change_min = int(float(change_min) * 1000)\n if change_max is not None:\n change_max = int(float(change_max) * 1000)\n\n # 汇总得到需要查询的所有股票code\n list_stocks = []\n for mk in req_markets:\n ret, data = quote_context.get_stock_basicinfo(mk, stock_type)\n if 0 != ret:\n return ret, data\n for ix, row in data.iterrows():\n list_stocks.append(row['code'])\n\n # 多点k线数据查询\n dt_last = datetime.now()\n ret_list = []\n ret, data_start = quote_context.get_multi_points_history_kline(list_stocks, [start],\n [KL_FIELD.DATE_TIME, KL_FIELD.CLOSE], KLType.K_DAY, AuType.QFQ,\n KLNoDataMode.FORWARD)\n if ret != 0:\n return ret, data_start\n ret, data_end = quote_context.get_multi_points_history_kline(list_stocks, [end],\n [KL_FIELD.DATE_TIME, KL_FIELD.CLOSE], KLType.K_DAY, AuType.QFQ,\n KLNoDataMode.FORWARD)\n if ret != 0:\n return ret, data_end\n\n # 合并数据\n data = data_start.append(data_end)\n\n dt = datetime.now() - dt_last\n print('get_multi_points_history_kline - run time = %s秒' % dt.seconds)\n\n # 返回计算涨跌幅,统计符合条件的股票\n for stock in list_stocks:\n pd_find = data[data.code == stock]\n close_start = 0\n close_end = 0\n real_times = []\n for _, row in pd_find.iterrows():\n if KLDataStatus.NONE == row['data_status']:\n break\n if row['time_point'] == start:\n close_start = row['close']\n real_times.append(row['time_key'])\n elif row['time_point'] == end:\n close_end = row['close']\n real_times.append(row['time_key'])\n if close_start and close_end:\n change_rate = (close_end - close_start) / float(close_start) * 
100000.0\n data_ok = True\n if change_min is not None:\n data_ok = change_rate >= change_min\n if data_ok and change_max is not None:\n data_ok = change_rate <= change_max\n if data_ok:\n ret_list.append({'code': stock, 'change_rate': float(change_rate / 1000.0), 'real_times': ','.join(real_times)})\n\n # sort the data\n ret_list = sorted(ret_list, key=lambda x: x['change_rate'], reverse=(not ascend))\n\n # assemble the returned pd dataframe\n col_list = ['code', 'change_rate', 'real_times']\n pd_frame = pd.DataFrame(ret_list, columns=col_list)\n\n return RET_OK, pd_frame\n\nif __name__ == \"__main__\":\n api_ip = '127.0.0.1' # ''172.16.31.10'\n api_port = 11111\n change_min = 1\n change_max = 2\n\n quote_context = OpenQuoteContext(host=api_ip, port=api_port)\n print(query_history_change_stocks(quote_context, [Market.HK], '2017-01-10', '2017-1-15', change_min, change_max, SecurityType.ETF))\n quote_context.close()", "id": "2645440", "language": "Python", "matching_score": 0.4296773672103882, "max_stars_count": 5, "path": "futuquant/examples/learn/query_history_change_stocks.py" }, { "content": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport traceback\nfrom futuquant.common import bytes_utf8, IS_PY2, str_utf8\nfrom futuquant.common.constant import *\nfrom futuquant.common.ft_logger import logger\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_v1_5 as Cipher_pkcs1\nfrom Crypto import Random\n\n\nclass SysConfig(object):\n IS_PROTO_ENCRYPT = False # whether the api communication protocol is encrypted\n INIT_RSA_FILE = '' # rsa private key file used by the initial connection protocol\n RSA_OBJ = None # rsa encryption/decryption object\n PROTO_FMT = None # protocol format\n CLINET_ID = None # Client identifier\n CLIENT_VER = None # Client ver\n ALL_THREAD_DAEMON = False # whether all spawned threads are daemon threads\n\n @classmethod\n def set_client_info(cls, client_id, client_ver):\n \"\"\"\n .. py:function:: set_client_info(cls, client_id, client_ver)\n\n Set the client info used when calling the api; this interface is optional\n\n :param client_id: str, client identifier\n :param client_ver: int, client version number\n :return: None\n\n :example:\n\n .. code:: python\n\n from futuquant import *\n SysConfig.set_client_info(\"MyFutuQuant\", 0)\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n quote_ctx.close()\n\n \"\"\"\n\n SysConfig.CLINET_ID = client_id\n SysConfig.CLIENT_VER = client_ver\n\n @classmethod\n def get_client_id(cls):\n return SysConfig.CLINET_ID if SysConfig.CLINET_ID else DEFULAT_CLIENT_ID\n\n @classmethod\n def get_client_ver(cls):\n return SysConfig.CLIENT_VER if SysConfig.CLIENT_VER is not None else CLIENT_VERSION\n\n @classmethod\n def get_proto_fmt(cls):\n return SysConfig.PROTO_FMT if SysConfig.PROTO_FMT else DEFULAT_PROTO_FMT\n\n @classmethod\n def set_proto_fmt(cls, proto_fmt):\n \"\"\"\n\n .. py:function:: set_proto_fmt(cls, proto_fmt)\n\n Set the body format of the communication protocol; currently the Protobuf | Json formats are supported; this interface is optional\n\n :param proto_fmt: ProtoFMT\n :return: None\n\n :example:\n\n .. code:: python\n\n from futuquant import *\n SysConfig.set_proto_fmt(ProtoFMT.Protobuf)\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n quote_ctx.close()\n\n \"\"\"\n fmt_list = [ProtoFMT.Protobuf, ProtoFMT.Json]\n\n if proto_fmt not in fmt_list:\n raise Exception(\"proto_fmt error\")\n SysConfig.PROTO_FMT = proto_fmt\n\n @classmethod\n def enable_proto_encrypt(cls, is_encrypt):\n \"\"\"\n .. py:function:: enable_proto_encrypt(cls, is_encrypt)\n\n Set whether the communication protocol is encrypted; the gateway client and the api must be configured with the same RSA private key file, and after the connection is initialized successfully the gateway sends down a randomly generated AES encryption key\n\n :param is_encrypt: bool\n :return: None\n\n :example:\n\n .. 
code:: python\n\n from futuquant import *\n SysConfig.enable_proto_encrypt(True)\n SysConfig.set_init_rsa_file(\"conn_key.txt\") # rsa private key file path\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n quote_ctx.close()\n\n \"\"\"\n SysConfig.IS_PROTO_ENCRYPT = bool(is_encrypt)\n\n @classmethod\n def set_init_rsa_file(cls, file):\n \"\"\"\n .. py:function:: set_init_rsa_file(cls, file)\n\n Set the RSA private key file; it must be 1024-bit and in PKCS#1 format\n\n :param file: str, file path\n :return: None\n\n :example:\n\n .. code:: python\n\n from futuquant import *\n SysConfig.enable_proto_encrypt(True)\n SysConfig.set_init_rsa_file(\"conn_key.txt\") # rsa private key file path\n quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)\n quote_ctx.close()\n\n \"\"\"\n SysConfig.INIT_RSA_FILE = str(file)\n pass\n\n @classmethod\n def get_init_rsa_obj(cls):\n \"\"\"\n :return: str , private key for init connect protocol\n \"\"\"\n\n if not SysConfig.RSA_OBJ:\n SysConfig._read_rsa_keys()\n\n return SysConfig.RSA_OBJ\n\n @classmethod\n def is_proto_encrypt(cls):\n \"\"\"\n :return: bool\n \"\"\"\n return SysConfig.IS_PROTO_ENCRYPT\n\n @classmethod\n def _read_rsa_keys(cls):\n file_path = SysConfig.INIT_RSA_FILE if SysConfig.INIT_RSA_FILE \\\n else os.path.join(os.path.dirname(__file__), DEFAULT_INIT_PRI_KEY_FILE)\n\n try:\n f = open(file_path, 'rb')\n df = f.read()\n if type(df) is not str:\n df = str_utf8(df)\n\n rsa = RSA.importKey(df)\n pub_key = rsa.publickey().exportKey()\n if not pub_key:\n raise Exception(\"Illegal format of file content\")\n\n SysConfig.RSA_OBJ = rsa\n\n except Exception as e:\n traceback.print_exc()\n err = sys.exc_info()[1]\n err_msg = \"Fatal error occurred in getting proto key, detail:{}\".format(err)\n logger.error(err_msg)\n raise Exception(err_msg)\n\n @classmethod\n def set_all_thread_daemon(cls, all_daemon):\n \"\"\"\n Set whether all internally created threads are daemon threads\n :param all_daemon: bool\n :return:\n \"\"\"\n SysConfig.ALL_THREAD_DAEMON = all_daemon\n\n @classmethod\n def get_all_thread_daemon(cls):\n return SysConfig.ALL_THREAD_DAEMON\n\n\nclass RsaCrypt(object):\n RANDOM_GENERATOR = Random.new().read\n CHIPPER = None\n @classmethod\n def encrypt(cls, data):\n if RsaCrypt.CHIPPER is None:\n rsa = SysConfig.get_init_rsa_obj()\n RsaCrypt.CHIPPER = Cipher_pkcs1.new(rsa)\n\n if type(data) is not bytes:\n data = bytes_utf8(str(data))\n\n # the maximum length of data encrypted in one pass is (key_size / 8) - 11\n # use 100 for a 1024-bit key, 200 for a 2048-bit key\n one_len = 100\n ret_data = b''\n for i in range(0, len(data), one_len):\n ret_data += RsaCrypt.CHIPPER.encrypt(data[i:i + one_len])\n return ret_data\n\n @classmethod\n def decrypt(cls, data):\n if RsaCrypt.CHIPPER is None:\n rsa = SysConfig.get_init_rsa_obj()\n RsaCrypt.CHIPPER = Cipher_pkcs1.new(rsa)\n\n # use 128 for a 1024-bit key, 256 for a 2048-bit key\n one_len = 128\n ret_data = b''\n\n # under python2 the data must be converted to str, otherwise an exception is raised\n if IS_PY2:\n data = str(data)\n\n for i in range(0, len(data), one_len):\n ret_data += RsaCrypt.CHIPPER.decrypt(data[i:i + one_len], RsaCrypt.RANDOM_GENERATOR)\n\n return ret_data\n\n\n\"\"\"\ntest_str = 'futu api' * 32\ndt_encrypt = RsaCrypt.encrypt(test_str)\nprint(dt_encrypt)\ndt_decrypt = RsaCrypt.decrypt(dt_encrypt)\nprint(dt_decrypt)\n\"\"\"\n\n\"\"\"\nfrom Crypto.Cipher import AES\nfrom binascii import b2a_hex, a2b_hex\n\nkey = b'<KEY>'\ncryptor = AES.new(key, AES.MODE_ECB, key)\n\nsrc = b'123'\nlen_src = len(src)\nadd = 16 - (len_src % 16)\nsrc = src\nsrc2 = src + (b'\\0' * add)\n\ndst = cryptor.encrypt(src2)\nhex_dst = b2a_hex(dst)\nprint(hex_dst)\n\nsrc3 = cryptor.decrypt(dst)\nprint(\"len={} decrypt={}\".format(len(src3), 
src3))\n\"\"\"\n\n\n\n\n\n", "id": "10632198", "language": "Python", "matching_score": 2.0139877796173096, "max_stars_count": 5, "path": "futuquant/common/sys_config.py" }, { "content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2017 Futu, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport signal\nimport sys\nimport threading\n\n\ndef _check_version_no_older(cur_ver, base_ver):\n cur_ver_parts = [int(n) for n in cur_ver.split('.')]\n base_ver_parts = [int(n) for n in base_ver.split('.')]\n return cur_ver_parts >= base_ver_parts\n\n\ndef _check_module(mod_name, package_name=None, version=None, version_getter=None, py_version=None):\n import importlib\n\n if package_name is None:\n package_name = mod_name\n\n if py_version is not None:\n if sys.version_info[0] != py_version:\n return\n\n try:\n mod = importlib.import_module(mod_name)\n except Exception:\n if version is None:\n print(\"Missing required package {}\".format(package_name))\n else:\n print(\"Missing required package {} v{}\".format(package_name, version))\n sys.exit(1)\n\n if version is not None:\n try:\n mod_version = version_getter(mod)\n if not _check_version_no_older(mod_version, version):\n print(\"The current version of package {} is {}, not compatible. You need use {} or newer.\".format(package_name, mod_version, version))\n sys.exit(1)\n except Exception:\n return # 取版本号出了异常,一般是因为版本号中含有非数字的部分,这种无法处理,默认成功\n\n\ndef _pip_get_package_version(package_name):\n import subprocess\n proc = subprocess.Popen([sys.executable, '-m', 'pip', 'show', package_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n outdata, errdata = proc.communicate()\n\n eol = b'\\n'\n version_key = b'Version:'\n\n lines = outdata.split(eol)\n for line in lines:\n line = line.strip()\n if line.startswith(version_key):\n version = line.lstrip(version_key).strip()\n return version.decode('utf-8')\n return None\n\n\ndef _check_package(package_name, version=None):\n try:\n import pip\n except ImportError:\n return\n\n mod_version = _pip_get_package_version(package_name)\n if mod_version == '' or mod_version is None:\n if version is None:\n print(\"Missing required package {}\".format(package_name))\n else:\n print(\"Missing required package {} v{}\".format(package_name, version))\n sys.exit(1)\n elif version is not None and mod_version != version:\n print(\"Package {} version is {}, better be {}.\".format(package_name, mod_version, version))\n\n\n_check_module('pandas')\n_check_module('simplejson')\n_check_module('Crypto', 'pycryptodome')\n# _check_module('google.protobuf', package_name='protobuf', version='3.5.1', version_getter=lambda mod: mod.__version__)\n_check_module('selectors2', py_version=2)\n\n\n#import data querying APIs and response handle base class\nfrom futu.quote.open_quote_context import OpenQuoteContext\nfrom futu.quote.quote_response_handler import *\nfrom futu.trade.trade_response_handler import *\nfrom futu.quote.quote_get_warrant import Request as WarrantRequest\n\n#import HK and US trade context\nfrom 
futu.trade.open_trade_context import OpenHKTradeContext\nfrom futu.trade.open_trade_context import OpenUSTradeContext\nfrom futu.trade.open_trade_context import OpenHKCCTradeContext\nfrom futu.trade.open_trade_context import OpenCNTradeContext\nfrom futu.trade.open_trade_context import OpenFutureTradeContext\nfrom futu.trade.open_trade_context import OpenSecTradeContext\n\n#import constant values\nfrom futu.common import *\nfrom futu.common.constant import *\nfrom futu.common.sys_config import SysConfig\nfrom futu.common.diag import print_sys_info\nfrom futu.common.err import Err\nfrom futu.quote.quote_get_warrant import Request as WarrantRequest\n\nwith open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'VERSION.txt'), 'rb') as f:\n __version__ = f.read().decode('ascii').strip()\n\ndef set_futu_debug_model(on_off=True):\n common.set_debug_model(on_off)\n\n\ndef quit_handler(sig, frame):\n os._exit(0)\n\n\nif not IS_PY2:\n if threading.current_thread() is threading.main_thread():\n signal.signal(signal.SIGINT, quit_handler)\n", "id": "2639355", "language": "Python", "matching_score": 0.5405402779579163, "max_stars_count": 858, "path": "futu/__init__.py" }, { "content": "from rest_framework import serializers\nfrom django.contrib.auth.models import Group, Permission\n\nfrom .models import Accounts, Ticket, TicketType, TicketRecord\n\n\nclass PermissionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Permission\n fields = '__all__'\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n class Meta:\n model = Group\n fields = '__all__'\n\n\nclass AccountsSerializer(serializers.ModelSerializer):\n get_group = serializers.ReadOnlyField()\n\n class Meta:\n model = Accounts\n # fields = ('id', 'username', 'email', 'chinese_name', 'sex','birthday')\n # fields = '__all__'\n exclude = ('user_permissions',)\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n password = validated_data.pop('password', None)\n groups = validated_data.pop('groups', None)\n instance = self.Meta.model(**validated_data)\n if password is not None:\n instance.set_password(password)\n instance.save()\n if groups is not None:\n for group in groups:\n instance.groups.add(group)\n instance.save()\n return instance\n\n def update(self, instance, validated_data):\n for attr, value in list(validated_data.items()):\n if attr == 'password':\n instance.set_password(value)\n elif attr == 'groups':\n instance.groups.clear()\n groups = validated_data.pop('groups', None)\n for i in groups:\n instance.groups.add(i)\n else:\n setattr(instance, attr, value)\n instance.save()\n return instance", "id": "4428752", "language": "Python", "matching_score": 2.9915521144866943, "max_stars_count": 81, "path": "users/serializers.py" }, { "content": "from rest_framework import serializers\nfrom .models import Hosts\n\n\nclass HostsSerializer(serializers.ModelSerializer):\n\n notice_email = serializers.ReadOnlyField()\n class Meta:\n model = Hosts\n fields = '__all__'", "id": "9876412", "language": "Python", "matching_score": 1.5791388750076294, "max_stars_count": 81, "path": "servers/serializers.py" }, { "content": "\nfrom rest_framework.decorators import api_view, permission_classes, renderer_classes \nfrom rest_framework.response import Response\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.renderers import JSONRenderer\n\nfrom urllib import parse\n\nfrom libs.encode import encodeQuery\n\n\nfrom servers.models import Hosts\nfrom libs.mg.query import 
mgdb\n\nimport datetime as dts\nfrom datetime import datetime\n\nfrom servers.tasks import send_notice_email\n\ndef host_load_check(host:dict)->bool:\n ram_pt = round(host['ram_usage']/host['ram_total']*100, 2)\n disk_pt = round(host['disk_usage']/host['disk_total']*100, 2)\n sys_pt = host['load_system'] if host['load_system'] <=100 else 100\n res_list = [\n True if ram_pt>=host['notice_ram'] else False,\n True if disk_pt>=host['notice_disk'] else False,\n True if sys_pt>=host['notice_load'] else False,\n ]\n if True in res_list:\n if host[\"is_notice\"]:\n d1 = host['last_notice_at']\n difference = datetime.now() - d1\n diff = round(difference.total_seconds()/3600,2)\n return True if diff >6 else False\n else:\n return True\n return False\n\n\ndef host_save(token, data):\n host = Hosts.objects.filter(code=token)\n host.update(**data)\n return [host[0].to_dict(), host]\n\ndef mongo_save(data):\n db = mgdb()\n collection = db['nqmonitor']['records']\n collection.insert(data)\n db.close()\n\n@api_view(['GET', 'POST'])\n@permission_classes([AllowAny])\n@renderer_classes([JSONRenderer])\ndef main(request):\n if request.method == 'POST':\n user = request.user\n res = \"http://www.default.com/default.html?\"+request.body.decode('utf-8') \n data = dict(parse.parse_qsl(parse.urlsplit(res).query))\n token = data['token']\n sdata = encodeQuery(data['data'].split(' '))\n sdata['updated_at'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n # print(token,sdata)\n try:\n # update the host data\n hosts = Hosts.objects.filter(code=token)\n hosts.update(**sdata)\n host = hosts[0].to_dict()\n # save the record to mongodb\n mdata = sdata.copy()\n mdata['updated_at'] = datetime.now()\n mdata['host_id'] = host['id']\n mongo_save(mdata)\n # check resource thresholds\n check = host_load_check(host)\n if check:\n print(host['name'], check)\n hosts.update(is_notice=True, last_notice_at=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n send_notice_email(host['notice_email'], host)\n return Response(res)\n except Exception as e:\n print(e)\n return Response({'msg':'error'})\n else:\n return Response({'msg':'post only!'})\n\n\n", "id": "11529543", "language": "Python", "matching_score": 2.999966621398926, "max_stars_count": 81, "path": "api/get_data.py" }, { "content": "from datetime import datetime\n\nfrom celery.decorators import task\nfrom celery.utils.log import get_task_logger\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom libs.mg.query import mgdb\n\nlogger = get_task_logger(__name__)\n\n\n@task(name=\"delete_records_by_host_task\")\ndef delete_records_by_host_task(host_id):\n try:\n db = mgdb()\n collection = db['nqmonitor']['records']\n collection.remove({'host_id':host_id})\n return True\n except:\n return False\n\n@task(name=\"send_resrouce_notice_email\")\ndef send_notice_email(to_email:str, host:dict):\n \"\"\"Send a high-resource-usage notification email\"\"\"\n subject = \"MonitorX Notice: '{name}' has high resource consumption\".format(**host)\n message = \"\"\n sender = settings.DEFAULT_FROM_EMAIL\n receiver = [to_email]\n host['ram_p'] = round(host['ram_usage']/host['ram_total']*100, 2)\n host['disk_p'] = round(host['disk_usage']/host['disk_total']*100, 2)\n host['sys_p'] = host['load_system'] if host['load_system'] <=100 else 100\n html_message = '''\n <p style=\"font-size:18px; color:#500050\">Hello, it seems one of your servers is consuming a lot of resources.</p>\n <div style=\"padding-bottom:20px\">\n <p style=\"font-size:18px; color:#500050\">Server:{name}</p>\n <p>Last Update:{updated_at}</p>\n </div>\n <div style=\"padding-bottom:20px\">\n <p>Average:{loads}</p>\n <p>System Load:{sys_p}%</p>\n 
<p>Ram Usage:{ram_p}%</p>\n <p>Disk Usage:{disk_p}%</p>\n </div>\n <div>\n If you don't want to receive alerts anymore, log into your account and edit the notification settings for your server.<br>\n Feel free to reply to this message if you are experiencing problems with our services.<br>\n Thanks,<br>\n MonitorX\n </div>\n '''.format(**host)\n logger.info(\"Sent notice email:\"+to_email)\n return send_mail(subject, message, sender, receiver, html_message=html_message, fail_silently=False)\n\n\n@task(name=\"send_download_notice_email\")\ndef send_notice_email(to_email:str, host:dict):\n \"\"\"发送提醒邮件\"\"\"\n subject = \"MonitorX Notice: '{name}' is not responding\".format(**host)\n message = \"\"\n sender = settings.DEFAULT_FROM_EMAIL\n receiver = [to_email]\n host['now'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n html_message = '''\n <p style=\"font-size:18px; color:\"#500050\">Hello, It seems one of your servers is not responding anymore.</p>\n <div style=\"padding-bottom:20px\">\n <p style=\"font-size:18px; color:\"#500050\">Server:{name}</p>\n <p>Last Update:{updated_at}</p>\n <p>Alert Trigge:{now}</p>\n </div>\n <div>\n If you don't want to receive alerts anymore, log into your account and edit the notification settings for your server.<br>\n Feel free to reply to this message if you are experiencing problems with our services.<br>\n Thanks,<br>\n MonitorX\n </div>\n '''.format(**host)\n logger.info(\"Sent notice email:\"+to_email)\n return send_mail(subject, message, sender, receiver, html_message=html_message, fail_silently=False)", "id": "4676589", "language": "Python", "matching_score": 1.1869715452194214, "max_stars_count": 81, "path": "servers/tasks.py" }, { "content": "from django.urls import path, re_path, include\nfrom rest_framework.routers import DefaultRouter\nfrom servers.viewset import HostsViewSet\n\nfrom users import auth\n\nfrom .records import get_host_records, delete_records_by_host\nfrom .get_ip import get_ip_info\nfrom .user import sigup\n\nrouter = DefaultRouter()\nrouter.register(r'hosts', HostsViewSet)\n\nurlpatterns = [\n path('auth', auth.AuthToken.as_view(), name='authentication'),\n path('api-auth/', include('rest_framework.urls')),\n path('records/<int:host_id>/', get_host_records),\n path('records/delete/<int:host_id>/', delete_records_by_host),\n path('ip', get_ip_info),\n path('user/sigup/', sigup),\n path('', include(router.urls)),\n]", "id": "6583531", "language": "Python", "matching_score": 2.056203603744507, "max_stars_count": 81, "path": "api/router.py" }, { "content": "\"\"\"nqmonitor URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n \nfrom users.auth import AuthToken, login, logout, sigup\nfrom api.get_data import main as agent_api\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\n@login_required\ndef dashboard(request):\n return render(request, 'hosts/list.html')\n\nurlpatterns = [\n path('', index),\n path('login/', login),\n path('logout/', logout),\n path('sigup/', sigup),\n path('dashboard/', dashboard),\n path('auth', AuthToken.as_view(), name='authentication'),\n path('admin/', admin.site.urls),\n path('hosts/', include('servers.urls')),\n path('api/agent.json', agent_api),\n path('api/v1/', include('api.router')),\n] \n\nurlpatterns += staticfiles_urlpatterns()", "id": "8833380", "language": "Python", "matching_score": 2.2813496589660645, "max_stars_count": 81, "path": "nqmonitor/urls.py" }, { "content": "from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n\nfrom servers.models import Hosts\n\n@login_required\ndef lists(request):\n return render(request, 'hosts/list.html')\n\n@login_required\ndef detail(request, host_id):\n host = Hosts.objects.get(id=host_id)\n return render(request, 'hosts/detail.html', {'host':host})", "id": "8968788", "language": "Python", "matching_score": 1.3435107469558716, "max_stars_count": 81, "path": "servers/views.py" }, { "content": "from django.urls import path, include\n\nfrom .views import detail, lists\nurlpatterns = [\n path('lists/', lists),\n path('detail/<int:host_id>/', detail),\n \n] \n", "id": "4061067", "language": "Python", "matching_score": 0.42036229372024536, "max_stars_count": 81, "path": "servers/urls.py" } ]
2.28757
Wolfe1
[ { "content": "import os\n\nAPP_TITLE = os.getenv(\"RFHUB_APP_TITLE\", \"rfhub2\")\nAPP_INTERFACE = os.getenv(\"RFHUB_APP_INTERFACE\", \"0.0.0.0\")\nAPP_PORT = int(os.getenv(\"PORT\", 8000))\nAPP_LOG_LEVEL = os.getenv(\"RFHUB_APP_LOG_LEVEL\", \"info\")\nBASIC_AUTH_USER = os.getenv(\"RFHUB_BASIC_AUTH_USER\", \"rfhub\")\nBASIC_AUTH_PASSWORD = os.getenv(\"RFHUB_BASIC_AUTH_PASSWORD\", \"<PASSWORD>\")\nSQLALCHEMY_DB_URI = os.getenv(\"RFHUB_DB_URI\", \"sqlite:///test.db\")\n", "id": "9920607", "language": "Python", "matching_score": 2.1798949241638184, "max_stars_count": 0, "path": "rfhub2/config.py" }, { "content": "import uvicorn\n\nfrom rfhub2 import config\n\n# for now we import here this instance of app to have db initialized and populated for development purposes\n# later we should just create app instance here\nfrom rfhub2.main import app\n\n\ndef main():\n uvicorn.run(\n app,\n host=config.APP_INTERFACE,\n port=config.APP_PORT,\n log_level=config.APP_LOG_LEVEL,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "6870155", "language": "Python", "matching_score": 0.03781699016690254, "max_stars_count": 0, "path": "rfhub2/__main__.py" }, { "content": "from starlette.requests import Request\nfrom unittest.mock import Mock\n\nfrom rfhub2.api.utils.db import db_healthcheck\nfrom rfhub2.db.session import Session\nfrom tests.unit.api.endpoints.base_endpoint_tests import BaseApiEndpointTest\n\n\nclass HealthcheckApiTest(BaseApiEndpointTest):\n def test_get_successful_healthcheck_response(self):\n response = self.client.get(\"api/v1/health/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), {\"db\": \"ok\"})\n\n def test_get_failed_healthcheck_response(self):\n def mock_db_healthcheck(_: Request) -> bool:\n return False\n\n self.app.dependency_overrides[db_healthcheck] = mock_db_healthcheck\n response = self.client.get(\"api/v1/health/\")\n self.assertEqual(response.status_code, 503)\n self.assertEqual(response.json(), {\"db\": \"failure\"})\n\n def test_successful_db_healthcheck(self):\n request = Mock()\n request.state.db = Session()\n self.assertTrue(db_healthcheck(request))\n\n def test_failed_db_healthcheck(self):\n request = Mock()\n request.state.db = None\n self.assertFalse(db_healthcheck(request))\n", "id": "11905239", "language": "Python", "matching_score": 3.526005983352661, "max_stars_count": 0, "path": "tests/unit/api/endpoints/healthcheck_tests.py" }, { "content": "from rfhub2.config import APP_TITLE\nfrom rfhub2.version import version\nfrom tests.unit.api.endpoints.base_endpoint_tests import BaseApiEndpointTest\n\n\nclass VersionApiTest(BaseApiEndpointTest):\n def test_get_successful_version_info_response(self):\n response = self.client.get(\"api/v1/version/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), {\"title\": APP_TITLE, \"version\": version})\n", "id": "7019850", "language": "Python", "matching_score": 2.57448673248291, "max_stars_count": 0, "path": "tests/unit/api/endpoints/version_tests.py" }, { "content": "from tests.unit.api.endpoints.base_endpoint_tests import BaseApiEndpointTest\n\n\nclass KeywordsApiTest(BaseApiEndpointTest):\n def test_get_single_keyword(self):\n response = self.client.get(\"api/v1/keywords/1/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), self.KEYWORD_1)\n\n def test_get_404_for_nonexistent_keyword_id(self):\n response = self.client.get(\"api/v1/keywords/999/\")\n self.assertEqual(response.status_code, 404)\n\n def 
test_get_all_keywords(self):\n response = self.client.get(\"api/v1/keywords/\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 3)\n self.assertEqual(body, [self.KEYWORD_2, self.KEYWORD_3, self.KEYWORD_1])\n\n def test_get_all_keywords_with_limit(self):\n response = self.client.get(\"api/v1/keywords?limit=2\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 2)\n self.assertEqual(body, [self.KEYWORD_2, self.KEYWORD_3])\n\n def test_get_all_keywords_with_skip(self):\n response = self.client.get(\"api/v1/keywords?skip=1\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 2)\n self.assertEqual(body, [self.KEYWORD_3, self.KEYWORD_1])\n\n def test_get_all_keywords_with_skip_and_limit(self):\n response = self.client.get(\"api/v1/keywords?skip=1&limit=1\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 1)\n self.assertEqual(body, [self.KEYWORD_3])\n\n def test_get_all_keywords_with_filter_pattern(self):\n response = self.client.get(\"api/v1/keywords?pattern=teardown\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 2)\n self.assertEqual(body, [self.KEYWORD_3, self.KEYWORD_1])\n\n def test_get_all_keywords_with_filter_pattern_not_using_doc(self):\n response = self.client.get(\"api/v1/keywords?pattern=teardown&use_doc=false\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 1)\n self.assertEqual(body, [self.KEYWORD_3])\n\n def test_search_keywords(self):\n response = self.client.get(\n \"api/v1/keywords/search?pattern=name:%20teardown%20in:%20first\"\n )\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 1)\n self.assertEqual(body, [self.KEYWORD_3])\n\n def test_search_keywords_without_results(self):\n response = self.client.get(\n \"api/v1/keywords/search?pattern=name:%20teardown%20in:%20second\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), [])\n\n def test_search_keywords_without_pattern_should_get_all(self):\n response = self.client.get(\"api/v1/keywords/search/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json(), [self.KEYWORD_2, self.KEYWORD_3, self.KEYWORD_1]\n )\n\n def test_search_keywords_with_skip_and_limit(self):\n cases = [\n (\n \"api/v1/keywords/search?pattern=teardown%20in:%20first&skip=1\",\n [self.KEYWORD_1],\n ),\n (\n \"api/v1/keywords/search?pattern=teardown%20in:%20first&limit=1\",\n [self.KEYWORD_3],\n ),\n ]\n for url, results in cases:\n with self.subTest(url=url, results=results):\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), results)\n\n def test_get_empty_list_with_nonexistent_filter_pattern(self):\n response = self.client.get(\"api/v1/keywords?pattern=nonexistent\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), [])\n\n def test_create_new_keyword_for_existing_collection(self):\n response = self.auth_client.post(\n \"api/v1/keywords/\", json=self.KEYWORD_TO_CREATE\n )\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.json(), self.KEYWORD_CREATED)\n\n def test_should_not_create_new_keyword_for_existing_collection_without_auth(self):\n response = self.client.post(\"api/v1/keywords/\", 
json=self.KEYWORD_TO_CREATE)\n self.assertEqual(response.status_code, 401)\n\n def test_get_400_when_creating_new_keyword_for_nonexistent_collection(self):\n keyword_to_create = {**self.KEYWORD_TO_CREATE, \"collection_id\": 999}\n response = self.auth_client.post(\"api/v1/keywords/\", json=keyword_to_create)\n self.assertEqual(response.status_code, 400)\n\n def test_update_existing_keyword(self):\n response = self.auth_client.put(\n f\"api/v1/keywords/{self.KEYWORD_3['id']}/\", json=self.KEYWORD_TO_UPDATE\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), self.KEYWORD_UPDATED)\n\n def test_should_not_update_existing_keyword_without_auth(self):\n response = self.client.put(\n f\"api/v1/keywords/{self.KEYWORD_3['id']}/\", json=self.KEYWORD_TO_UPDATE\n )\n self.assertEqual(response.status_code, 401)\n\n def test_get_404_when_updating_nonexistent_keyword(self):\n response = self.auth_client.put(\n \"api/v1/keywords/999/\", json=self.KEYWORD_TO_UPDATE\n )\n self.assertEqual(response.status_code, 404)\n\n def test_delete_existing_keyword(self):\n response = self.auth_client.delete(\"api/v1/keywords/1/\")\n self.assertEqual(response.status_code, 204)\n self.assertEqual(response.text, \"\")\n response = self.auth_client.get(\"api/v1/keywords/\")\n self.assertEqual(len(response.json()), 2)\n\n def test_should_not_delete_existing_keyword_without_auth(self):\n response = self.client.delete(\"api/v1/keywords/1/\")\n self.assertEqual(response.status_code, 401)\n\n def test_get_404_when_deleting_nonexistent_keyword(self):\n response = self.auth_client.delete(\"api/v1/keywords/999/\")\n self.assertEqual(response.status_code, 404)\n", "id": "7992603", "language": "Python", "matching_score": 4.432451248168945, "max_stars_count": 0, "path": "tests/unit/api/endpoints/keywords_tests.py" }, { "content": "from tests.unit.api.endpoints.base_endpoint_tests import BaseApiEndpointTest\n\n\nclass CollectionsApiTest(BaseApiEndpointTest):\n def test_get_single_collection(self):\n response = self.client.get(\"api/v1/collections/1/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), self.COLLECTION_1)\n\n def test_get_404_for_nonexistent_collection_id(self):\n response = self.client.get(\"api/v1/collections/999/\")\n self.assertEqual(response.status_code, 404)\n\n def test_get_all_collections(self):\n response = self.client.get(\"api/v1/collections/\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 3)\n self.assertEqual(body[0], self.COLLECTION_1)\n\n def test_get_all_collections_with_limit(self):\n response = self.client.get(\"api/v1/collections?limit=2\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 2)\n self.assertEqual(body[0], self.COLLECTION_1)\n\n def test_get_all_collections_with_skip(self):\n response = self.client.get(\"api/v1/collections?skip=1\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 2)\n self.assertEqual(body[0], self.COLLECTION_2)\n\n def test_get_all_collections_with_skip_and_limit(self):\n response = self.client.get(\"api/v1/collections?skip=1&limit=1\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 1)\n self.assertEqual(body[0], self.COLLECTION_2)\n\n def test_get_all_collections_with_filter_pattern(self):\n response = self.client.get(\"api/v1/collections?pattern=collection\")\n 
self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 2)\n self.assertEqual(body[0], self.COLLECTION_1)\n\n def test_get_all_collections_with_filter_libtype(self):\n response = self.client.get(\"api/v1/collections?libtype=robot\")\n self.assertEqual(response.status_code, 200)\n body = response.json()\n self.assertEqual(len(body), 2)\n self.assertEqual(body[0], self.COLLECTION_1)\n\n def test_get_empty_list_with_nonexistent_filter_pattern(self):\n response = self.client.get(\"api/v1/collections?pattern=nonexistent\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), [])\n\n def test_create_new_collection(self):\n response = self.auth_client.post(\n \"api/v1/collections/\", json=self.COLLECTION_TO_CREATE\n )\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.json(), self.COLLECTION_CREATED)\n\n def test_should_not_create_new_collection_without_auth(self):\n response = self.client.post(\n \"api/v1/collections/\", json=self.COLLECTION_TO_CREATE\n )\n self.assertEqual(response.status_code, 401)\n\n def test_should_not_create_new_collection_with_wrong_credentials(self):\n credentials = (\"rfhub\", \"<PASSWORD>\")\n response = self.client.post(\n \"api/v1/collections/\", json=self.COLLECTION_TO_CREATE, auth=credentials\n )\n self.assertEqual(response.status_code, 401)\n\n def test_update_existing_collection(self):\n response = self.auth_client.put(\n f\"api/v1/collections/{self.COLLECTION_3['id']}/\",\n json=self.COLLECTION_TO_UPDATE,\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), self.COLLECTION_UPDATED)\n\n def test_should_not_update_existing_collection_without_auth(self):\n response = self.client.put(\n f\"api/v1/collections/{self.COLLECTION_3['id']}/\",\n json=self.COLLECTION_TO_UPDATE,\n )\n self.assertEqual(response.status_code, 401)\n\n def test_get_404_when_updating_nonexistent_collection(self):\n response = self.auth_client.put(\n \"api/v1/collections/999/\", json=self.COLLECTION_TO_UPDATE\n )\n self.assertEqual(response.status_code, 404)\n\n def test_delete_existing_collection(self):\n response = self.auth_client.delete(\"api/v1/collections/1/\")\n self.assertEqual(response.status_code, 204)\n self.assertEqual(response.text, \"\")\n response = self.auth_client.get(\"api/v1/collections/\")\n self.assertEqual(len(response.json()), 2)\n\n def test_should_not_delete_existing_collection_without_auth(self):\n response = self.client.delete(\"api/v1/collections/1/\")\n self.assertEqual(response.status_code, 401)\n\n def test_get_404_when_deleting_nonexistent_collection(self):\n response = self.auth_client.delete(\"api/v1/collections/999/\")\n self.assertEqual(response.status_code, 404)\n", "id": "9255970", "language": "Python", "matching_score": 1.9622074365615845, "max_stars_count": 0, "path": "tests/unit/api/endpoints/collections_tests.py" }, { "content": "from sqlalchemy.exc import IntegrityError\nfrom typing import List, Optional\n\nfrom rfhub2.db.base import Keyword\nfrom rfhub2.db.session import db_session\nfrom tests.unit.db.base_repo_tests import BaseRepositoryTest\n\n\nclass KeywordRepositoryTest(BaseRepositoryTest):\n def test_should_add_keyword_with_collection_id(self) -> None:\n name_to_add = \"test_keyword\"\n keyword = Keyword(name=name_to_add, collection_id=self.collections[-1].id)\n self.keyword_repo.add(keyword)\n results: List[Keyword] = db_session.query(Keyword).filter_by(\n name=name_to_add\n ).all()\n self.assertEqual(len(results), 1)\n 
self.assertEqual(results[0].name, name_to_add)\n self.assertIsNotNone(results[0].id)\n self.assertEqual(results[0].collection, self.collections[-1])\n\n def test_should_not_add_keyword_without_collection_id(self) -> None:\n name_to_add = \"test_keyword\"\n keyword = Keyword(name=name_to_add)\n self.assertRaises(IntegrityError, lambda: self.keyword_repo.add(keyword))\n\n def test_should_get_keyword_by_id(self) -> None:\n result: Optional[Keyword] = self.keyword_repo.get(self.keywords[-1].id)\n self.assertEqual(result, self.keywords[-1])\n self.assertEqual(result.collection, self.collections[0])\n\n def test_should_get_all_keywords(self) -> None:\n result: List[Keyword] = self.keyword_repo.get_all()\n self.assertEqual(result, self.sorted_keywords)\n\n def test_should_filter_keywords_by_name_and_doc(self) -> None:\n test_data = [\n (\"Environ\", [self.keywords[2], self.keywords[0]]),\n (\"login_key\", self.keywords[1:2]),\n (\"Teardown\", [self.keywords[2], self.keywords[0]]),\n (\"\", self.sorted_keywords),\n ]\n for pattern, expected in test_data:\n with self.subTest(pattern=pattern, expected=expected):\n result: List[Keyword] = self.keyword_repo.get_all(pattern=pattern)\n self.assertEqual(result, expected)\n\n def test_should_filter_keywords_by_name_only(self) -> None:\n test_data = [\n (\"Environ\", []),\n (\"login_key\", self.keywords[1:2]),\n (\"Teardown\", self.keywords[2:]),\n (\"\", self.sorted_keywords),\n ]\n for pattern, expected in test_data:\n with self.subTest(pattern=pattern, expected=expected):\n result: List[Keyword] = self.keyword_repo.get_all(\n pattern=pattern, use_doc=False\n )\n self.assertEqual(result, expected)\n\n def test_should_get_all_keywords_with_limit(self) -> None:\n result: List[Keyword] = self.keyword_repo.get_all(limit=2)\n self.assertEqual(result, self.sorted_keywords[:2])\n\n def test_should_get_all_keywords_with_skip(self) -> None:\n result: List[Keyword] = self.keyword_repo.get_all(skip=2)\n self.assertEqual(result, self.sorted_keywords[2:])\n\n def test_should_get_all_keywords_with_collection_name(self) -> None:\n test_data = [\n (\"third\", []),\n (\"collec\", self.sorted_keywords),\n (\"\", self.sorted_keywords),\n ]\n for collection, expected in test_data:\n with self.subTest(collection=collection, expected=expected):\n result: List[Keyword] = self.keyword_repo.get_all(\n collection_name=collection\n )\n self.assertEqual(result, expected)\n\n def test_should_get_all_keywords_matching_pattern_and_collection_name(self) -> None:\n test_data = [\n (\"Login\", \"collect\", [self.keywords[1], self.app_keyword]),\n (\"Login\", \"second\", [self.app_keyword]),\n (\"application\", \"Third\", []),\n (\"application\", \"non-existing\", []),\n ]\n for pattern, collection, expected in test_data:\n with self.subTest(\n pattern=pattern, collection=collection, expected=expected\n ):\n result: List[Keyword] = self.keyword_repo.get_all(\n pattern=pattern, collection_name=collection\n )\n self.assertEqual(result, expected)\n", "id": "5726402", "language": "Python", "matching_score": 4.884721279144287, "max_stars_count": 0, "path": "tests/unit/db/keyword_repo_tests.py" }, { "content": "from typing import List, Optional\n\nfrom rfhub2.db.base import Collection, Keyword\nfrom rfhub2.db.session import db_session\nfrom tests.unit.db.base_repo_tests import BaseRepositoryTest\n\n\nclass CollectionRepositoryTest(BaseRepositoryTest):\n def test_should_add_collection(self) -> None:\n name_to_add = \"test_collection\"\n collection = Collection(name=name_to_add)\n 
self.collection_repo.add(collection)\n results: List[Collection] = db_session.query(Collection).filter_by(\n name=name_to_add\n ).all()\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0].name, name_to_add)\n self.assertIsNotNone(results[0].id)\n\n def test_should_add_collection_with_keywords(self) -> None:\n name_to_add = \"test_collection\"\n collection = Collection(name=name_to_add)\n collection.keywords = [Keyword(name=\"Keyword1\"), Keyword(name=\"Keyword2\")]\n self.collection_repo.add(collection)\n results: List[Collection] = db_session.query(Collection).filter_by(\n name=name_to_add\n ).all()\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0].name, name_to_add)\n self.assertIsNotNone(results[0].id)\n self.assertEqual(len(results[0].keywords), 2)\n self.assertEqual(\n [k.name for k in results[0].keywords], [\"Keyword1\", \"Keyword2\"]\n )\n\n def test_should_get_collection_by_id(self) -> None:\n result: Optional[Collection] = self.collection_repo.get(self.collections[-1].id)\n self.assertEqual(result, self.collections[-1])\n\n def test_should_get_collection_by_id_with_keywords(self) -> None:\n result: Optional[Collection] = self.collection_repo.get(self.collections[0].id)\n self.assertEqual(result, self.collections[0])\n self.assertEqual(result.keywords, self.collections[0].keywords)\n\n def test_should_get_all_collections(self) -> None:\n result: List[Collection] = self.collection_repo.get_all()\n self.assertEqual(result, self.collections)\n\n def test_should_get_all_collections_ordered_by_name(self) -> None:\n collection_a = Collection(name=\"A collection\")\n collection_z = Collection(name=\"Z collection\")\n self.collection_repo.add(collection_a)\n self.collection_repo.add(collection_z)\n result: List[Collection] = self.collection_repo.get_all()\n self.assertEqual(result, [collection_a] + self.collections + [collection_z])\n\n def test_should_filter_collections_by_name(self) -> None:\n test_data = [\n (\"collection\", self.collections[:2]),\n (\"thir\", self.collections[2:]),\n (\"second collection\", self.collections[1:2]),\n (\"\", self.collections),\n ]\n for pattern, expected in test_data:\n with self.subTest(pattern=pattern, expected=expected):\n result: List[Collection] = self.collection_repo.get_all(pattern=pattern)\n self.assertEqual(result, expected)\n\n def test_should_filter_collections_by_type(self) -> None:\n test_data = [\n (\"Robo\", self.collections[:2]),\n (\"library\", self.collections[2:]),\n (\"\", self.collections),\n ]\n for libtype, expected in test_data:\n with self.subTest(libtype=libtype, expected=expected):\n result: List[Collection] = self.collection_repo.get_all(libtype=libtype)\n self.assertEqual(result, expected)\n\n def test_should_get_all_collections_with_limit(self) -> None:\n result: List[Collection] = self.collection_repo.get_all(limit=2)\n self.assertEqual(result, self.collections[:2])\n\n def test_should_get_all_collections_with_skip(self) -> None:\n result: List[Collection] = self.collection_repo.get_all(skip=2)\n self.assertEqual(result, self.collections[2:])\n\n def test_should_delete_collection_with_keywords(self) -> None:\n result: int = self.collection_repo.delete(self.collections[0].id)\n self.assertEqual(result, 1)\n self.assertEqual(db_session.query(Collection).count(), 2)\n self.assertEqual(db_session.query(Keyword).count(), 1)\n\n def test_should_delete_collection_without_keywords(self) -> None:\n result: int = self.collection_repo.delete(self.collections[2].id)\n self.assertEqual(result, 1)\n 
self.assertEqual(db_session.query(Collection).count(), 2)\n self.assertEqual(db_session.query(Keyword).count(), 4)\n", "id": "6900736", "language": "Python", "matching_score": 1.9359091520309448, "max_stars_count": 0, "path": "tests/unit/db/collection_repo_tests.py" }, { "content": "from fastapi import APIRouter, Depends, HTTPException\nfrom starlette.responses import Response\nfrom typing import List, Optional\n\nfrom rfhub2.api.utils.auth import is_authenticated\nfrom rfhub2.api.utils.db import get_collection_repository, get_keyword_repository\nfrom rfhub2.api.utils.http import or_404\nfrom rfhub2.db.base import Collection as DBCollection, Keyword as DBKeyword\nfrom rfhub2.db.repository.collection_repository import CollectionRepository\nfrom rfhub2.db.repository.keyword_repository import KeywordRepository\nfrom rfhub2.model import Keyword, KeywordCreate, KeywordUpdate\nfrom rfhub2.ui.search_params import SearchParams\n\nrouter = APIRouter()\n\n\[email protected](\"/\", response_model=List[Keyword])\ndef get_keywords(\n repository: KeywordRepository = Depends(get_keyword_repository),\n skip: int = 0,\n limit: int = 100,\n pattern: str = None,\n use_doc: bool = True,\n):\n keywords: List[DBKeyword] = repository.get_all(\n skip=skip, limit=limit, pattern=pattern, use_doc=use_doc\n )\n return keywords\n\n\[email protected](\"/search/\", response_model=List[Keyword])\ndef search_keywords(\n *,\n repository: KeywordRepository = Depends(get_keyword_repository),\n params: SearchParams = Depends(),\n skip: int = 0,\n limit: int = 100,\n):\n return repository.get_all(\n pattern=params.pattern,\n collection_name=params.collection_name,\n use_doc=params.use_doc,\n skip=skip,\n limit=limit,\n )\n\n\[email protected](\"/{id}/\", response_model=Keyword)\ndef get_keyword(\n *, repository: KeywordRepository = Depends(get_keyword_repository), id: int\n):\n keyword: Optional[DBKeyword] = repository.get(id)\n return or_404(keyword)\n\n\[email protected](\"/\", response_model=Keyword, status_code=201)\ndef create_keyword(\n *,\n _: bool = Depends(is_authenticated),\n repository: KeywordRepository = Depends(get_keyword_repository),\n collection_repository: CollectionRepository = Depends(get_collection_repository),\n keyword: KeywordCreate,\n):\n collection: Optional[DBCollection] = collection_repository.get(\n keyword.collection_id\n )\n if not collection:\n raise HTTPException(status_code=400, detail=\"Collection does not exist\")\n db_keyword: DBKeyword = DBKeyword(**keyword.dict())\n return repository.add(db_keyword)\n\n\[email protected](\"/{id}/\", response_model=Keyword)\ndef update_keyword(\n *,\n _: bool = Depends(is_authenticated),\n repository: KeywordRepository = Depends(get_keyword_repository),\n id: int,\n keyword_update: KeywordUpdate,\n):\n db_keyword: DBKeyword = or_404(repository.get(id))\n updated: DBKeyword = repository.update(\n db_keyword, keyword_update.dict(skip_defaults=True)\n )\n return updated\n\n\[email protected](\"/{id}/\")\ndef delete_keyword(\n *,\n _: bool = Depends(is_authenticated),\n repository: KeywordRepository = Depends(get_keyword_repository),\n id: int,\n):\n deleted: int = repository.delete(id)\n if deleted:\n return Response(status_code=204)\n else:\n raise HTTPException(status_code=404)\n", "id": "7222741", "language": "Python", "matching_score": 5.260536193847656, "max_stars_count": 0, "path": "rfhub2/api/endpoints/keywords.py" }, { "content": "from fastapi import APIRouter, Depends, HTTPException\nfrom starlette.responses import Response\nfrom typing import List, 
Optional\n\nfrom rfhub2.api.utils.auth import is_authenticated\nfrom rfhub2.api.utils.db import get_collection_repository\nfrom rfhub2.api.utils.http import or_404\nfrom rfhub2.db.base import Collection as DBCollection\nfrom rfhub2.db.repository.collection_repository import CollectionRepository\nfrom rfhub2.model import Collection, CollectionUpdate\n\nrouter = APIRouter()\n\n\[email protected](\"/\", response_model=List[Collection])\ndef get_collections(\n repository: CollectionRepository = Depends(get_collection_repository),\n skip: int = 0,\n limit: int = 100,\n pattern: str = None,\n libtype: str = None,\n):\n collections: List[DBCollection] = repository.get_all(\n skip=skip, limit=limit, pattern=pattern, libtype=libtype\n )\n return collections\n\n\[email protected](\"/{id}/\", response_model=Collection)\ndef get_collection(\n *, repository: CollectionRepository = Depends(get_collection_repository), id: int\n):\n collection: Optional[DBCollection] = repository.get(id)\n return or_404(collection)\n\n\[email protected](\"/\", response_model=Collection, status_code=201)\ndef create_collection(\n *,\n _: bool = Depends(is_authenticated),\n repository: CollectionRepository = Depends(get_collection_repository),\n collection: CollectionUpdate,\n):\n db_collection: DBCollection = DBCollection(**collection.dict())\n return repository.add(db_collection)\n\n\[email protected](\"/{id}/\", response_model=Collection)\ndef update_collection(\n *,\n _: bool = Depends(is_authenticated),\n repository: CollectionRepository = Depends(get_collection_repository),\n id: int,\n collection_update: CollectionUpdate,\n):\n db_collection: DBCollection = or_404(repository.get(id))\n updated: DBCollection = repository.update(\n db_collection, collection_update.dict(skip_defaults=True)\n )\n return updated\n\n\[email protected](\"/{id}/\")\ndef delete_collection(\n *,\n _: bool = Depends(is_authenticated),\n repository: CollectionRepository = Depends(get_collection_repository),\n id: int,\n):\n deleted: int = repository.delete(id)\n if deleted:\n return Response(status_code=204)\n else:\n raise HTTPException(status_code=404)\n", "id": "604559", "language": "Python", "matching_score": 2.3710527420043945, "max_stars_count": 0, "path": "rfhub2/api/endpoints/collections.py" }, { "content": "from fastapi import Depends, HTTPException\nfrom fastapi.security import HTTPBasic, HTTPBasicCredentials\nfrom starlette.status import HTTP_401_UNAUTHORIZED\n\nfrom rfhub2 import config\n\nsecurity = HTTPBasic()\n\n\ndef authenticated_user(\n user: HTTPBasicCredentials = Depends(security)\n) -> HTTPBasicCredentials:\n if (\n user.username == config.BASIC_AUTH_USER\n and user.password == config.BASIC_AUTH_PASSWORD\n ):\n return user\n else:\n raise HTTPException(\n status_code=HTTP_401_UNAUTHORIZED,\n detail=\"Unauthorized to perform this action\",\n headers={\"WWW-Authenticate\": \"Basic\"},\n )\n\n\ndef is_authenticated(user: HTTPBasicCredentials = Depends(security)) -> bool:\n return authenticated_user(user) is not None\n", "id": "10259692", "language": "Python", "matching_score": 1.7777817249298096, "max_stars_count": 0, "path": "rfhub2/api/utils/auth.py" }, { "content": "from fastapi import HTTPException\nfrom typing import Optional, TypeVar\n\nT = TypeVar(\"T\")\n\n\ndef or_404(item: Optional[T]) -> T:\n if not item:\n raise HTTPException(status_code=404)\n return item\n", "id": "8373811", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "rfhub2/api/utils/http.py" }, { "content": "with 
open(\"non-existing-file.txt\") as f:\n content = f.readlines()\n", "id": "9156553", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/fixtures/initial/data_error.py" }, { "content": "def glob_to_sql(string: str) -> str:\n \"\"\"Convert glob-like wildcards to SQL wildcards\n\n * becomes %\n ? becomes _\n % becomes \\%\n \\\\ remains \\\\\n \\* remains \\*\n \\? remains \\?\n\n This also adds a leading and trailing %, unless the pattern begins with\n ^ or ends with $\n \"\"\"\n\n # What's with the chr(1) and chr(2) nonsense? It's a trick to\n # hide \\* and \\? from the * and ? substitutions. This trick\n # depends on the substitutions being done in order. chr(1)\n # and chr(2) were picked because I know those characters\n # almost certainly won't be in the input string\n table = (\n (r\"\\\\\", chr(1)),\n (r\"\\*\", chr(2)),\n (r\"\\?\", chr(3)),\n (r\"%\", r\"\\%\"),\n (r\"?\", \"_\"),\n (r\"*\", \"%\"),\n (chr(1), r\"\\\\\"),\n (chr(2), r\"\\*\"),\n (chr(3), r\"\\?\"),\n )\n\n for (a, b) in table:\n string = string.replace(a, b)\n\n string = string[1:] if string.startswith(\"^\") else \"%\" + string\n string = string[:-1] if string.endswith(\"$\") else string + \"%\"\n\n return string\n", "id": "8481294", "language": "Python", "matching_score": 0.06066462770104408, "max_stars_count": 0, "path": "rfhub2/db/repository/query_utils.py" }, { "content": "# -*- coding: utf-8 -*-\r\n\r\nfrom appium.webdriver.common.touch_action import TouchAction\r\n\r\nfrom AppiumLibrary.locators import ElementFinder\r\nfrom .keywordgroup import KeywordGroup\r\n\r\n\r\nclass _TouchKeywords(KeywordGroup):\r\n\r\n def __init__(self):\r\n self._element_finder = ElementFinder()\r\n\r\n # Public, element lookups\r\n def zoom(self, locator, percent=\"200%\", steps=1):\r\n \"\"\"\r\n Zooms in on an element a certain amount.\r\n \"\"\"\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.zoom(element=element, percent=percent, steps=steps)\r\n\r\n def pinch(self, locator, percent=\"200%\", steps=1):\r\n \"\"\"\r\n Pinch in on an element a certain amount.\r\n \"\"\"\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.pinch(element=element, percent=percent, steps=steps)\r\n\r\n def swipe(self, start_x, start_y, offset_x, offset_y, duration=1000):\r\n \"\"\"\r\n Swipe from one point to another point, for an optional duration.\r\n\r\n Args:\r\n - start_x - x-coordinate at which to start\r\n - start_y - y-coordinate at which to start\r\n - offset_x - x-coordinate distance from start_x at which to stop\r\n - offset_y - y-coordinate distance from start_y at which to stop\r\n - duration - (optional) time to take the swipe, in ms.\r\n\r\n Usage:\r\n | Swipe | 500 | 100 | 100 | 0 | 1000 |\r\n\r\n _*NOTE: *_\r\n Android 'Swipe' is not working properly, use ``offset_x`` and ``offset_y`` as if these are destination points.\r\n \"\"\"\r\n x_start = int(start_x)\r\n x_offset = int(offset_x)\r\n y_start = int(start_y)\r\n y_offset = int(offset_y)\r\n driver = self._current_application()\r\n driver.swipe(x_start, y_start, x_offset, y_offset, duration)\r\n\r\n def swipe_by_percent(self, start_x, start_y, end_x, end_y, duration=1000):\r\n \"\"\"\r\n Swipe from one percent of the screen to another percent, for an optional duration.\r\n Normal swipe fails to scale for different screen resolutions, this can be avoided using percent.\r\n\r\n Args:\r\n - start_x - x-percent at which to start\r\n - start_y - 
y-percent at which to start\r\n - end_x - x-percent distance from start_x at which to stop\r\n - end_y - y-percent distance from start_y at which to stop\r\n - duration - (optional) time to take the swipe, in ms.\r\n\r\n Usage:\r\n | Swipe By Percent | 90 | 50 | 10 | 50 | # Swipes screen from right to left. |\r\n\r\n _*NOTE: *_\r\n This also considers swipe acts different between iOS and Android.\r\n\r\n New in AppiumLibrary 1.4.5\r\n \"\"\"\r\n width = self.get_window_width()\r\n height = self.get_window_height()\r\n x_start = float(start_x) / 100 * width\r\n x_end = float(end_x) / 100 * width\r\n y_start = float(start_y) / 100 * height\r\n y_end = float(end_y) / 100 * height\r\n x_offset = x_end - x_start\r\n y_offset = y_end - y_start\r\n platform = self._get_platform()\r\n if platform == 'android':\r\n self.swipe(x_start, y_start, x_end, y_end, duration)\r\n else:\r\n self.swipe(x_start, y_start, x_offset, y_offset, duration)\r\n\r\n def scroll(self, start_locator, end_locator):\r\n \"\"\"\r\n Scrolls from one element to another\r\n Key attributes for arbitrary elements are `id` and `name`. See\r\n `introduction` for details about locating elements.\r\n \"\"\"\r\n el1 = self._element_find(start_locator, True, True)\r\n el2 = self._element_find(end_locator, True, True)\r\n driver = self._current_application()\r\n driver.scroll(el1, el2)\r\n\r\n def scroll_down(self, locator):\r\n \"\"\"Scrolls down to element\"\"\"\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.execute_script(\"mobile: scroll\", {\"direction\": 'down', 'elementid': element.id})\r\n\r\n def scroll_up(self, locator):\r\n \"\"\"Scrolls up to element\"\"\"\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n driver.execute_script(\"mobile: scroll\", {\"direction\": 'up', 'elementid': element.id})\r\n\r\n def long_press(self, locator, duration=1000):\r\n \"\"\" Long press the element with optional duration \"\"\"\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n action = TouchAction(driver)\r\n action.press(element).wait(duration).release().perform()\r\n\r\n def tap(self, locator, x_offset=None, y_offset=None, count=1):\r\n \"\"\" Tap element identified by ``locator``. \r\n\r\n Args:\r\n - ``locator`` - (mandatory). Taps coordinates when set to ${None}.\r\n - ``x_offset`` - (optional) x coordinate to tap, relative to the top left corner of the element.\r\n - ``y_offset`` - (optional) y coordinate. 
If y is used, x must also be set, and vice versa\r\n - ``count`` - can be used for multiple times of tap on that element\r\n \"\"\"\r\n driver = self._current_application()\r\n el = self._element_find(locator, True, True)\r\n action = TouchAction(driver)\r\n action.tap(el,x_offset,y_offset, count).perform()\r\n \r\n def tap_with_number_of_taps(self, locator, number_of_taps, number_of_touches):\r\n \"\"\" Sends one or more taps with one or more touch points.iOS only.\r\n \r\n Args:\r\n - ``number_of_taps`` - The number of taps.\r\n - ``number_of_touches`` - The number of touch points.\r\n \"\"\"\r\n driver = self._current_application()\r\n element = self._element_find(locator, True, True)\r\n params = {'element': element, 'numberOfTaps': number_of_taps, 'numberOfTouches': number_of_touches}\r\n driver.execute_script(\"mobile: tapWithNumberOfTaps\", params)\r\n\r\n def click_a_point(self, x=0, y=0, duration=100):\r\n \"\"\" Click on a point\"\"\"\r\n self._info(\"Clicking on a point (%s,%s).\" % (x,y))\r\n driver = self._current_application()\r\n action = TouchAction(driver)\r\n try:\r\n action.press(x=float(x), y=float(y)).wait(float(duration)).release().perform()\r\n except:\r\n assert False, \"Can't click on a point at (%s,%s)\" % (x,y)\r\n\r\n def click_element_at_coordinates(self, coordinate_X, coordinate_Y):\r\n \"\"\" click element at a certain coordinate \"\"\"\r\n self._info(\"Pressing at (%s, %s).\" % (coordinate_X, coordinate_Y))\r\n driver = self._current_application()\r\n action = TouchAction(driver)\r\n action.press(x=coordinate_X, y=coordinate_Y).release().perform()\r\n", "id": "1871924", "language": "Python", "matching_score": 1.087428092956543, "max_stars_count": 231, "path": "AppiumLibrary/keywords/_touch.py" }, { "content": "from setuptools import setup\n\nfilename = 'rfhub2/version.py'\nexec(open(filename).read())\n\nsetup(\n name='rfhub2',\n version=version,\n author='<NAME>, <NAME>',\n author_email='<EMAIL>, <EMAIL>',\n url='https://github.com/pbylicki/rfhub2/',\n keywords='robotframework',\n license='Apache License 2.0',\n description='Webserver for robot framework and python assets documentation',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n zip_safe=True,\n include_package_data=True,\n install_requires=[\n 'aiofiles>=0.4.0',\n 'Click>=7.0',\n 'fastapi>=0.30.0',\n 'robotframework>=3.0.0',\n 'SQLAlchemy>=1.1.0',\n 'requests>=2.10.0',\n 'uvicorn>=0.7.1'\n ],\n extras_require={\n \"postgresql\": [\"psycopg2-binary>=2.7.4\"]\n },\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Unix\",\n \"Framework :: Robot Framework\",\n \"Framework :: Robot Framework :: Tool\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Software Development :: Quality Assurance\",\n \"Intended Audience :: Developers\",\n ],\n packages=[\n 'rfhub2',\n 'rfhub2.api',\n 'rfhub2.api.endpoints',\n 'rfhub2.api.middleware',\n 'rfhub2.api.utils',\n 'rfhub2.cli',\n 'rfhub2.db',\n 'rfhub2.db.model',\n 'rfhub2.db.repository',\n 'rfhub2.model',\n 'rfhub2.ui',\n 'rfhub2.utils',\n ],\n scripts=[],\n entry_points={\n 'console_scripts': [\n \"rfhub2 = rfhub2.__main__:main\",\n \"rfhub2-cli = rfhub2.cli.__main__:main\",\n ]\n }\n)\n", "id": "3974555", "language": "Python", "matching_score": 
1.2705168724060059, "max_stars_count": 0, "path": "setup.py" }, { "content": "import click\nfrom pathlib import Path\nfrom typing import Tuple\n\nfrom rfhub2.cli.api_client import Client\nfrom rfhub2.cli.rfhub_importer import RfhubImporter\n\n\[email protected]()\[email protected](\n \"-a\",\n \"--app-url\",\n type=click.STRING,\n default=\"http://localhost:8000\",\n help=\"Specifies IP, URI or host of rfhub2 web application. Default value is http://localhost:8000.\",\n)\[email protected](\n \"-u\",\n \"--user\",\n type=click.STRING,\n default=\"rfhub\",\n help=\"Specifies rfhub2 user to authenticate on endpoints that requires that. Default value is rfhub.\",\n)\[email protected](\n \"-p\",\n \"--password\",\n type=click.STRING,\n default=\"<PASSWORD>\",\n help=\"Specifies rfhub2 password to authenticate on endpoints that requires that. Default value is rfhub.\",\n)\[email protected](\n \"--no-installed-keywords\",\n type=click.BOOL,\n default=False,\n is_flag=True,\n help=\"Flag specifying if package should skip loading commonly installed libraries, \"\n \"such as such as BuiltIn, Collections, DateTime etc.\",\n)\[email protected](\n \"--mode\",\n \"-m\",\n type=click.Choice([\"insert\", \"append\", \"update\"], case_sensitive=False),\n default=\"insert\",\n help=\"\"\"Choice parameter specifying in what mode package should run:\\n\n - `insert` - default value, removes all existing collections from app and add ones found in paths\\n\n - `append` - adds collections found in paths without removal of existing ones\\n\n - `update` - removes collections not found in paths, adds new ones and updates existing ones.\"\"\",\n)\[email protected](\"paths\", nargs=-1, type=click.Path(exists=True))\ndef main(\n app_url: str,\n user: str,\n password: str,\n paths: Tuple[Path, ...],\n mode: str,\n no_installed_keywords: bool,\n) -> None:\n \"\"\"Package to populate rfhub2 with robot framework keywords\n from libraries and resource files.\"\"\"\n client = Client(app_url, user, password)\n rfhub_importer = RfhubImporter(client, paths, no_installed_keywords, mode)\n loaded_collections, loaded_keywords = rfhub_importer.import_libraries()\n print(\n f\"\\nSuccessfully loaded {loaded_collections} collections with {loaded_keywords} keywords.\"\n )\n", "id": "10759215", "language": "Python", "matching_score": 1.770041584968567, "max_stars_count": 0, "path": "rfhub2/cli/cli.py" }, { "content": "from requests import session, Response\nfrom typing import Dict, Tuple\n\n\nAPI_V1 = \"api/v1\"\nTEST_COLLECTION = {\n \"name\": \"healtcheck_collection\",\n \"type\": \"a\",\n \"version\": \"a\",\n \"scope\": \"a\",\n \"named_args\": \"a\",\n \"path\": \"a\",\n \"doc\": \"a\",\n \"doc_format\": \"a\",\n}\n\n\nclass Client(object):\n \"\"\"\n API client with methods to populate rfhub2 application.\n \"\"\"\n\n def __init__(self, app_url: str, user: str, password: str):\n self.app_url = app_url\n self.session = session()\n self.api_url = f\"{self.app_url}/{API_V1}\"\n self.session.auth = (user, password)\n self.session.headers = {\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n }\n\n def get_collections(self, skip: int = 0, limit: int = 100) -> Response:\n \"\"\"\n Gets list of collections object using request get method.\n \"\"\"\n return self._get_request(\n endpoint=\"collections\", params={\"skip\": skip, \"limit\": limit}\n )\n\n def add_collection(self, data: Dict) -> Dict:\n \"\"\"\n Adds collection using request post method.\n \"\"\"\n return 
self._post_request(endpoint=\"collections\", data=data)\n\n def delete_collection(self, id: int) -> Response:\n \"\"\"\n Deletes collection with given id.\n \"\"\"\n return self._delete_request(endpoint=\"collections\", id=id)\n\n def add_keyword(self, data: Dict) -> Dict:\n \"\"\"\n Adds keyword using request post method.\n \"\"\"\n return self._post_request(endpoint=\"keywords\", data=data)\n\n def _get_request(self, endpoint: str, params: Dict) -> Dict:\n \"\"\"\n Sends get request from given endpoint.\n \"\"\"\n request = self.session.get(url=f\"{self.api_url}/{endpoint}/\", params=params)\n return request.json()\n\n def _post_request(self, endpoint: str, data: Dict) -> Tuple[int, Dict]:\n \"\"\"\n Sends post request to collections or keywords endpoint.\n \"\"\"\n request = self.session.post(url=f\"{self.api_url}/{endpoint}/\", json=data)\n return request.status_code, request.json()\n\n def _delete_request(self, endpoint: str, id: int) -> Response:\n \"\"\"\n Sends delete request to collections or keywords endpoint with item id.\n \"\"\"\n return self.session.delete(url=f\"{self.api_url}/{endpoint}/{id}/\")\n", "id": "8425170", "language": "Python", "matching_score": 3.4914190769195557, "max_stars_count": 0, "path": "rfhub2/cli/api_client.py" }, { "content": "import responses\nimport unittest\n\nfrom rfhub2.cli.api_client import Client\n\nCOLLECTION = [\n {\n \"name\": \"Third\",\n \"type\": \"Library\",\n \"version\": None,\n \"scope\": None,\n \"named_args\": None,\n \"path\": None,\n \"doc\": None,\n \"doc_format\": None,\n \"id\": 3,\n \"keywords\": [],\n }\n]\nKEYWORD = {\n \"name\": \"Some keyword\",\n \"doc\": \"Perform some check\",\n \"args\": None,\n \"id\": 2,\n \"collection\": {\"id\": 1, \"name\": \"First collection\"},\n}\n\n\nclass ApiClientTests(unittest.TestCase):\n def setUp(self) -> None:\n self.app_url = \"http://localhost:8000\"\n self.client = Client(self.app_url, \"rfhub\", \"rfhub\")\n self.collection_endpoint = f\"{self.client.api_url}/collections/\"\n self.keyword_endpoint = f\"{self.client.api_url}/keywords/\"\n\n def test_get_collections(self):\n with responses.RequestsMock() as rsps:\n rsps.add(\n responses.GET,\n self.collection_endpoint,\n json=COLLECTION,\n status=200,\n content_type=\"application/json\",\n )\n response = self.client.get_collections()\n self.assertEqual(response, COLLECTION)\n\n def test_add_collection(self):\n with responses.RequestsMock() as rsps:\n rsps.add(\n responses.POST,\n self.collection_endpoint,\n json=COLLECTION[0],\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n response = self.client.add_collection(data=COLLECTION[0])\n self.assertEqual(response, (201, COLLECTION[0]))\n\n def test_delete_collection(self):\n with responses.RequestsMock() as rsps:\n rsps.add(\n responses.DELETE,\n f\"{self.collection_endpoint}1/\",\n status=204,\n adding_headers={\"accept\": \"application/json\"},\n )\n response = self.client.delete_collection(1)\n self.assertEqual(response.status_code, 204)\n\n def test_add_keyword(self):\n with responses.RequestsMock() as rsps:\n rsps.add(\n responses.POST,\n self.keyword_endpoint,\n json=KEYWORD,\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n response = self.client.add_keyword(data=KEYWORD)\n self.assertEqual(response, (201, KEYWORD))\n", "id": "5675751", "language": "Python", "matching_score": 2.1779541969299316, "max_stars_count": 0, "path": 
"tests/unit/cli/api_client.py" }, { "content": "from pathlib import Path\nimport unittest\nfrom starlette.testclient import TestClient\n\nfrom rfhub2.app import create_app\n\n\nclass UIRouterTest(unittest.TestCase):\n\n INDEX_FILE = (\n Path(__file__).parent\n / \"..\"\n / \"..\"\n / \"..\"\n / \"rfhub2\"\n / \"templates\"\n / \"index.html\"\n )\n\n def setUp(self) -> None:\n self.app = create_app()\n self.client: TestClient = TestClient(self.app)\n\n def test_ui_routes_should_get_index_html_file(self):\n with open(self.INDEX_FILE) as f:\n expected_body = f.read()\n routes = (\"/\", \"/search/?q=a\", \"/keywords/123/\", \"/keywords/123/456/\")\n for route in routes:\n with self.subTest(route=route):\n response = self.client.get(route)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.text, expected_body)\n", "id": "11373329", "language": "Python", "matching_score": 1.9507006406784058, "max_stars_count": 0, "path": "tests/unit/ui/ui_router_tests.py" }, { "content": "from pathlib import Path\n\n\ndef abs_path(*segments: str) -> str:\n return str((Path(__file__).parent.parent / Path(*segments)).resolve())\n", "id": "8393674", "language": "Python", "matching_score": 1.537888526916504, "max_stars_count": 0, "path": "rfhub2/utils/__init__.py" }, { "content": "import unittest\n\nfrom rfhub2.utils import abs_path\n\n\nclass PathOpsTest(unittest.TestCase):\n def test_should_build_absolute_path_from_relative_path_inside_the_package(self):\n path = abs_path(\"..\", \"tests\", \"unit\", \"utils\", \"path_ops_tests.py\")\n self.assertEqual(path, __file__)\n", "id": "11731172", "language": "Python", "matching_score": 1.9083038568496704, "max_stars_count": 0, "path": "tests/unit/utils/path_ops_tests.py" }, { "content": "from tests.unit.api.endpoints.collections_tests import CollectionsApiTest\nfrom tests.unit.api.endpoints.healthcheck_tests import HealthcheckApiTest\nfrom tests.unit.api.endpoints.keywords_tests import KeywordsApiTest\nfrom tests.unit.api.endpoints.version_tests import VersionApiTest\nfrom tests.unit.cli.api_client import ApiClientTests\nfrom tests.unit.cli.rfhub_importer import RfhubImporterTests\nfrom tests.unit.db.collection_repo_tests import CollectionRepositoryTest\nfrom tests.unit.db.collection_tests import CollectionTest\nfrom tests.unit.db.keyword_repo_tests import KeywordRepositoryTest\nfrom tests.unit.db.keyword_tests import KeywordTest\nfrom tests.unit.ui.search_params_tests import SearchParamsTest\nfrom tests.unit.ui.ui_router_tests import UIRouterTest\nfrom tests.unit.utils.path_ops_tests import PathOpsTest\n", "id": "4303361", "language": "Python", "matching_score": 1.5936599969863892, "max_stars_count": 0, "path": "tests/__init__.py" }, { "content": "from starlette.requests import Request\n\nfrom rfhub2.db.repository.keyword_repository import KeywordRepository\nfrom rfhub2.db.repository.collection_repository import CollectionRepository\n\n\ndef get_collection_repository(request: Request) -> CollectionRepository:\n return CollectionRepository(request.state.db)\n\n\ndef get_keyword_repository(request: Request) -> KeywordRepository:\n return KeywordRepository(request.state.db)\n\n\ndef db_healthcheck(request: Request) -> bool:\n try:\n result = request.state.db.execute(\"select 1\")\n return next(result) == (1,)\n except Exception as e:\n print(e)\n return False\n", "id": "6785925", "language": "Python", "matching_score": 1.3858602046966553, "max_stars_count": 0, "path": "rfhub2/api/utils/db.py" }, { "content": "from starlette.middleware.base import 
BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\n\nfrom rfhub2.db.session import Session\n\n\nclass DbSessionMiddleware(BaseHTTPMiddleware):\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint):\n request.state.db = Session()\n response = await call_next(request)\n request.state.db.close()\n return response\n", "id": "3208777", "language": "Python", "matching_score": 1.4608832597732544, "max_stars_count": 0, "path": "rfhub2/api/middleware/db_session_middleware.py" }, { "content": "from fastapi import FastAPI\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom rfhub2 import config\nfrom rfhub2.api.router import api_router\nfrom rfhub2.api.middleware.db_session_middleware import DbSessionMiddleware\nfrom rfhub2.ui.ui_router import router as ui_router\nfrom rfhub2.utils import abs_path\nfrom rfhub2.version import version\n\n\ndef create_app() -> FastAPI:\n app = FastAPI(title=config.APP_TITLE, version=version)\n app.mount(\"/static\", StaticFiles(directory=abs_path(\"static\")), name=\"static\")\n app.include_router(ui_router)\n app.include_router(api_router, prefix=\"/api/v1\")\n app.add_middleware(CORSMiddleware, allow_origins=[\"*\"])\n app.add_middleware(DbSessionMiddleware)\n return app\n", "id": "11459376", "language": "Python", "matching_score": 1.741636037826538, "max_stars_count": 0, "path": "rfhub2/app.py" }, { "content": "from fastapi import APIRouter\nfrom starlette.responses import FileResponse\n\nfrom rfhub2.utils import abs_path\n\nrouter = APIRouter()\n\n\[email protected](\"/\")\[email protected](\"/search/\")\[email protected](\"/keywords/{collection_id}/\")\[email protected](\"/keywords/{collection_id}/{keyword_id}/\")\nasync def home():\n return FileResponse(abs_path(\"templates\", \"index.html\"), media_type=\"text/html\")\n", "id": "7487974", "language": "Python", "matching_score": 1.904894232749939, "max_stars_count": 0, "path": "rfhub2/ui/ui_router.py" }, { "content": "from fastapi import APIRouter\n\nfrom rfhub2.api.endpoints import collections, healthcheck, keywords, version\n\napi_router = APIRouter()\napi_router.include_router(healthcheck.router, prefix=\"/health\", tags=[\"healthcheck\"])\napi_router.include_router(\n collections.router, prefix=\"/collections\", tags=[\"collections\"]\n)\napi_router.include_router(keywords.router, prefix=\"/keywords\", tags=[\"keywords\"])\napi_router.include_router(version.router, prefix=\"/version\", tags=[\"version\"])\n", "id": "7192176", "language": "Python", "matching_score": 1.4264482259750366, "max_stars_count": 0, "path": "rfhub2/api/router.py" }, { "content": "from fastapi import APIRouter, Depends\nfrom starlette.responses import Response\n\nfrom rfhub2.api.utils.db import db_healthcheck\nfrom rfhub2.model import Healthcheck\n\nrouter = APIRouter()\n\n\[email protected](\"/\", response_model=Healthcheck)\ndef healthcheck(response: Response, db_status: bool = Depends(db_healthcheck)):\n if db_status:\n return Healthcheck(**{\"db\": \"ok\"})\n else:\n response.status_code = 503\n return Healthcheck(**{\"db\": \"failure\"})\n", "id": "5001990", "language": "Python", "matching_score": 1.8259469270706177, "max_stars_count": 0, "path": "rfhub2/api/endpoints/healthcheck.py" }, { "content": "from fastapi import APIRouter\n\nfrom rfhub2.config import APP_TITLE\nfrom rfhub2.model import VersionInfo\nfrom rfhub2.version import version\n\nrouter = APIRouter()\n\n\[email protected](\"/\", 
response_model=VersionInfo)\ndef healthcheck():\n return VersionInfo(**{\"title\": APP_TITLE, \"version\": version})\n", "id": "3795102", "language": "Python", "matching_score": 1.0135107040405273, "max_stars_count": 0, "path": "rfhub2/api/endpoints/version.py" }, { "content": "import os\nfrom pkg_resources import parse_version\nimport requests\n\nfrom rfhub2.version import version\n\n\ndef get_pypi_version() -> str:\n resp = requests.get(\"https://pypi.org/pypi/rfhub2/json\")\n assert resp.status_code == 200\n return resp.json()[\"info\"][\"version\"]\n\n\ndef execute_cmd(cmd: str):\n print(f\"Executing command '{cmd}'\")\n os.system(cmd)\n\n\ndef publish_to_pypi() -> None:\n execute_cmd(\"twine upload dist/*\")\n execute_cmd(\"rm -rf build dist rfhub2.egg-info\")\n\n\ndef should_publish() -> bool:\n return os.getenv(\"TRAVIS_BRANCH\", \"\") == \"master\" and os.getenv(\"TRAVIS_PULL_REQUEST\", \"\") == \"false\"\n\n\nif __name__ == \"__main__\":\n local_version = parse_version(version)\n print(f\"Found local version: {local_version}\")\n pypi_version = parse_version(get_pypi_version())\n print(f\"Found PyPI version: {pypi_version}\")\n if local_version > pypi_version and should_publish():\n publish_to_pypi()\n else:\n print(\"Local version is not greater than PyPI version, publishing skipped\")\n", "id": "5207425", "language": "Python", "matching_score": 0.41195622086524963, "max_stars_count": 0, "path": "publish_pypi.py" }, { "content": "import argparse\n\nparser = argparse.ArgumentParser(\n prog=\"script_for_system_exit\", description=\"I should trigger SystemExit\"\n)\nparser.add_argument(\"-c\", \"--conf\", required=True)\nargs = parser.parse_args()\n", "id": "6670927", "language": "Python", "matching_score": 1.4002379179000854, "max_stars_count": 0, "path": "tests/fixtures/initial/arg_parse.py" }, { "content": "# Copyright 2008-2015 Nokia Networks\n# Copyright 2016- Robot Framework Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ..lexer import Token\nfrom ..model import TestCase, Keyword, For, If\n\n\nclass Parser:\n \"\"\"Base class for parsers.\"\"\"\n\n def __init__(self, model):\n self.model = model\n\n def handles(self, statement):\n raise NotImplementedError\n\n def parse(self, statement):\n raise NotImplementedError\n\n\nclass BlockParser(Parser):\n unhandled_tokens = Token.HEADER_TOKENS | frozenset((Token.TESTCASE_NAME,\n Token.KEYWORD_NAME))\n\n def __init__(self, model):\n Parser.__init__(self, model)\n self.nested_parsers = {Token.FOR: ForParser, Token.IF: IfParser}\n\n def handles(self, statement):\n return statement.type not in self.unhandled_tokens\n\n def parse(self, statement):\n parser_class = self.nested_parsers.get(statement.type)\n if parser_class:\n parser = parser_class(statement)\n self.model.body.append(parser.model)\n return parser\n self.model.body.append(statement)\n return None\n\n\nclass TestCaseParser(BlockParser):\n\n def __init__(self, header):\n BlockParser.__init__(self, TestCase(header))\n\n\nclass KeywordParser(BlockParser):\n\n 
def __init__(self, header):\n BlockParser.__init__(self, Keyword(header))\n\n\nclass NestedBlockParser(BlockParser):\n\n def handles(self, statement):\n return BlockParser.handles(self, statement) and not self.model.end\n\n def parse(self, statement):\n if statement.type == Token.END:\n self.model.end = statement\n return None\n return BlockParser.parse(self, statement)\n\n\nclass ForParser(NestedBlockParser):\n\n def __init__(self, header):\n NestedBlockParser.__init__(self, For(header))\n\n\nclass IfParser(NestedBlockParser):\n\n def __init__(self, header):\n NestedBlockParser.__init__(self, If(header))\n\n def parse(self, statement):\n if statement.type in (Token.ELSE_IF, Token.ELSE):\n parser = OrElseParser(statement)\n self.model.orelse = parser.model\n return parser\n return NestedBlockParser.parse(self, statement)\n\n\nclass OrElseParser(IfParser):\n\n def handles(self, statement):\n return IfParser.handles(self, statement) and statement.type != Token.END\n", "id": "10240519", "language": "Python", "matching_score": 1.6768732070922852, "max_stars_count": 7073, "path": "src/robot/parsing/parser/blockparsers.py" }, { "content": "from rfhub2.db.model.base_class import Base\nfrom rfhub2.db.model.keyword import Keyword\nfrom rfhub2.db.model.collection import Collection\n", "id": "1078739", "language": "Python", "matching_score": 0.22687342762947083, "max_stars_count": 0, "path": "rfhub2/db/base.py" }, { "content": "class Remote:\r\n def remote_dummy_keyword(self):\r\n pass\r\n", "id": "10839914", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/fixtures/initial/remote.py" }, { "content": "from robot.libdocpkg.htmlwriter import DocToHtml\n\n\nclass DocMixin:\n @property\n def synopsis(self) -> str:\n return self.doc.splitlines()[0] if self.doc else \"\"\n\n @property\n def html_doc(self) -> str:\n return DocToHtml(\"ROBOT\")(self.doc) if self.doc else \"\"\n", "id": "80647", "language": "Python", "matching_score": 1.011670708656311, "max_stars_count": 0, "path": "rfhub2/db/model/doc_mixin.py" }, { "content": "from sqlalchemy import Column, Integer, Sequence, Text\nfrom sqlalchemy.orm import relationship\n\nfrom rfhub2.db.model.base_class import Base\nfrom rfhub2.db.model.doc_mixin import DocMixin\n\n\nclass Collection(Base, DocMixin):\n id = Column(Integer, Sequence(\"collection_id_seq\"), primary_key=True)\n name = Column(Text, index=True)\n type = Column(Text)\n version = Column(Text)\n scope = Column(Text)\n named_args = Column(Text)\n path = Column(Text)\n doc = Column(Text)\n doc_format = Column(Text)\n keywords = relationship(\n \"Keyword\",\n backref=\"collection\",\n cascade=\"all, delete-orphan\",\n passive_deletes=True,\n order_by=\"Keyword.name\",\n )\n\n def __str__(self): # pragma: no cover\n return (\n f\"Collection({self.id},{self.name},{self.type},{self.version},\"\n + f\"{self.scope},{self.named_args},{self.path},{self.doc},{self.doc_format})\"\n )\n\n def __repr__(self): # pragma: no cover\n return str(self)\n", "id": "3572374", "language": "Python", "matching_score": 5.126615524291992, "max_stars_count": 0, "path": "rfhub2/db/model/collection.py" }, { "content": "import json\nfrom sqlalchemy import Column, ForeignKey, Integer, Sequence, Text\n\nfrom rfhub2.db.model.base_class import Base\nfrom rfhub2.db.model.doc_mixin import DocMixin\n\n\nclass Keyword(Base, DocMixin):\n id = Column(Integer, Sequence(\"keyword_id_seq\"), primary_key=True)\n name = Column(Text, index=True)\n doc = Column(Text)\n args = Column(Text)\n collection_id = 
Column(\n Integer, ForeignKey(\"collection.id\", ondelete=\"CASCADE\"), nullable=False\n )\n\n def __str__(self): # pragma: no cover\n return f\"Keyword({self.id},{self.name},{self.doc},{self.args},{self.collection_id})\"\n\n def __repr__(self): # pragma: no cover\n return str(self)\n\n @property\n def arg_string(self) -> str:\n \"\"\"\n Old implementation saves args list as JSON in text field, this is more readable representation for UI\n \"\"\"\n return \", \".join(json.loads(self.args)) if self.args else \"\"\n", "id": "12466089", "language": "Python", "matching_score": 1.2683091163635254, "max_stars_count": 0, "path": "rfhub2/db/model/keyword.py" }, { "content": "from pydantic import BaseModel\nfrom typing import List, Optional\n\n\nclass VersionInfo(BaseModel):\n title: str\n version: str\n\n\nclass Healthcheck(BaseModel):\n db: str\n\n\nclass NestedCollection(BaseModel):\n id: int\n name: str\n\n class Config:\n orm_mode = True\n\n\nclass KeywordUpdate(BaseModel):\n name: str\n doc: Optional[str]\n args: Optional[str]\n\n class Config:\n orm_mode = True\n\n\nclass KeywordCreate(KeywordUpdate):\n collection_id: int\n\n\nclass CollectionUpdate(BaseModel):\n name: str\n type: Optional[str]\n version: Optional[str]\n scope: Optional[str]\n named_args: Optional[str]\n path: Optional[str]\n doc: Optional[str]\n doc_format: Optional[str]\n\n\nclass NestedKeyword(KeywordUpdate):\n id: int\n synopsis: Optional[str]\n html_doc: Optional[str]\n arg_string: Optional[str]\n\n\nclass Collection(NestedCollection, CollectionUpdate):\n keywords: List[NestedKeyword]\n synopsis: Optional[str]\n html_doc: Optional[str]\n\n\nclass Keyword(NestedKeyword):\n collection: NestedCollection\n", "id": "7387204", "language": "Python", "matching_score": 2.992391586303711, "max_stars_count": 0, "path": "rfhub2/model/__init__.py" }, { "content": "import unittest\nfrom starlette.testclient import TestClient\n\nfrom rfhub2 import config\nfrom rfhub2.app import create_app\nfrom rfhub2.db.init_db import init_db\nfrom rfhub2.db.sample_data import recreate_data\nfrom rfhub2.db.session import db_session\n\n\nclass BaseApiEndpointTest(unittest.TestCase):\n\n NESTED_COLLECTION_1 = {\"id\": 1, \"name\": \"First collection\"}\n NESTED_KEYWORD_1 = {\n \"id\": 1,\n \"name\": \"Test setup\",\n \"doc\": \"Prepare test environment, use teardown after this one\",\n \"synopsis\": \"Prepare test environment, use teardown after this one\",\n \"html_doc\": \"<p>Prepare test environment, use teardown after this one</p>\",\n \"args\": None,\n \"arg_string\": \"\",\n }\n NESTED_KEYWORD_2 = {\n \"id\": 2,\n \"name\": \"Some keyword\",\n \"doc\": \"Perform some check\",\n \"synopsis\": \"Perform some check\",\n \"html_doc\": \"<p>Perform some check</p>\",\n \"args\": None,\n \"arg_string\": \"\",\n }\n NESTED_KEYWORD_3 = {\n \"id\": 3,\n \"name\": \"Teardown\",\n \"doc\": \"Clean up environment\",\n \"synopsis\": \"Clean up environment\",\n \"html_doc\": \"<p>Clean up environment</p>\",\n \"args\": None,\n \"arg_string\": \"\",\n }\n COLLECTION_1 = {\n \"id\": 1,\n \"name\": \"First collection\",\n \"type\": \"robot\",\n \"version\": None,\n \"scope\": None,\n \"named_args\": None,\n \"path\": None,\n \"doc\": None,\n \"doc_format\": None,\n \"synopsis\": \"\",\n \"html_doc\": \"\",\n \"keywords\": [NESTED_KEYWORD_2, NESTED_KEYWORD_3, NESTED_KEYWORD_1],\n }\n COLLECTION_2 = {\n \"id\": 2,\n \"name\": \"Second collection\",\n \"type\": \"Robot\",\n \"version\": None,\n \"scope\": None,\n \"named_args\": None,\n \"path\": None,\n \"doc\": 
None,\n \"doc_format\": None,\n \"synopsis\": \"\",\n \"html_doc\": \"\",\n \"keywords\": [],\n }\n COLLECTION_3 = {\n \"id\": 3,\n \"name\": \"Third\",\n \"type\": \"Library\",\n \"version\": None,\n \"scope\": None,\n \"named_args\": None,\n \"path\": None,\n \"doc\": None,\n \"doc_format\": None,\n \"synopsis\": \"\",\n \"html_doc\": \"\",\n \"keywords\": [],\n }\n KEYWORD_1 = {\"collection\": NESTED_COLLECTION_1, **NESTED_KEYWORD_1}\n KEYWORD_2 = {\"collection\": NESTED_COLLECTION_1, **NESTED_KEYWORD_2}\n KEYWORD_3 = {\"collection\": NESTED_COLLECTION_1, **NESTED_KEYWORD_3}\n KEYWORD_TO_CREATE = {\n \"name\": \"New Keyword\",\n \"doc\": \"New doc\",\n \"args\": None,\n \"collection_id\": COLLECTION_2[\"id\"],\n }\n KEYWORD_CREATED = {\n \"id\": 4,\n \"name\": \"New Keyword\",\n \"doc\": \"New doc\",\n \"synopsis\": \"New doc\",\n \"html_doc\": \"<p>New doc</p>\",\n \"args\": None,\n \"arg_string\": \"\",\n \"collection\": {\"id\": COLLECTION_2[\"id\"], \"name\": COLLECTION_2[\"name\"]},\n }\n KEYWORD_TO_UPDATE = {\n \"name\": \"Updated Teardown\",\n \"doc\": \"Updated Clean up environment\",\n \"synopsis\": \"Updated Clean up environment\",\n \"html_doc\": \"<p>Updated Clean up environment</p>\",\n }\n KEYWORD_UPDATED = {**KEYWORD_3, **KEYWORD_TO_UPDATE}\n COLLECTION_TO_CREATE = {\n \"name\": \"New Resource\",\n \"type\": \"Resource\",\n \"version\": \"1.0.2\",\n \"scope\": None,\n \"named_args\": \"conn_string\",\n \"path\": \"/some/file\",\n \"doc\": \"New Resource collection\",\n \"doc_format\": None,\n }\n COLLECTION_CREATED = {\n **COLLECTION_TO_CREATE,\n \"id\": 4,\n \"synopsis\": \"New Resource collection\",\n \"html_doc\": \"<p>New Resource collection</p>\",\n \"keywords\": [],\n }\n COLLECTION_TO_UPDATE = {\n \"name\": \"Updated collection\",\n \"version\": \"1.0.2-NEW\",\n \"path\": \"/some/file\",\n }\n COLLECTION_UPDATED = {**COLLECTION_3, **COLLECTION_TO_UPDATE}\n\n def setUp(self) -> None:\n self.app = create_app()\n db_session.rollback()\n init_db(db_session)\n recreate_data(db_session)\n self.client: TestClient = TestClient(self.app)\n self.auth_client: TestClient = TestClient(self.app)\n self.auth_client.auth = (config.BASIC_AUTH_USER, config.BASIC_AUTH_PASSWORD)\n", "id": "12350702", "language": "Python", "matching_score": 2.931844472885132, "max_stars_count": 0, "path": "tests/unit/api/endpoints/base_endpoint_tests.py" }, { "content": "import unittest\n\nfrom rfhub2.db.base import Collection, Keyword\nfrom rfhub2.db.init_db import init_db\nfrom rfhub2.db.repository.collection_repository import CollectionRepository\nfrom rfhub2.db.repository.keyword_repository import KeywordRepository\nfrom rfhub2.db.session import db_session\n\n\nclass BaseRepositoryTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n init_db(db_session)\n\n def setUp(self) -> None:\n db_session.rollback()\n db_session.query(Keyword).delete()\n db_session.query(Collection).delete()\n self.collection_repo = CollectionRepository(db_session)\n self.keyword_repo = KeywordRepository(db_session)\n self.keywords = [\n Keyword(\n name=\"Test setup\",\n doc=\"Prepare test environment, use teardown after this one\",\n ),\n Keyword(name=\"Login keyword\", doc=\"Perform some check\"),\n Keyword(name=\"Teardown\", doc=\"Clean up environment\"),\n ]\n self.app_keyword = Keyword(name=\"Login to Application\")\n\n self.collections = [\n Collection(name=\"First collection\", type=\"robot\", keywords=self.keywords),\n Collection(\n name=\"Second collection\", type=\"Robot\", 
keywords=[self.app_keyword]\n ),\n Collection(name=\"Third\", type=\"Library\"),\n ]\n self.sorted_keywords = sorted(\n self.keywords + [self.app_keyword], key=lambda k: k.name\n )\n db_session.add_all(self.collections)\n db_session.commit()\n for item in self.collections:\n db_session.refresh(item)\n\n def tearDown(self):\n db_session.expunge_all()\n", "id": "3233065", "language": "Python", "matching_score": 4.141103267669678, "max_stars_count": 0, "path": "tests/unit/db/base_repo_tests.py" }, { "content": "from sqlalchemy.orm.session import Session\n\nfrom rfhub2.db.base import Collection, Keyword\n\n\ndef recreate_data(session: Session) -> None:\n session.query(Keyword).delete()\n session.query(Collection).delete()\n keywords = [\n Keyword(\n name=\"Test setup\",\n doc=\"Prepare test environment, use teardown after this one\",\n ),\n Keyword(name=\"Some keyword\", doc=\"Perform some check\"),\n Keyword(name=\"Teardown\", doc=\"Clean up environment\"),\n ]\n collections = [\n Collection(name=\"First collection\", type=\"robot\", keywords=keywords),\n Collection(name=\"Second collection\", type=\"Robot\"),\n Collection(name=\"Third\", type=\"Library\"),\n ]\n session.add_all(collections)\n session.commit()\n", "id": "4122660", "language": "Python", "matching_score": 1.165908932685852, "max_stars_count": 0, "path": "rfhub2/db/sample_data.py" }, { "content": "from pathlib import Path\nimport re\nfrom robot.errors import DataError\nfrom robot.libdocpkg import LibraryDocumentation\nimport robot.libraries\nfrom typing import Dict, List, Set, Tuple\n\nfrom .api_client import Client\n\n\nRESOURCE_PATTERNS = {\".robot\", \".txt\", \".tsv\", \".resource\"}\nALL_PATTERNS = RESOURCE_PATTERNS | {\".xml\", \".py\"}\nEXCLUDED_LIBRARIES = {\n \"remote.py\",\n \"reserved.py\",\n \"dialogs.py\",\n \"dialogs_jy.py\",\n \"dialogs_py.py\",\n \"dialogs_ipy.py\",\n \"setup.py\",\n}\nINIT_FILES = {\"__init__.txt\", \"__init__.robot\", \"__init__.html\", \"__init__.tsv\"}\n\n\nclass RfhubImporter(object):\n def __init__(\n self,\n client: Client,\n paths: Tuple[Path, ...],\n no_installed_keywords: bool,\n mode: str,\n ) -> None:\n self.paths = paths\n self.no_installed_keywords = no_installed_keywords\n self.mode = mode\n self.client = client\n\n def delete_all_collections(self) -> Set[int]:\n \"\"\"\n Deletes all existing collections.\n \"\"\"\n collections_id = set()\n while len(self.client.get_collections()) > 0:\n collections_id.update(self._delete_collections())\n return collections_id\n\n def get_all_collections(self) -> List[Dict]:\n \"\"\"Gets all collections from application\"\"\"\n collections = []\n for i in range(0, 999999, 100):\n collection_slice = self.client.get_collections(i)\n if len(collection_slice) == 0:\n break\n collections += collection_slice\n return collections\n\n def _delete_collections(self) -> Set[int]:\n \"\"\"\n Helper method to delete all existing callections.\n \"\"\"\n collections = self.client.get_collections()\n collections_id = {collection[\"id\"] for collection in collections}\n for id in collections_id:\n self.client.delete_collection(id)\n return collections_id\n\n def import_libraries(self) -> Tuple[int, int]:\n \"\"\"\n Import libraries to application from paths specified when invoking client.\n :return: Number of libraries loaded\n \"\"\"\n libraries_paths = self.get_libraries_paths()\n collections = self.create_collections(libraries_paths)\n if self.mode == \"append\":\n loaded_collections = self.add_collections(collections)\n elif self.mode == \"insert\":\n 
self.delete_all_collections()\n loaded_collections = self.add_collections(collections)\n else:\n existing_collections = self.get_all_collections()\n loaded_collections = self.update_collections(\n existing_collections, collections\n )\n self.delete_outdated_collections(existing_collections, collections)\n return len(loaded_collections), sum(d[\"keywords\"] for d in loaded_collections)\n\n def get_libraries_paths(self) -> Set[Path]:\n \"\"\"\n Traverses all given paths and returns set with paths\n pointing to libraries to import to app.\n :return: Set of Paths object pointing to libraries to import\n \"\"\"\n libraries_paths = set()\n for path in self.paths:\n libraries_paths.update(self._traverse_paths(Path(path)))\n if not self.no_installed_keywords:\n libdir = Path(robot.libraries.__file__).parent\n libraries_paths.update(self._traverse_paths(Path(libdir)))\n return libraries_paths\n\n def _traverse_paths(self, path: Path) -> Set[Path]:\n \"\"\"\n Traverses through paths and adds libraries to rfhub.\n Helper function for get_library_paths.\n \"\"\"\n valid_lib_paths = set()\n if self._is_library_with_init(path):\n valid_lib_paths.add(path)\n else:\n for item in path.iterdir():\n if item.is_dir():\n if self._is_library_with_init(item):\n valid_lib_paths.add(item)\n else:\n valid_lib_paths.update(self._traverse_paths(item))\n elif (\n item.is_file()\n and self._is_robot_keyword_file(item)\n and not self._should_ignore(item)\n ):\n valid_lib_paths.add(item)\n return valid_lib_paths\n\n def create_collections(self, paths: Set[Path]) -> List[Dict]:\n \"\"\"\n Creates list of Collection objects from set of provided paths.\n :param paths: set of paths\n :return: list of Collection objects\n \"\"\"\n collections = []\n for path in paths:\n try:\n collection = self.create_collection(path)\n collections.append(collection)\n except (DataError, SystemExit) as ex:\n print(\n f\"Failed to create collection from path {path}\\n\"\n f\"{type(ex).__name__}, {ex.args}\"\n )\n return collections\n\n def create_collection(self, path: Path) -> Dict:\n \"\"\"\n Creates Collection object from provided path.\n :param path: Path\n :return: Collection object\n \"\"\"\n libdoc = LibraryDocumentation(str(path))\n serialised_keywords = self._serialise_keywords(libdoc)\n return self._serialise_libdoc(libdoc, str(path), serialised_keywords)\n\n def update_collections(\n self, existing_collections: List[Dict], new_collections: List[Dict]\n ) -> List[Dict[str, int]]:\n \"\"\"\n Updates collections already existing in app.\n :param existing_collections: List of existing collections object\n :param new_collections: List of new collections object\n :return: list of dictionaries with collection name and number of keywords.\n \"\"\"\n collections_to_update = self._get_collections_to_update(\n existing_collections, new_collections\n )\n collections_to_insert = self._get_new_collections(\n existing_collections, new_collections\n )\n return self.add_collections(collections_to_update + collections_to_insert)\n\n def delete_outdated_collections(\n self, existing_collections: List[Dict], new_collections: List[Dict]\n ) -> None:\n \"\"\"Deletes outdated collections\"\"\"\n collections_to_delete = self._get_outdated_collections_ids(\n existing_collections, new_collections\n ) | self._get_obsolete_collections_ids(existing_collections, new_collections)\n for collection in collections_to_delete:\n self.client.delete_collection(collection)\n return collections_to_delete\n\n def add_collections(self, collections: List[Dict]) -> 
List[Dict[str, int]]:\n \"\"\"\n Adds collections and keywords from provided list to app.\n :param collections: List of collections object\n :return: list of dictionaries with collection name and number of keywords.\n \"\"\"\n loaded_collections = []\n for collection in collections:\n coll_req = self.client.add_collection(collection)\n if coll_req[0] != 201:\n print(coll_req[1][\"detail\"])\n raise StopIteration\n collection_id = coll_req[1][\"id\"]\n for keyword in collection[\"keywords\"]:\n keyword[\"collection_id\"] = collection_id\n self.client.add_keyword(keyword)\n loaded_collections.append(\n {\"name\": collection[\"name\"], \"keywords\": len(collection[\"keywords\"])}\n )\n print(\n f'{collection[\"name\"]} library with {len(collection[\"keywords\"])} keywords loaded.'\n )\n return loaded_collections\n\n def _serialise_libdoc(self, libdoc: Dict, path: str, keywords: Dict) -> Dict:\n \"\"\"\n Serialises libdoc object to Collection object.\n :param libdoc: libdoc input object\n :param path: library path\n :return: Collection object\n \"\"\"\n\n lib_dict = libdoc.__dict__\n lib_dict[\"doc_format\"] = lib_dict.pop(\"_setter__doc_format\")\n for key in (\"_setter__keywords\", \"inits\", \"named_args\"):\n lib_dict.pop(key)\n lib_dict[\"path\"] = path\n lib_dict[\"keywords\"] = keywords\n return lib_dict\n\n def _serialise_keywords(self, libdoc: Dict) -> Dict:\n \"\"\"\n Serialises keywords to Keyword object.\n :param :libdoc input object\n :return: Keyword object\n \"\"\"\n\n keywords = [keyword.__dict__ for keyword in libdoc.keywords]\n for keyword in keywords:\n keyword.pop(\"tags\")\n if keyword[\"args\"]:\n keyword[\"args\"] = str(\n [\n str(item).replace(\"'\", \"\").replace('\"', \"\")\n for item in keyword[\"args\"]\n ]\n ).replace(\"'\", '\"')\n else:\n keyword[\"args\"] = \"\"\n return keywords\n\n @staticmethod\n def _is_library_with_init(path: Path) -> bool:\n return (path / \"__init__.py\").is_file() and len(\n LibraryDocumentation(str(path)).keywords\n ) > 0\n\n def _is_robot_keyword_file(self, file: Path) -> bool:\n return (\n self._is_library_file(file)\n or self._is_libdoc_file(file)\n or self._is_resource_file(file)\n )\n\n @staticmethod\n def _is_library_file(file: Path) -> bool:\n return file.suffix == \".py\" and file.name != \"__init__.py\"\n\n @staticmethod\n def _is_libdoc_file(file: Path) -> bool:\n \"\"\"Return true if an xml file looks like a libdoc file\"\"\"\n # inefficient since we end up reading the file twice,\n # but it's fast enough for our purposes, and prevents\n # us from doing a full parse of files that are obviously\n # not libdoc files\n if file.suffix == \".xml\":\n with open(file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n # read the first few lines; if we don't see\n # what looks like libdoc data, return false\n data = f.read(200)\n index = data.lower().find(\"<keywordspec \")\n return index > 0\n return False\n\n @staticmethod\n def _should_ignore(file: Path) -> bool:\n \"\"\"Return True if a given library name should be ignored\n This is necessary because not all files we find in the library\n folder are libraries.\n \"\"\"\n filename = file.name.lower()\n return (\n filename.startswith(\"deprecated\")\n or filename.startswith(\"_\")\n or filename in EXCLUDED_LIBRARIES\n )\n\n @staticmethod\n def _is_resource_file(file: Path) -> bool:\n \"\"\"Returns true if the file has a keyword table but not a testcase table.\"\"\"\n # inefficient since we end up reading the file twice,\n # but it's fast enough for our purposes, and prevents\n # 
us from doing a full parse of files that are obviously\n # not robot files\n\n if file.name not in INIT_FILES and file.suffix in RESOURCE_PATTERNS:\n with open(file, \"r\", encoding=\"utf-8\", errors=\"ignore\") as f:\n data = f.read()\n return not RfhubImporter._has_test_case_table(\n data\n ) and RfhubImporter._has_keyword_table(data)\n return False\n\n @staticmethod\n def _has_keyword_table(data: str) -> bool:\n \"\"\"Returns true if file has keyword or user keyword table\"\"\"\n return (\n re.search(\n r\"^\\*+\\s*((?:User )?Keywords?)\", data, re.MULTILINE | re.IGNORECASE\n )\n is not None\n )\n\n @staticmethod\n def _has_test_case_table(data: str) -> bool:\n \"\"\"Returns true if file has keyword or user keyword table\"\"\"\n return (\n re.search(r\"^\\*+\\s*(Test Cases?)\", data, re.MULTILINE | re.IGNORECASE)\n is not None\n )\n\n @staticmethod\n def _get_obsolete_collections_ids(\n existing_collections: List[Dict], new_collections: List[Dict]\n ) -> Set[int]:\n \"\"\"Returns set of collection ids that were found in application but not in paths\"\"\"\n new_collections_paths = {\n new_collection[\"path\"] for new_collection in new_collections\n }\n return {\n existing_collection[\"id\"]\n for existing_collection in existing_collections\n if existing_collection[\"path\"] not in new_collections_paths\n }\n\n @staticmethod\n def _get_outdated_collections_ids(\n existing_collections: List[Dict], new_collections: List[Dict]\n ) -> Set[int]:\n \"\"\"Returns set of collection ids that were found in application but are outdated\"\"\"\n outdated_collections = set()\n if len(existing_collections) > 0:\n for new_collection in new_collections:\n for existing_collection in existing_collections:\n reduced_collection = RfhubImporter._reduce_collection_items(\n new_collection, existing_collection\n )\n if RfhubImporter._collection_path_and_name_match(\n new_collection, reduced_collection\n ) and RfhubImporter._library_or_resource_changed(\n new_collection, reduced_collection\n ):\n outdated_collections.add(existing_collection[\"id\"])\n return outdated_collections\n\n @staticmethod\n def _get_collections_to_update(\n existing_collections: List[Dict], new_collections: List[Dict]\n ) -> List[Dict]:\n \"\"\"Returns list of collections to update that were found in paths and application\"\"\"\n collections_to_update = []\n if len(existing_collections) >= 0:\n for new_collection in new_collections:\n for existing_collection in existing_collections:\n reduced_collection = RfhubImporter._reduce_collection_items(\n new_collection, existing_collection\n )\n if RfhubImporter._collection_path_and_name_match(\n new_collection, reduced_collection\n ):\n if RfhubImporter._library_or_resource_changed(\n new_collection, reduced_collection\n ):\n collections_to_update.append(new_collection)\n return collections_to_update\n\n @staticmethod\n def _get_new_collections(\n existing_collections: List[Dict], new_collections: List[Dict]\n ) -> List[Dict]:\n \"\"\"Returns list of collections to insert that were found in paths but not in application\"\"\"\n existing_collections_paths = {\n existing_collection[\"path\"] for existing_collection in existing_collections\n }\n return [\n new_collection\n for new_collection in new_collections\n if new_collection[\"path\"] not in existing_collections_paths\n ]\n\n @staticmethod\n def _reduce_collection_items(\n new_collection: Dict, existing_collection: Dict\n ) -> Dict:\n reduced_collection = RfhubImporter._get_reduced_collection(\n new_collection, existing_collection\n )\n 
reduced_collection[\"keywords\"] = RfhubImporter._get_reduced_keywords(\n new_collection[\"keywords\"], reduced_collection[\"keywords\"]\n )\n return reduced_collection\n\n @staticmethod\n def _get_reduced_collection(\n new_collection: Dict, existing_collection: Dict\n ) -> Dict:\n \"\"\"Returns existing_collection dictionary with key/value pairs reduced to the ones from new_collection\"\"\"\n return {k: existing_collection.get(k) for k in new_collection.keys()}\n\n @staticmethod\n def _get_reduced_keywords(\n new_collection_keywords: List[Dict], existing_collection_keywords: List[Dict]\n ) -> List[Dict]:\n reduced_keywords_list = []\n if min(len(new_collection_keywords), len(existing_collection_keywords)) > 0:\n reduced_keywords_list = [\n {\n k: v\n for k, v in keyword.items()\n if k in new_collection_keywords[0].keys()\n }\n for keyword in existing_collection_keywords\n ]\n return reduced_keywords_list\n\n @staticmethod\n def _collection_path_and_name_match(\n new_collection: Dict, existing_collection: Dict\n ) -> bool:\n return (\n new_collection[\"name\"] == existing_collection[\"name\"]\n and new_collection[\"path\"] == existing_collection[\"path\"]\n )\n\n @staticmethod\n def _library_or_resource_changed(\n new_collection: Dict, existing_collection: Dict\n ) -> bool:\n if new_collection[\"type\"] == \"library\":\n return new_collection[\"version\"] != existing_collection[\"version\"]\n else:\n return (\n any(\n keyword not in new_collection[\"keywords\"]\n for keyword in existing_collection[\"keywords\"]\n )\n or any(\n keyword not in existing_collection[\"keywords\"]\n for keyword in new_collection[\"keywords\"]\n )\n or RfhubImporter._library_or_resource_doc_changed(\n new_collection, existing_collection\n )\n )\n\n @staticmethod\n def _library_or_resource_doc_changed(\n new_collection: Dict, existing_collection: Dict\n ) -> bool:\n \"\"\"Returns true if collection overall documentation has changed.\n Does not check for keywords changes\"\"\"\n return {k: v for k, v in new_collection.items() if k != \"keywords\"} != {\n k: v for k, v in existing_collection.items() if k != \"keywords\"\n }\n", "id": "6766181", "language": "Python", "matching_score": 5.5646891593933105, "max_stars_count": 0, "path": "rfhub2/cli/rfhub_importer.py" }, { "content": "import copy\nimport responses\nimport unittest\nfrom pathlib import Path\nfrom robot.libdocpkg import LibraryDocumentation\nimport robot.libraries\n\nfrom rfhub2.cli.rfhub_importer import RfhubImporter\nfrom rfhub2.cli.api_client import Client\n\nFIXTURE_PATH = Path.cwd() / \"tests\" / \"fixtures\" / \"initial\"\nEXPECTED_LIBDOC = {\n \"doc\": \"Documentation for library ``Test Libdoc File``.\",\n \"doc_format\": \"ROBOT\",\n \"name\": \"Test Libdoc File\",\n \"scope\": \"global\",\n \"type\": \"library\",\n \"version\": \"3.2.0\",\n \"keywords\": [{\"name\": \"Someone Shall Pass\", \"args\": '[\"who\"]', \"doc\": \"\"}],\n}\nEXPECTED_KEYWORDS = [\n {\n \"args\": \"\",\n \"doc\": \"This keyword was imported from file\\n\"\n \"with .resource extension, available since RFWK 3.1\",\n \"name\": \"Keyword 1 Imported From Resource File\",\n },\n {\n \"args\": '[\"arg_1\", \"arg_2\"]',\n \"doc\": \"This keyword was imported from file\\n\"\n \"with .resource extension, available since RFWK 3.1\",\n \"name\": \"Keyword 2 Imported From Resource File\",\n },\n]\nEXPECTED_TRAVERSE_PATHS_INIT = {FIXTURE_PATH / \"LibWithInit\"}\nEXPECTED_TRAVERSE_PATHS_NO_INIT = {\n FIXTURE_PATH / \"LibsWithEmptyInit\" / \"LibWithEmptyInit1.py\",\n FIXTURE_PATH / 
\"LibsWithEmptyInit\" / \"LibWithEmptyInit2.py\",\n}\nEXPECTED_GET_LIBRARIES = (\n EXPECTED_TRAVERSE_PATHS_INIT\n | EXPECTED_TRAVERSE_PATHS_NO_INIT\n | {\n FIXTURE_PATH / \"SingleClassLib\" / \"SingleClassLib.py\",\n FIXTURE_PATH / \"test_libdoc_file.xml\",\n FIXTURE_PATH / \"test_resource.resource\",\n FIXTURE_PATH / \"test_robot.robot\",\n FIXTURE_PATH / \"arg_parse.py\",\n FIXTURE_PATH / \"data_error.py\",\n }\n)\nEXPECTED_COLLECTION = {\n \"doc\": \"Overview that should be imported for SingleClassLib.\",\n \"doc_format\": \"ROBOT\",\n \"keywords\": [\n {\n \"args\": \"\",\n \"doc\": \"Docstring for single_class_lib_method_1\",\n \"name\": \"Single Class Lib Method 1\",\n },\n {\n \"args\": \"\",\n \"doc\": \"Docstring for single_class_lib_method_2\",\n \"name\": \"Single Class Lib Method 2\",\n },\n {\n \"args\": '[\"param_1\", \"param_2\"]',\n \"doc\": \"Docstring for single_class_lib_method_3 with two params\",\n \"name\": \"Single Class Lib Method 3\",\n },\n ],\n \"name\": \"SingleClassLib\",\n \"path\": str(FIXTURE_PATH / \"SingleClassLib\" / \"SingleClassLib.py\"),\n \"scope\": \"test case\",\n \"type\": \"library\",\n \"version\": \"1.2.3\",\n}\nEXPECTED_COLLECTION2 = {\n \"doc\": \"Documentation for library ``Test Libdoc File``.\",\n \"doc_format\": \"ROBOT\",\n \"keywords\": [{\"args\": '[\"who\"]', \"doc\": \"\", \"name\": \"Someone Shall Pass\"}],\n \"name\": \"Test Libdoc File\",\n \"path\": str(FIXTURE_PATH / \"test_libdoc_file.xml\"),\n \"scope\": \"global\",\n \"type\": \"library\",\n \"version\": \"3.2.0\",\n}\nEXPECTED_ADD_COLLECTIONS = [{\"name\": \"Test Libdoc File\", \"keywords\": 1}]\nEXPECTED_UPDATE_COLLECTIONS = [\n {\"name\": \"a\", \"keywords\": 3},\n {\"name\": \"b\", \"keywords\": 3},\n {\"name\": \"c\", \"keywords\": 3},\n {\"name\": \"e\", \"keywords\": 3},\n]\nKEYWORDS_1 = [\n {\n \"args\": \"\",\n \"doc\": \"Docstring for single_class_lib_method_1\",\n \"name\": \"Single Class Lib Method 1\",\n },\n {\n \"args\": \"\",\n \"doc\": \"Docstring for single_class_lib_method_2\",\n \"name\": \"Single Class Lib Method 2\",\n },\n {\n \"args\": '[\"param_1\", \"param_2\"]',\n \"doc\": \"Docstring for single_class_lib_method_3 with two params\",\n \"name\": \"Single Class Lib Method 3\",\n },\n]\nKEYWORDS_2 = [{\"args\": '[\"who\"]', \"doc\": \"\", \"name\": \"<NAME>\"}]\nKEYWORDS_EXTENDED = [\n {\n \"args\": \"\",\n \"doc\": \"Docstring for single_class_lib_method_1\",\n \"name\": \"Single Class Lib Method 1\",\n \"id\": 15,\n \"synopsis\": \"Docstring for lib_with_empty_init_1_method_1\",\n \"html_doc\": \"<p>Docstring for lib_with_empty_init_1_method_1</p>\",\n \"arg_string\": \"\",\n },\n {\n \"args\": \"\",\n \"doc\": \"Docstring for single_class_lib_method_2\",\n \"name\": \"Single Class Lib Method 2\",\n \"id\": 16,\n \"synopsis\": \"Docstring for lib_with_empty_init_1_method_1\",\n \"html_doc\": \"<p>Docstring for lib_with_empty_init_1_method_1</p>\",\n \"arg_string\": \"\",\n },\n {\n \"args\": '[\"param_1\", \"param_2\"]',\n \"doc\": \"Docstring for single_class_lib_method_3 with two params\",\n \"name\": \"Single Class Lib Method 3\",\n \"id\": 17,\n \"synopsis\": \"Docstring for lib_with_empty_init_1_method_1\",\n \"html_doc\": \"<p>Docstring for lib_with_empty_init_1_method_1</p>\",\n \"arg_string\": \"\",\n },\n]\n\nEXPECTED_BUILT_IN_LIBS = {\n Path(robot.libraries.__file__).parent / \"BuiltIn.py\",\n Path(robot.libraries.__file__).parent / \"Collections.py\",\n Path(robot.libraries.__file__).parent / \"DateTime.py\",\n 
Path(robot.libraries.__file__).parent / \"Easter.py\",\n Path(robot.libraries.__file__).parent / \"OperatingSystem.py\",\n Path(robot.libraries.__file__).parent / \"Process.py\",\n Path(robot.libraries.__file__).parent / \"Screenshot.py\",\n Path(robot.libraries.__file__).parent / \"String.py\",\n Path(robot.libraries.__file__).parent / \"Telnet.py\",\n Path(robot.libraries.__file__).parent / \"XML.py\",\n}\n\n\nclass RfhubImporterTests(unittest.TestCase):\n def setUp(self) -> None:\n self.fixture_path = FIXTURE_PATH\n self.client = Client(\"http://localhost:8000\", \"rfhub\", \"rfhub\")\n self.rfhub_importer = RfhubImporter(\n self.client, (self.fixture_path,), True, mode=\"insert\"\n )\n\n def test_import_libraries_insert_mode(self):\n with responses.RequestsMock() as rsps:\n rfhub_importer = RfhubImporter(\n self.client, (self.fixture_path / \"LibWithInit\",), True, mode=\"insert\"\n )\n rsps.add(\n responses.GET,\n f\"{self.client.api_url}/collections/\",\n json=[],\n status=200,\n adding_headers={\"Content-Type\": \"application/json\"},\n )\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/collections/\",\n json={\"name\": \"LibWithInit\", \"id\": 2},\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/keywords/\",\n json=KEYWORDS_2,\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n result = rfhub_importer.import_libraries()\n self.assertCountEqual(result, (1, 4), msg=f\"{result}\")\n\n def test_import_libraries_append_mode(self):\n with responses.RequestsMock() as rsps:\n rfhub_importer = RfhubImporter(\n self.client, (self.fixture_path / \"LibWithInit\",), True, mode=\"append\"\n )\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/collections/\",\n json={\"name\": \"LibWithInit\", \"id\": 2},\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/keywords/\",\n json=KEYWORDS_2,\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n result = rfhub_importer.import_libraries()\n self.assertCountEqual(result, (1, 4), msg=f\"{result}\")\n\n def test_import_libraries_update_mode(self):\n with responses.RequestsMock() as rsps:\n rfhub_importer = RfhubImporter(\n self.client, (self.fixture_path / \"LibWithInit\",), True, mode=\"update\"\n )\n rsps.add(\n responses.GET,\n f\"{self.client.api_url}/collections/\",\n json=[],\n status=200,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/collections/\",\n json={\"name\": \"LibWithInit\", \"id\": 2},\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/keywords/\",\n json=KEYWORDS_2,\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n result = rfhub_importer.import_libraries()\n self.assertCountEqual(result, (1, 4), msg=f\"{result}\")\n\n def test_delete_all_collections(self):\n with responses.RequestsMock() as rsps:\n for i in [2, 2, 66, 66]:\n rsps.add(\n responses.GET,\n f\"{self.client.api_url}/collections/\",\n json=[{\"id\": i}],\n status=200,\n 
adding_headers={\"Content-Type\": \"application/json\"},\n )\n rsps.add(\n responses.DELETE,\n f\"{self.client.api_url}/collections/2/\",\n status=204,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.DELETE,\n f\"{self.client.api_url}/collections/66/\",\n status=204,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.GET,\n f\"{self.client.api_url}/collections/\",\n json=[],\n status=200,\n adding_headers={\"Content-Type\": \"application/json\"},\n )\n result = self.rfhub_importer.delete_all_collections()\n self.assertEqual({2, 66}, result)\n\n def test_delete_collections(self):\n with responses.RequestsMock() as rsps:\n rsps.add(\n responses.GET,\n f\"{self.client.api_url}/collections/\",\n json=[{\"id\": 2}, {\"id\": 66}],\n status=200,\n adding_headers={\"Content-Type\": \"application/json\"},\n )\n rsps.add(\n responses.DELETE,\n f\"{self.client.api_url}/collections/2/\",\n status=204,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.DELETE,\n f\"{self.client.api_url}/collections/66/\",\n status=204,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n result = self.rfhub_importer._delete_collections()\n self.assertEqual({2, 66}, result)\n\n def test_get_all_collections_should_return_all_collections(self):\n with responses.RequestsMock() as rsps:\n for i in (0, 100):\n rsps.add(\n responses.GET,\n f\"{self.client.api_url}/collections/?skip={i}&limit=100\",\n json=[{\"id\": i}],\n status=200,\n adding_headers={\"Content-Type\": \"application/json\"},\n )\n rsps.add(\n responses.GET,\n f\"{self.client.api_url}/collections/?skip=200&limit=100\",\n json=[],\n status=200,\n adding_headers={\"Content-Type\": \"application/json\"},\n )\n result = self.rfhub_importer.get_all_collections()\n self.assertListEqual([{\"id\": 0}, {\"id\": 100}], result)\n\n def test_traverse_paths_should_return_set_of_path_on_lib_with_init(self):\n result = self.rfhub_importer._traverse_paths(self.fixture_path / \"LibWithInit\")\n self.assertEqual(result, EXPECTED_TRAVERSE_PATHS_INIT)\n\n def test_traverse_paths_should_return_set_of_paths_on_libs_with_empty_init(self):\n result = self.rfhub_importer._traverse_paths(\n self.fixture_path / \"LibsWithEmptyInit\"\n )\n self.assertEqual(result, EXPECTED_TRAVERSE_PATHS_NO_INIT)\n\n def test_get_libraries_paths_should_return_set_of_paths(self):\n result = self.rfhub_importer.get_libraries_paths()\n self.assertEqual(result, EXPECTED_GET_LIBRARIES)\n\n def test_get_libraries_paths_should_return_set_of_paths_on_installed_keywords(self):\n self.rfhub_importer = RfhubImporter(self.client, tuple(), False, False)\n result = self.rfhub_importer.get_libraries_paths()\n self.assertEqual(result, EXPECTED_BUILT_IN_LIBS)\n\n def test_get_libraries_paths_should_return_set_of_paths_when_paths_are_tuple(self):\n self.rfhub_importer = RfhubImporter(\n self.client,\n (\n self.fixture_path / \"LibWithInit\",\n self.fixture_path / \"LibsWithEmptyInit\",\n ),\n True,\n False,\n )\n result = self.rfhub_importer.get_libraries_paths()\n self.assertEqual(\n result, EXPECTED_TRAVERSE_PATHS_INIT | EXPECTED_TRAVERSE_PATHS_NO_INIT\n )\n\n def test__create_collections_should_return_collection_list(self):\n result = self.rfhub_importer.create_collections(\n {\n FIXTURE_PATH / \"SingleClassLib\" / 
\"SingleClassLib.py\",\n FIXTURE_PATH / \"test_libdoc_file.xml\",\n }\n )\n self.assertCountEqual(result, [EXPECTED_COLLECTION, EXPECTED_COLLECTION2])\n\n def test_create_collection_should_return_collection(self):\n result = self.rfhub_importer.create_collection(\n FIXTURE_PATH / \"SingleClassLib\" / \"SingleClassLib.py\"\n )\n self.assertDictEqual(EXPECTED_COLLECTION, result)\n\n def test_create_collections_should_return_empty_list_on_data_error(self):\n result = self.rfhub_importer.create_collections(\n {FIXTURE_PATH / \"data_error.py\"}\n )\n self.assertListEqual([], result)\n\n def test_create_collections_should_return_empty_list_on_system_exit(self):\n result = self.rfhub_importer.create_collections({FIXTURE_PATH / \"arg_parse.py\"})\n self.assertListEqual([], result)\n\n def test_update_collections_should_insert_collections(self):\n existing_collections = [\n {\n \"id\": 1,\n \"path\": \"1\",\n \"type\": \"library\",\n \"version\": \"1\",\n \"name\": \"a\",\n \"keywords\": KEYWORDS_EXTENDED,\n },\n {\n \"id\": 2,\n \"path\": \"2\",\n \"type\": \"library\",\n \"version\": \"2\",\n \"name\": \"b\",\n \"keywords\": KEYWORDS_EXTENDED,\n },\n {\n \"id\": 3,\n \"path\": \"3\",\n \"type\": \"library\",\n \"version\": \"3\",\n \"name\": \"c\",\n \"keywords\": KEYWORDS_EXTENDED,\n },\n {\n \"id\": 4,\n \"path\": \"4\",\n \"type\": \"resource\",\n \"version\": \"\",\n \"name\": \"d\",\n \"keywords\": KEYWORDS_EXTENDED,\n },\n {\n \"id\": 5,\n \"path\": \"5\",\n \"type\": \"resource\",\n \"version\": \"\",\n \"name\": \"e\",\n \"keywords\": KEYWORDS_2,\n },\n ]\n\n new_collections = [\n {\n \"id\": 1,\n \"path\": \"1\",\n \"type\": \"library\",\n \"version\": \"2\",\n \"name\": \"a\",\n \"keywords\": KEYWORDS_1,\n },\n {\n \"id\": 2,\n \"path\": \"2\",\n \"type\": \"library\",\n \"version\": \"3\",\n \"name\": \"b\",\n \"keywords\": KEYWORDS_1,\n },\n {\n \"id\": 3,\n \"path\": \"3\",\n \"type\": \"library\",\n \"version\": \"4\",\n \"name\": \"c\",\n \"keywords\": KEYWORDS_1,\n },\n {\n \"id\": 4,\n \"path\": \"4\",\n \"type\": \"resource\",\n \"version\": \"\",\n \"name\": \"d\",\n \"keywords\": KEYWORDS_1,\n },\n {\n \"id\": 5,\n \"path\": \"5\",\n \"type\": \"resource\",\n \"version\": \"\",\n \"name\": \"e\",\n \"keywords\": KEYWORDS_1,\n },\n ]\n with responses.RequestsMock() as rsps:\n for i in range(1, 5):\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/collections/\",\n json=new_collections[0],\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n for j in range(1, 4):\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/keywords/\",\n json=new_collections[0][\"keywords\"][0],\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n result = self.rfhub_importer.update_collections(\n existing_collections, new_collections\n )\n self.assertCountEqual(EXPECTED_UPDATE_COLLECTIONS, result)\n\n def test_delete_outdated_collections_should_delete_outdated_collections(self):\n existing_collections = [\n {\n \"id\": 1,\n \"path\": \"1\",\n \"type\": \"library\",\n \"version\": \"1\",\n \"name\": \"a\",\n \"keywords\": [],\n },\n {\n \"id\": 2,\n \"path\": \"2\",\n \"type\": \"library\",\n \"version\": \"2\",\n \"name\": \"b\",\n \"keywords\": [],\n },\n {\n \"id\": 3,\n \"path\": \"3\",\n \"type\": \"library\",\n \"version\": \"3\",\n \"name\": \"c\",\n \"keywords\": [],\n },\n ]\n\n new_collections = [\n {\n \"id\": 1,\n \"path\": \"1\",\n \"type\": 
\"library\",\n \"version\": \"2\",\n \"name\": \"a\",\n \"keywords\": [],\n },\n {\n \"id\": 2,\n \"path\": \"2\",\n \"type\": \"library\",\n \"version\": \"3\",\n \"name\": \"b\",\n \"keywords\": [],\n },\n {\n \"id\": 3,\n \"path\": \"3\",\n \"type\": \"library\",\n \"version\": \"4\",\n \"name\": \"c\",\n \"keywords\": [],\n },\n ]\n with responses.RequestsMock() as rsps:\n for i in range(1, 4):\n rsps.add(\n responses.DELETE,\n f\"{self.client.api_url}/collections/{i}/\",\n status=204,\n adding_headers={\"accept\": \"application/json\"},\n )\n result = self.rfhub_importer.delete_outdated_collections(\n existing_collections, new_collections\n )\n self.assertSetEqual({1, 2, 3}, result)\n\n def test_add_collections_should_return_loaded_collections_and_keywords_number(self):\n with responses.RequestsMock() as rsps:\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/collections/\",\n json={\"name\": EXPECTED_COLLECTION2[\"name\"], \"id\": 1},\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/keywords/\",\n json=EXPECTED_COLLECTION2[\"keywords\"][0],\n status=201,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n result = self.rfhub_importer.add_collections([EXPECTED_COLLECTION2])\n self.assertCountEqual(result, EXPECTED_ADD_COLLECTIONS)\n\n def test_add_collections_should_exit_when_unauthorized(self):\n with self.assertRaises(StopIteration) as cm:\n with responses.RequestsMock() as rsps:\n rsps.add(\n responses.POST,\n f\"{self.client.api_url}/collections/\",\n json={\"detail\": \"Unauthorized to perform this action\"},\n status=401,\n adding_headers={\n \"Content-Type\": \"application/json\",\n \"accept\": \"application/json\",\n },\n )\n self.rfhub_importer.add_collections([EXPECTED_COLLECTION2])\n\n def test_is_library_with_init_should_return_true_on_library_with_init(self):\n file = self.fixture_path / \"LibWithInit\"\n result = self.rfhub_importer._is_library_with_init(file)\n self.assertTrue(\n result, \"method should return true if file is python library with init\"\n )\n\n def test_is_library_with_init_should_return_false_on_library_without_init(self):\n file = self.fixture_path / \"LibsWithEmptyInit\"\n result = self.rfhub_importer._is_library_with_init(file)\n self.assertFalse(\n result, \"method should return false if file is python library without init\"\n )\n\n def test_is_robot_keyword_file_should_return_true_on_library(self):\n file = self.fixture_path / \"SingleClassLib\" / \"SingleClassLib.py\"\n result = self.rfhub_importer._is_robot_keyword_file(file)\n self.assertTrue(result, \"method should return true if file is python library\")\n\n def test_is_robot_keyword_file_should_return_true_on_libdoc(self):\n file = self.fixture_path / \"test_libdoc_file.xml\"\n result = self.rfhub_importer._is_robot_keyword_file(file)\n self.assertTrue(result, \"method should return true if file is libdoc file\")\n\n def test_is_robot_keyword_file_should_return_true_on_resource(self):\n file = self.fixture_path / \"test_resource.resource\"\n result = self.rfhub_importer._is_robot_keyword_file(file)\n self.assertTrue(result, \"method should return true if file is robot resource\")\n\n def test_is_library_file_should_return_false_on_lib_with_init(self):\n file = self.fixture_path / \"LibWithInit\" / \"__init__.py\"\n result = RfhubImporter._is_library_file(file)\n self.assertFalse(result, \"method should 
return true if file is python library\")\n\n def test_is_library_file_should_return_false_on_library_with_init(self):\n file = self.fixture_path / \"LibWithInit\" / \"__init__.py\"\n result = RfhubImporter._is_library_file(file)\n self.assertFalse(\n result, \"method should return false if file is python library with init\"\n )\n\n def test_is_libdoc_file_should_return_true_on_libdoc(self):\n file = self.fixture_path / \"test_libdoc_file.xml\"\n result = RfhubImporter._is_libdoc_file(file)\n self.assertTrue(result, \"method should return true if file is libdoc file\")\n\n def test_is_libdoc_file_should_return_false_on_non_libdoc(self):\n file = self.fixture_path / \"not_libdoc_file.xml\"\n result = RfhubImporter._is_libdoc_file(file)\n self.assertFalse(\n result, \"method should return false if file is not libdoc file\"\n )\n\n def test_is_libdoc_file_should_return_false_on_non_xml(self):\n file = self.fixture_path / \"_private_library.py\"\n result = RfhubImporter._is_libdoc_file(file)\n self.assertFalse(\n result, \"method should return false if file is not libdoc file\"\n )\n\n def test_should_ignore_should_return_true_on_deprecated(self):\n file = self.fixture_path / \"deprecated_library.py\"\n result = RfhubImporter._should_ignore(file)\n self.assertTrue(\n result, 'method should return true if file starts with \"deprecated\"'\n )\n\n def test_should_ignore_should_return_true_on_private(self):\n file = self.fixture_path / \"_private_library.py\"\n result = RfhubImporter._should_ignore(file)\n self.assertTrue(result, 'method should return true if file starts with \"_\"')\n\n def test_should_ignore_should_return_true_on_excluded(self):\n file = self.fixture_path / \"remote.py\"\n result = RfhubImporter._should_ignore(file)\n self.assertTrue(\n result, \"method should return true if file in EXCLUDED_LIBRARIES\"\n )\n\n def test_should_ignore_should_return_false_on_library_to_import(self):\n file = self.fixture_path / \"SingleClassLib\" / \"SingleClassLib.py\"\n result = RfhubImporter._should_ignore(file)\n self.assertFalse(\n result, \"method should return false if file should be imported\"\n )\n\n def test_is_resource_file_should_return_true(self):\n file = self.fixture_path / \"test_resource.resource\"\n result = RfhubImporter._is_resource_file(file)\n self.assertTrue(result, \"method should return true if file is resource file\")\n\n def test_is_resource_file_should_return_false(self):\n file = self.fixture_path / \"test_file_with_tests.robot\"\n result = RfhubImporter._is_resource_file(file)\n self.assertFalse(\n result, \"method should return false if file is not resource file\"\n )\n\n def test_is_resource_file_should_return_false_on_init(self):\n file = self.fixture_path / \"__init__.robot\"\n result = RfhubImporter._is_resource_file(file)\n self.assertFalse(\n result, \"method should return false if file is not resource file\"\n )\n\n def test_has_keyword_table_should_return_true(self):\n data = \"*** Keywords ***\"\n result = RfhubImporter._has_keyword_table(data=data)\n self.assertTrue(result, \"method should return true if Keywords were found\")\n\n def test_has_keyword_table_should_return_false(self):\n data = \"*** Keys ***\"\n result = RfhubImporter._has_keyword_table(data=data)\n self.assertFalse(\n result, \"method should return false if Keywords were not found\"\n )\n\n def test_has_test_case_table_should_return_true(self):\n data = \"*** Test Case ***\"\n result = RfhubImporter._has_test_case_table(data=data)\n self.assertTrue(result, \"method should return true if Test 
Case were found\")\n\n def test_has_test_case_table_should_return_false(self):\n data = \"*** Test ***\"\n result = RfhubImporter._has_test_case_table(data=data)\n self.assertFalse(\n result, \"method should return false if Test Case were not found\"\n )\n\n def test_serialise_libdoc_should_return_collection(self):\n file = self.fixture_path / \"test_libdoc_file.xml\"\n libdoc = LibraryDocumentation(file)\n serialised_keywords = self.rfhub_importer._serialise_keywords(libdoc)\n serialised_libdoc = self.rfhub_importer._serialise_libdoc(\n libdoc, file, serialised_keywords\n )\n serialised_libdoc.pop(\"path\")\n self.assertEqual(serialised_libdoc, EXPECTED_LIBDOC)\n\n def test_serialise_keywords_should_return_keywords(self):\n file = self.fixture_path / \"test_resource.resource\"\n libdoc = LibraryDocumentation(file)\n serialised_keywords = self.rfhub_importer._serialise_keywords(libdoc)\n self.assertEqual(serialised_keywords, EXPECTED_KEYWORDS)\n\n def test_collection_path_and_name_match_should_return_true_when_matched(self):\n result = RfhubImporter._collection_path_and_name_match(\n EXPECTED_COLLECTION, EXPECTED_COLLECTION\n )\n self.assertTrue(result)\n\n def test_collection_path_and_name_match_should_return_false_when_not_matched(self):\n result = RfhubImporter._collection_path_and_name_match(\n EXPECTED_COLLECTION, EXPECTED_COLLECTION2\n )\n self.assertFalse(result)\n\n def test_get_collections_to_update_should_return_collections_to_update(self):\n existing_collections = [EXPECTED_COLLECTION, EXPECTED_COLLECTION2]\n new_collections = copy.deepcopy(existing_collections)\n new_collections[0][\"version\"] = \"1.2.4\"\n new_collections[1][\"version\"] = \"3.3.0\"\n result = RfhubImporter._get_collections_to_update(\n existing_collections, new_collections\n )\n self.assertListEqual(new_collections, result)\n\n def test_get_new_collections_should_return_only_new_collections(self):\n exisitng_collections = [EXPECTED_COLLECTION]\n new_collections = [EXPECTED_COLLECTION, EXPECTED_COLLECTION2]\n result = RfhubImporter._get_new_collections(\n exisitng_collections, new_collections\n )\n self.assertListEqual([EXPECTED_COLLECTION2], result)\n\n def test_reduce_collection_items_should_return_reduced_collection(self):\n collection2 = copy.deepcopy(EXPECTED_COLLECTION)\n EXPECTED_COLLECTION[\"id\"] = 1\n EXPECTED_COLLECTION[\"keywords\"] = KEYWORDS_EXTENDED\n result = RfhubImporter._reduce_collection_items(\n collection2, EXPECTED_COLLECTION\n )\n self.assertDictEqual(collection2, result)\n\n def test_get_reduced_collection_should_return_reduced_collection(self):\n collection2 = copy.deepcopy(EXPECTED_COLLECTION2)\n collection2[\"id\"] = 1\n result = RfhubImporter._get_reduced_collection(\n EXPECTED_COLLECTION2, collection2\n )\n self.assertDictEqual(EXPECTED_COLLECTION2, result)\n\n def test_get_reduced_keywords_should_return_reduced_keywords(self):\n result = RfhubImporter._get_reduced_keywords(KEYWORDS_1, KEYWORDS_EXTENDED)\n self.assertListEqual(KEYWORDS_1, result)\n\n def test_library_or_resource_changed_should_return_false_when_library_unchanged(\n self\n ):\n result = RfhubImporter._library_or_resource_changed(\n EXPECTED_COLLECTION, EXPECTED_COLLECTION\n )\n self.assertFalse(result)\n\n def test_library_or_resource_changed_should_return_true_when_library_changed(self):\n collection2 = copy.deepcopy(EXPECTED_COLLECTION)\n collection2[\"version\"] = \"1.2.4\"\n result = RfhubImporter._library_or_resource_changed(\n EXPECTED_COLLECTION, collection2\n )\n self.assertTrue(result)\n\n def 
test_library_or_resource_changed_should_return_true_when_resource_unchanged(\n self\n ):\n EXPECTED_COLLECTION[\"type\"] = \"resource\"\n collection2 = copy.deepcopy(EXPECTED_COLLECTION)\n result = RfhubImporter._library_or_resource_changed(\n EXPECTED_COLLECTION, collection2\n )\n self.assertFalse(result)\n\n def test_library_or_resource_changed_should_return_true_when_resource_changed(self):\n EXPECTED_COLLECTION[\"type\"] = \"resource\"\n collection2 = copy.deepcopy(EXPECTED_COLLECTION)\n collection2[\"doc\"] = \"abc\"\n result = RfhubImporter._library_or_resource_changed(\n EXPECTED_COLLECTION, collection2\n )\n self.assertTrue(result)\n\n def test_library_or_resource_doc_changed_should_return_true_when_resource_changed(\n self\n ):\n EXPECTED_COLLECTION[\"type\"] = \"resource\"\n collection2 = copy.deepcopy(EXPECTED_COLLECTION)\n collection2[\"doc\"] = \"abc\"\n result = RfhubImporter._library_or_resource_changed(\n EXPECTED_COLLECTION, collection2\n )\n self.assertTrue(result)\n", "id": "6043301", "language": "Python", "matching_score": 3.784372568130493, "max_stars_count": 0, "path": "tests/unit/cli/rfhub_importer.py" }, { "content": "import unittest\n\nfrom rfhub2.db.base import Keyword\n\n\nclass KeywordTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.keyword = Keyword(\n name=\"My keyword\",\n doc=\"Keyword description\\n\\nFurther description\",\n args='[\"path\", \"arg1\"]',\n collection_id=1,\n )\n cls.empty_keyword = Keyword(name=\"Empty keyword\", collection_id=1)\n\n def test_should_get_arg_string(self):\n self.assertEqual(self.keyword.arg_string, \"path, arg1\")\n\n def test_should_get_empty_string_when_keyword_has_no_args(self):\n self.assertEqual(self.empty_keyword.arg_string, \"\")\n\n def test_should_get_keyword_synopsis(self):\n self.assertEqual(self.keyword.synopsis, \"Keyword description\")\n\n def test_should_get_empty_synopsis_when_keyword_has_no_doc(self):\n self.assertEqual(self.empty_keyword.synopsis, \"\")\n\n def test_should_get_keyword_html_doc(self):\n self.assertEqual(\n self.keyword.html_doc,\n \"<p>Keyword description</p>\\n<p>Further description</p>\",\n )\n\n def test_should_get_empty_html_doc_when_keyword_has_no_doc(self):\n self.assertEqual(self.empty_keyword.html_doc, \"\")\n", "id": "11232545", "language": "Python", "matching_score": 4.347089767456055, "max_stars_count": 0, "path": "tests/unit/db/keyword_tests.py" }, { "content": "import unittest\n\nfrom rfhub2.db.base import Collection\n\n\nclass CollectionTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.collection = Collection(\n name=\"My collection\", doc=\"Collection description\\n\\nFurther description\"\n )\n cls.empty_collection = Collection(name=\"Empty collection\")\n\n def test_should_get_collection_synopsis(self):\n self.assertEqual(self.collection.synopsis, \"Collection description\")\n\n def test_should_get_empty_synopsis_when_collection_has_no_doc(self):\n self.assertEqual(self.empty_collection.synopsis, \"\")\n\n def test_should_get_collection_html_doc(self):\n self.assertEqual(\n self.collection.html_doc,\n \"<p>Collection description</p>\\n<p>Further description</p>\",\n )\n\n def test_should_get_empty_html_doc_when_collection_has_no_doc(self):\n self.assertEqual(self.empty_collection.html_doc, \"\")\n", "id": "3268382", "language": "Python", "matching_score": 1.4728902578353882, "max_stars_count": 0, "path": "tests/unit/db/collection_tests.py" }, { "content": "import unittest\n\nfrom rfhub2.ui.search_params import SearchParams\n\n\nclass 
SearchParamsTest(unittest.TestCase):\n def test_should_return_default_search_params_for_invalid_input_value(self):\n for case in (None, \"\", \"*\"):\n with self.subTest(case=case):\n result: SearchParams = SearchParams(case)\n self.assertEqual(\n (result.pattern, result.collection_name, result.use_doc),\n SearchParams.DEFAULT,\n )\n self.assertEqual(result.raw_pattern, case)\n\n def test_should_extract_search_params_for_valid_input_value(self):\n test_data = [\n (\"keyword\", \"keyword\", None, True),\n (\"keywordin\", \"keywordin\", None, True),\n (\"keywordin:\", \"keywordin:\", None, True),\n (\"keyword in:\", \"keyword in:\", None, True),\n (\"keyword In:lib\", \"keyword\", \"lib\", True),\n (\"keyword in: lib\", \"keyword\", \"lib\", True),\n (\"keyword in: My Lib\", \"keyword\", \"my lib\", True),\n (\"name:keywordin:\", \"keywordin:\", None, False),\n (\"name: keyword in: lib\", \"keyword\", \"lib\", False),\n ]\n for value, pattern, col_name, use_doc in test_data:\n with self.subTest(pattern=value):\n result: SearchParams = SearchParams(value)\n self.assertEqual(\n (result.pattern, result.collection_name, result.use_doc),\n (pattern, col_name, use_doc),\n )\n self.assertEqual(result.raw_pattern, value)\n", "id": "10501414", "language": "Python", "matching_score": 2.754931688308716, "max_stars_count": 0, "path": "tests/unit/ui/search_params_tests.py" }, { "content": "from typing import Optional, Tuple\n\nIN_TOKEN = \" in:\"\nNAME_TOKEN = \"name:\"\nWILDCARD = \"*\"\n\n\nclass SearchParams:\n\n DEFAULT = (WILDCARD, None, True)\n\n def __init__(self, pattern: str = WILDCARD) -> None:\n if not pattern or pattern == WILDCARD:\n result = self.DEFAULT\n else:\n result = self.extract_params(pattern)\n self.raw_pattern = pattern\n self.pattern, self.collection_name, self.use_doc = result\n\n @staticmethod\n def extract_params(raw_pattern: str) -> Tuple[str, Optional[str], bool]:\n pattern, collection_name, use_doc = raw_pattern.strip().lower(), None, True\n if pattern.startswith(NAME_TOKEN):\n pattern = pattern[5:].strip()\n use_doc = False\n query, sep, col_name = pattern.partition(IN_TOKEN)\n if sep == IN_TOKEN and col_name:\n pattern = query.strip()\n collection_name = col_name.strip()\n return pattern, collection_name, use_doc\n", "id": "1005970", "language": "Python", "matching_score": 0.9071720838546753, "max_stars_count": 0, "path": "rfhub2/ui/search_params.py" }, { "content": "from typing import List, Optional\n\nfrom sqlalchemy import or_\nfrom sqlalchemy.orm import selectinload\nfrom sqlalchemy.orm.query import Query\nfrom sqlalchemy.sql.elements import BinaryExpression\n\nfrom rfhub2.db.base import Collection, Keyword\nfrom rfhub2.db.repository.base_repository import BaseRepository\nfrom rfhub2.db.repository.query_utils import glob_to_sql\n\n\nclass KeywordRepository(BaseRepository):\n @property\n def _items(self) -> Query:\n return self.session.query(Keyword).options(selectinload(Keyword.collection))\n\n def _id_filter(self, item_id: int) -> BinaryExpression:\n return Keyword.id == item_id\n\n def get_all(\n self,\n *,\n pattern: Optional[str] = None,\n collection_name: Optional[str] = None,\n use_doc: bool = True,\n skip: int = 0,\n limit: int = 100,\n ) -> List[Keyword]:\n filter_criteria = []\n if pattern:\n filter_criteria.append(Keyword.name.ilike(glob_to_sql(pattern)))\n if use_doc:\n filter_criteria = [\n or_(filter_criteria[0], Keyword.doc.ilike(glob_to_sql(pattern)))\n ]\n if collection_name:\n 
filter_criteria.append(Collection.name.ilike(glob_to_sql(collection_name)))\n        return (\n            self.session.query(Keyword)\n            .join(Keyword.collection)\n            .filter(*filter_criteria)\n            .order_by(Keyword.name)\n            .offset(skip)\n            .limit(limit)\n            .all()\n        )\n", "id": "6311180", "language": "Python", "matching_score": 5.265579700469971, "max_stars_count": 0, "path": "rfhub2/db/repository/keyword_repository.py" }, { "content": "from typing import List, Optional\n\nfrom sqlalchemy.orm import selectinload\nfrom sqlalchemy.orm.query import Query\nfrom sqlalchemy.sql.elements import BinaryExpression\n\nfrom rfhub2.db.base import Collection\nfrom rfhub2.db.repository.base_repository import BaseRepository\nfrom rfhub2.db.repository.query_utils import glob_to_sql\n\n\nclass CollectionRepository(BaseRepository):\n    @property\n    def _items(self) -> Query:\n        return self.session.query(Collection).options(selectinload(Collection.keywords))\n\n    def _id_filter(self, item_id: int) -> BinaryExpression:\n        return Collection.id == item_id\n\n    def get_all(\n        self,\n        *,\n        pattern: Optional[str] = None,\n        libtype: Optional[str] = None,\n        skip: int = 0,\n        limit: int = 100,\n    ) -> List[Collection]:\n        filter_criteria = []\n        if pattern:\n            filter_criteria.append(Collection.name.ilike(glob_to_sql(pattern)))\n        if libtype:\n            filter_criteria.append(Collection.type.ilike(glob_to_sql(libtype)))\n        return (\n            self._items.filter(*filter_criteria)\n            .order_by(Collection.name)\n            .offset(skip)\n            .limit(limit)\n            .all()\n        )\n", "id": "12336720", "language": "Python", "matching_score": 2.405290365219116, "max_stars_count": 0, "path": "rfhub2/db/repository/collection_repository.py" }, { "content": "from typing import Generic, Optional, TypeVar\n\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy.orm.query import Query\nfrom sqlalchemy.orm.session import Session\nfrom sqlalchemy.sql.elements import BinaryExpression\n\nT = TypeVar(\"T\")\n\n\nclass BaseRepository(Generic[T]):\n    def __init__(self, db_session: Session):\n        self.session = db_session\n\n    @property\n    def _items(self) -> Query: # pragma: no cover\n        raise NotImplementedError\n\n    def _id_filter(self, item_id: int) -> BinaryExpression: # pragma: no cover\n        raise NotImplementedError\n\n    def add(self, item: T) -> T:\n        self.session.add(item)\n        self.session.commit()\n        self.session.refresh(item)\n        return item\n\n    def delete(self, item_id: int) -> int:\n        row_count = self._items.filter(self._id_filter(item_id)).delete()\n        self.session.commit()\n        return row_count\n\n    def get(self, item_id: int) -> Optional[T]:\n        return self._items.get(item_id)\n\n    def update(self, item: T, update_data: dict):\n        item_data = jsonable_encoder(item)\n        for field in item_data:\n            if field in update_data:\n                setattr(item, field, update_data[field])\n        return self.add(item)\n", "id": "11253889", "language": "Python", "matching_score": 1.1535998582839966, "max_stars_count": 0, "path": "rfhub2/db/repository/base_repository.py" }, { "content": "from sqlalchemy import create_engine, event\nfrom sqlalchemy.engine import Engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nimport sqlite3\n\nfrom rfhub2 import config\n\n\ndef create_sqlalchemy_engine(db_uri: str) -> Engine:\n    if db_uri.startswith(\"sqlite://\"):\n        engine_kwargs = {\"connect_args\": {\"check_same_thread\": False}}\n    else:\n        engine_kwargs = {}\n    return create_engine(\n        db_uri, pool_pre_ping=True, echo=False, **engine_kwargs\n    )\n\n\[email protected]_for(Engine, \"connect\")\ndef set_sqlite_fk_pragma(db_api_connection, _):\n    
\"\"\"\n Setting foreign keys pragma is required to enable on delete cascade behavior\n for foreign key fields which is by default disabled\n \"\"\"\n if isinstance(db_api_connection, sqlite3.Connection):\n cursor = db_api_connection.cursor()\n cursor.execute(\"PRAGMA foreign_keys=ON;\")\n cursor.close()\n\n\nengine = create_sqlalchemy_engine(config.SQLALCHEMY_DB_URI)\n\ndb_session = scoped_session(\n sessionmaker(autocommit=False, autoflush=False, bind=engine)\n)\nSession = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n", "id": "8982839", "language": "Python", "matching_score": 1.8425006866455078, "max_stars_count": 0, "path": "rfhub2/db/session.py" }, { "content": "from sqlalchemy.orm.session import Session\n\nfrom rfhub2.db.base import Base\n\n\ndef init_db(session: Session) -> None:\n Base.metadata.create_all(bind=session.bind)\n", "id": "7307061", "language": "Python", "matching_score": 1.5903284549713135, "max_stars_count": 0, "path": "rfhub2/db/init_db.py" }, { "content": "from rfhub2.app import create_app\nfrom rfhub2.db.init_db import init_db\nfrom rfhub2.db.session import db_session\n\ninit_db(db_session)\napp = create_app()\n", "id": "8910085", "language": "Python", "matching_score": 0.21340130269527435, "max_stars_count": 0, "path": "rfhub2/main.py" }, { "content": "class LibWithInit2(object):\n \"\"\"\n Overview that should not be imported for LibWithInit2.\n \"\"\"\n\n def __init__(self):\n self.b = None\n\n def lib_with_init_2_method_1(self):\n \"\"\"Docstring for lib_with_init_2_method_1\"\"\"\n pass\n\n def lib_with_init_2_method_2(self):\n \"\"\"Docstring for lib_with_init_2_method_2\"\"\"\n pass\n", "id": "10218311", "language": "Python", "matching_score": 2.7162063121795654, "max_stars_count": 0, "path": "tests/fixtures/updated/LibWithInit/LibWithInit2.py" }, { "content": "from LibWithInit.LibWithInit1 import LibWithInit1\nfrom LibWithInit.LibWithInit2 import LibWithInit2\n\n\nclass LibWithInit(LibWithInit1, LibWithInit2):\n \"\"\"This is a docstring that should be imported as overview\"\"\"\n\n ROBOT_LIBRARY_SCOPE = \"GLOBAL\"\n __version__ = \"6.6.6\"\n", "id": "5071056", "language": "Python", "matching_score": 2.015410900115967, "max_stars_count": 0, "path": "tests/fixtures/updated/LibWithInit/__init__.py" }, { "content": "class LibWithEmptyInit1(object):\n \"\"\"\n Overview that should be imported for LibWithEmptyInit1.\n \"\"\"\n\n __version__ = \"2.1.0\"\n\n def __init__(self):\n self.b = None\n\n def lib_with_empty_init_1_method_1(self):\n \"\"\"Docstring for lib_with_empty_init_1_method_1\"\"\"\n pass\n\n def lib_with_empty_init_1_method_2(self):\n \"\"\"Docstring for lib_with_empty_init_1_method_2\"\"\"\n pass\n", "id": "9450186", "language": "Python", "matching_score": 3.273587226867676, "max_stars_count": 0, "path": "tests/fixtures/updated/LibsWithEmptyInit/LibWithEmptyInit1.py" }, { "content": "class LibWithEmptyInit2(object):\n \"\"\"\n Overview that should be imported for LibWithEmptyInit2.\n \"\"\"\n\n __version__ = \"1.0.0\"\n\n def __init__(self):\n self.a = None\n\n def lib_with_empty_init_2_method_1(self):\n \"\"\"Docstring for lib_with_empty_init_2_method_1\"\"\"\n pass\n\n def lib_with_empty_init_2_method_2(self):\n \"\"\"Docstring for lib_with_empty_init_2_method_2\"\"\"\n pass\n", "id": "7639870", "language": "Python", "matching_score": 2.790658950805664, "max_stars_count": 0, "path": "tests/fixtures/updated/LibsWithEmptyInit/LibWithEmptyInit2.py" }, { "content": "class LibWithInit1(object):\n \"\"\"\n Overview that should not be 
imported for LibWithInit1.\n \"\"\"\n\n def __init__(self):\n self.a = None\n\n def lib_with_init_1_method_1(self):\n \"\"\"Docstring for lib_with_init_1_method_1\"\"\"\n pass\n\n def lib_with_init_1_method_2(self):\n \"\"\"Docstring for lib_with_init_1_method_2\"\"\"\n pass\n", "id": "3522425", "language": "Python", "matching_score": 1.1163580417633057, "max_stars_count": 0, "path": "tests/fixtures/updated/LibWithInit/LibWithInit1.py" }, { "content": "class SingleClassLib(object):\n \"\"\"\n Overview that should be imported for SingleClassLib.\n \"\"\"\n\n __version__ = \"1.2.3\"\n\n def __init__(self):\n self.b = None\n\n def single_class_lib_method_1(self):\n \"\"\"Docstring for single_class_lib_method_1\"\"\"\n pass\n\n def single_class_lib_method_2(self):\n \"\"\"Docstring for single_class_lib_method_2\"\"\"\n pass\n\n def single_class_lib_method_3(self, param_1, param_2):\n \"\"\"Docstring for single_class_lib_method_3 with two params\"\"\"\n pass\n\n\nclass SingleClassLibThatShouldNotBeImported(object):\n \"\"\"\n Overview that should not be imported for SingleClassLibThatShouldNotBeImported.\n \"\"\"\n\n def __init__(self):\n self.b = None\n\n def single_class_lib_that_should_not_be_imported_method_1(self):\n \"\"\"Docstring for single_class_lib_that_should_not_be_imported_method_1\"\"\"\n pass\n\n def single_class_lib_that_should_not_be_imported_method_2(self):\n \"\"\"Docstring for single_class_lib_that_should_not_be_imported_method_2\"\"\"\n pass\n", "id": "7041934", "language": "Python", "matching_score": 4.367588996887207, "max_stars_count": 0, "path": "tests/fixtures/initial/SingleClassLib/SingleClassLib.py" }, { "content": "class SingleClassLib(object):\n \"\"\"\n Overview that should be imported for SingleClassLib.\n \"\"\"\n\n __version__ = \"1.2.8\"\n\n def __init__(self):\n self.b = None\n\n def single_class_lib_method_1(self):\n \"\"\"Docstring for single_class_lib_method_1\"\"\"\n pass\n\n def single_class_lib_method_2(self):\n \"\"\"Docstring for single_class_lib_method_2\"\"\"\n pass\n\n def single_class_lib_method_4(self, param_1, param_2):\n \"\"\"Docstring for single_class_lib_method_4 with two params\"\"\"\n pass\n\n def single_class_lib_method_5(self, param_1, param_2):\n \"\"\"Docstring for single_class_lib_method_5 with two params\"\"\"\n pass\n\n\nclass SingleClassLibThatShouldNotBeImported(object):\n \"\"\"\n Overview that should not be imported for SingleClassLibThatShouldNotBeImported.\n \"\"\"\n\n def __init__(self):\n self.b = None\n\n def single_class_lib_that_should_not_be_imported_method_1(self):\n \"\"\"Docstring for single_class_lib_that_should_not_be_imported_method_1\"\"\"\n pass\n\n def single_class_lib_that_should_not_be_imported_method_2(self):\n \"\"\"Docstring for single_class_lib_that_should_not_be_imported_method_2\"\"\"\n pass\n", "id": "3570962", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/fixtures/updated/SingleClassLib/SingleClassLib.py" }, { "content": "class DeprecatedLibrary:\r\n def deprecated_keyword(self):\r\n pass\r\n", "id": "4999257", "language": "Python", "matching_score": 0.823369026184082, "max_stars_count": 0, "path": "tests/fixtures/initial/deprecated_library.py" }, { "content": "class PrivateLibrary:\r\n def private_keyword(self):\r\n pass\r\n", "id": "4170858", "language": "Python", "matching_score": 0.7697879672050476, "max_stars_count": 0, "path": "tests/fixtures/initial/_private_library.py" } ]
1.777782
kba
[ { "content": "import json\nfrom pkg_resources import resource_string\n\n\ndef get_ocrd_tool():\n return json.loads(\n resource_string(__name__, 'ocrd-tool.json').decode('utf8'))\n", "id": "12442631", "language": "Python", "matching_score": 2.638640880584717, "max_stars_count": 26, "path": "align/ocrd_tool.py" }, { "content": "import json\nfrom pkg_resources import resource_string\n\n__all__ = ['OCRD_TOOL']\n\nOCRD_TOOL = json.loads(resource_string(__name__, 'ocrd-tool.json').decode('utf8'))\n", "id": "12120653", "language": "Python", "matching_score": 0.034051164984703064, "max_stars_count": 25, "path": "ocrd_anybaseocr/constants.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport ctypes\nimport os\nimport sys\nfrom tempfile import NamedTemporaryFile\n\nimport numpy as np\n\n\nfrom .bindings.lsd_ctypes import lsdlib\n\ndef lsd(src, scale=0.8, sigma_scale=0.6, quant=2.0, ang_th=22.5, eps=0.0, density_th=0.7, n_bins=1024, max_grad=255.0):\n \"\"\"Analyse image with Line Segment Detector.\n\n Args:\n src (Numpy object) : 2-d grayscale image array (HxW) to analyse.\n\n Keyword Args:\n scale (double) : Scale the image by Gaussian filter.\n sigma_scale (double) : Sigma for Gaussian filter is computed as sigma = sigma_scale/scale.\n quant (double) : Bound to the quantization error on the gradient norm.\n ang_th (double) : Gradient angle tolerance in degrees.\n eps (double) : Detection threshold, -log10(NFA).\n density_th (double) : Minimal density of region points in rectangle.\n n_bins (int) : Number of bins in pseudo-ordering of gradient modulus.\n max_grad (double) : Gradient modulus in the highest bin. The default value corresponds to the highest gradient modulus on images with gray levels in [0,255].\n\n Returns:\n A list of line candidates as 5-tuples of (x1, y1, x2, y2, width).\n \"\"\"\n rows, cols = src.shape\n src = src.reshape(1, rows * cols).tolist()[0]\n\n lens = len(src)\n src = (ctypes.c_double * lens)(*src)\n\n with NamedTemporaryFile(prefix='pylsd-', suffix='.ntl.txt', delete=False) as fp:\n fname = fp.name\n fname_bytes = bytes(fp.name) if sys.version_info < (3, 0) else bytes(fp.name, 'utf8')\n\n lsdlib.lsdGet(src, ctypes.c_int(rows), ctypes.c_int(cols), fname_bytes,\n ctypes.c_double(scale),\n ctypes.c_double(sigma_scale),\n ctypes.c_double(quant),\n ctypes.c_double(ang_th),\n ctypes.c_double(eps),\n ctypes.c_double(density_th),\n ctypes.c_int(n_bins),\n ctypes.c_double(max_grad))\n\n with open(fname, 'r') as fp:\n output = fp.read()\n cnt = output.strip().split(' ')\n count = int(cnt[0])\n dim = int(cnt[1])\n lines = np.array([float(each) for each in cnt[2:]])\n lines = lines.reshape(count, dim)\n\n os.remove(fname)\n return lines\n", "id": "10324175", "language": "Python", "matching_score": 1.3973369598388672, "max_stars_count": 1, "path": "pylsd/lsd.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom PIL import Image, ImageDraw\nimport numpy as np\nfrom pylsd.lsd import lsd\n\nfullName = 'house.png'\nfolder, imgName = os.path.split(fullName)\nimg = Image.open(fullName)\ngray = np.asarray(img.convert('L'))\nlines = lsd(gray)\ndraw = ImageDraw.Draw(img)\nfor i in range(lines.shape[0]):\n pt1 = (int(lines[i, 0]), int(lines[i, 1]))\n pt2 = (int(lines[i, 2]), int(lines[i, 3]))\n width = lines[i, 4]\n draw.line((pt1, pt2), fill=(0, 0, 255), width=int(np.ceil(width / 2)))\nimg.save(os.path.join(folder, 'PIL_' + imgName.split('.')[0] + '.jpg'))\n", "id": "1895468", "language": "Python", "matching_score": 
0.2942705750465393, "max_stars_count": 1, "path": "example/example_PIL.py" }, { "content": "# ====================================================================\n# ====================================\n# README file for Binarize component\n# ====================================\n\n#Filename : ocrd-anyBaseOCR-binarize.py\n\n# Author: <NAME>, <NAME>, Md. <NAME>\n# Responsible: <NAME>, <NAME>, Md. <NAME>\n# Contact Email: <EMAIL>, <EMAIL>, <EMAIL>\n# Note:\n# 1) this work has been done in DFKI, Kaiserslautern, Germany.\n# 2) The parameters values are read from ocrd-anyBaseOCR-parameter.json file. The values can be changed in that file.\n# 3) The command line IO usage is based on \"OCR-D\" project guidelines (https://ocr-d.github.io/). A sample image file (samples/becker_quaestio_1586_00013.tif) and mets.xml (work_dir/mets.xml) are provided. The sequence of operations is: binarization, deskewing, cropping and dewarping (or can also be: binarization, dewarping, deskewing, and cropping; depends upon use-case).\n\n# *********** LICENSE ********************\n# License: ocropus-nlbin.py (from https://github.com/tmbdev/ocropy/) contains both functionalities: binarization and skew correction.\n# This method (ocrd-anyBaseOCR-binarize.py) only contains the binarization functionality of ocropus-nlbin.py.\n# It still has the same licenses as ocropus-nlbin, i.e Apache 2.0. ((the ocropy license details are pasted below).\n# This file is dependend on ocrolib library which comes from https://github.com/tmbdev/ocropy/.\n\n# Copyright 2014 <NAME>\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# *********** LICENSE ********************\n# =====================================================================\n#!/usr/bin/env python\n\n\nimport argparse\nimport json\nimport ocrolib\nimport os\nimport os.path\nimport sys\n\nfrom pylab import amin, amax, mean, ginput, ones, clip, imshow, median, ion, gray, minimum, array, clf\nfrom scipy.ndimage import filters, interpolation, morphology\nfrom scipy import stats\nimport numpy as np\n\nfrom ..utils import parseXML, write_to_xml, print_info, parse_params_with_defaults, print_error\nfrom ..constants import OCRD_TOOL\n\nclass OcrdAnybaseocrBinarizer():\n\n def __init__(self, param):\n self.param = param\n\n def check_page(self, image):\n if len(image.shape) == 3:\n return \"input image is color image %s\" % (image.shape,)\n if mean(image) < median(image):\n return \"image may be inverted\"\n h, w = image.shape\n if h < 600:\n return \"image not tall enough for a page image %s\" % (image.shape,)\n if h > 10000:\n return \"image too tall for a page image %s\" % (image.shape,)\n if w < 600:\n return \"image too narrow for a page image %s\" % (image.shape,)\n if w > 10000:\n return \"line too wide for a page image %s\" % (image.shape,)\n return None\n\n\n def dshow(self, image, info):\n if self.param['debug'] <= 0:\n return\n ion()\n gray()\n imshow(image)\n title(info)\n ginput(1, self.param['debug'])\n\n def run(self, fname, i):\n print_info(\"# %s\" % (fname))\n 
print_info(\"=== %s %-3d\" % (fname, i))\n raw = ocrolib.read_image_gray(fname)\n self.dshow(raw, \"input\")\n # perform image normalization\n image = raw-amin(raw)\n if amax(image) == amin(image):\n print_info(\"# image is empty: %s\" % (fname))\n return\n image /= amax(image)\n\n if not self.param['nocheck']:\n check = self.check_page(amax(image)-image)\n if check is not None:\n print_error(fname+\" SKIPPED. \"+check +\n \" (use -n to disable this check)\")\n return\n\n # check whether the image is already effectively binarized\n if self.param['gray']:\n extreme = 0\n else:\n extreme = (np.sum(image < 0.05) + np.sum(image > 0.95)) * 1.0 / np.prod(image.shape)\n if extreme > 0.95:\n comment = \"no-normalization\"\n flat = image\n else:\n comment = \"\"\n # if not, we need to flatten it by estimating the local whitelevel\n print_info(\"flattening\")\n m = interpolation.zoom(image, self.param['zoom'])\n m = filters.percentile_filter(m, self.param['perc'], size=(self.param['range'], 2))\n m = filters.percentile_filter(m, self.param['perc'], size=(2, self.param['range']))\n m = interpolation.zoom(m, 1.0/self.param['zoom'])\n if self.param['debug'] > 0:\n clf()\n imshow(m, vmin=0, vmax=1)\n ginput(1, self.param['debug'])\n w, h = minimum(array(image.shape), array(m.shape))\n flat = clip(image[:w, :h]-m[:w, :h]+1, 0, 1)\n if self.param['debug'] > 0:\n clf()\n imshow(flat, vmin=0, vmax=1)\n ginput(1, self.param['debug'])\n\n # estimate low and high thresholds\n print_info(\"estimating thresholds\")\n d0, d1 = flat.shape\n o0, o1 = int(self.param['bignore']*d0), int(self.param['bignore']*d1)\n est = flat[o0:d0-o0, o1:d1-o1]\n if self.param['escale'] > 0:\n # by default, we use only regions that contain\n # significant variance; this makes the percentile\n # based low and high estimates more reliable\n e = self.param['escale']\n v = est-filters.gaussian_filter(est, e*20.0)\n v = filters.gaussian_filter(v**2, e*20.0)**0.5\n v = (v > 0.3*amax(v))\n v = morphology.binary_dilation(v, structure=ones((int(e*50), 1)))\n v = morphology.binary_dilation(v, structure=ones((1, int(e*50))))\n if self.param['debug'] > 0:\n imshow(v)\n ginput(1, self.param['debug'])\n est = est[v]\n lo = stats.scoreatpercentile(est.ravel(), self.param['lo'])\n hi = stats.scoreatpercentile(est.ravel(), self.param['hi'])\n # rescale the image to get the gray scale image\n print_info(\"rescaling\")\n flat -= lo\n flat /= (hi-lo)\n flat = clip(flat, 0, 1)\n if self.param['debug'] > 0:\n imshow(flat, vmin=0, vmax=1)\n ginput(1, self.param['debug'])\n binarized = 1*(flat > self.param['threshold'])\n\n # output the normalized grayscale and the thresholded images\n #print_info(\"%s lo-hi (%.2f %.2f) angle %4.1f %s\" % (fname, lo, hi, angle, comment))\n print_info(\"%s lo-hi (%.2f %.2f) %s\" % (fname, lo, hi, comment))\n print_info(\"writing\")\n if self.param['debug'] > 0 or self.param['show']:\n clf()\n gray()\n imshow(binarized)\n ginput(1, max(0.1, self.param['debug']))\n base, _ = ocrolib.allsplitext(fname)\n ocrolib.write_image_binary(base+\".bin.png\", binarized)\n ocrolib.write_image_gray(base+\".nrm.png\", flat)\n # print(\"########### File path : \", base+\".nrm.png\")\n # write_to_xml(base+\".bin.png\")\n return base+\".bin.png\"\n\n\ndef main():\n parser = argparse.ArgumentParser(\"\"\"\n Image binarization using non-linear processing.\n\n python ocrd-anyBaseOCR-binarize.py -m (mets input file path) -I (input-file-grp name) -O (output-file-grp name) -w (Working directory)\n\n This is a compute-intensive binarization method 
that works on degraded\n and historical book pages.\n \"\"\")\n\n parser.add_argument('-p', '--parameter', type=str, help=\"Parameter file location\")\n parser.add_argument('-w', '--work', type=str, help=\"Working directory location\", default=\".\")\n parser.add_argument('-I', '--Input', default=None, help=\"Input directory\")\n parser.add_argument('-O', '--Output', default=None, help=\"output directory\")\n parser.add_argument('-m', '--mets', default=None, help=\"METs input file\")\n parser.add_argument('-o', '--OutputMets', default=None, help=\"METs output file\")\n parser.add_argument('-g', '--group', default=None, help=\"METs image group id\")\n args = parser.parse_args()\n\n # Read parameter values from json file\n param = {}\n if args.parameter:\n with open(args.parameter, 'r') as param_file:\n param = json.loads(param_file.read())\n param = parse_params_with_defaults(param, OCRD_TOOL['tools']['ocrd-anybaseocr-binarize']['parameters'])\n # print(param)\n # End to read parameters\n\n # mandatory parameter check\n if not args.mets or not args.Input or not args.Output or not args.work:\n parser.print_help()\n print(\"Example: python ocrd-anyBaseOCR-binarize.py -m (mets input file path) -I (input-file-grp name) -O (output-file-grp name) -w (Working directory)\")\n sys.exit(0)\n\n if args.work:\n if not os.path.exists(args.work):\n os.mkdir(args.work)\n\n binarizer = OcrdAnybaseocrBinarizer(param)\n files = parseXML(args.mets, args.Input)\n fnames = []\n for i, fname in enumerate(files):\n fnames.append(binarizer.run(str(fname), i+1))\n write_to_xml(fnames, args.mets, args.Output, args.OutputMets, args.work)\n", "id": "4677957", "language": "Python", "matching_score": 9.44491195678711, "max_stars_count": 0, "path": "ocrd_anybaseocr/cli/binarize.py" }, { "content": "# ======================================================================\n# ====================================\n# README file for Skew Correction component\n# ====================================\n\n# Filename : ocrd-anyBaseOCR-deskew.py\n\n# Author: <NAME>, <NAME>, Md. <NAME>\n# Responsible: <NAME>, <NAME>, Md. <NAME>\n# Contact Email: <EMAIL>, <EMAIL>, <EMAIL>\n# Note:\n# 1) this work has been done in DFKI, Kaiserslautern, Germany.\n# 2) The parameters values are read from ocrd-anyBaseOCR-parameter.json file. The values can be changed in that file.\n# 3) The command line IO usage is based on \"OCR-D\" project guidelines (https://ocr-d.github.io/). A sample image file (samples/becker_quaestio_1586_00013.tif) and mets.xml (work_dir/mets.xml) are provided. 
The sequence of operations is: binarization, deskewing, cropping and dewarping (or can also be: binarization, dewarping, deskewing, and cropping; depends upon use-case).\n\n# *********** Method Behaviour ********************\n# This function takes a document image as input and do the skew correction of that document.\n# *********** Method Behaviour ********************\n\n# *********** LICENSE ********************\n# License: ocropus-nlbin.py (from https://github.com/tmbdev/ocropy/) contains both functionalities: binarization and skew correction.\n# This method (ocrd-anyBaseOCR-deskew.py) only contains the skew correction functionality of ocropus-nlbin.py.\n# It still has the same licenses as ocropus-nlbin, i.e Apache 2.0 (the ocropy license details are pasted below).\n# This file is dependend on ocrolib library which comes from https://github.com/tmbdev/ocropy/.\n\n# Copyright 2014 <NAME>\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# *********** LICENSE ********************\n# ======================================================================\n#!/usr/bin/env python\n\n\nimport argparse\nimport os\nimport os.path\nimport sys\nimport json\n\nfrom pylab import amin, amax, linspace, mean, var, plot, ginput, ones, clip, imshow\nfrom scipy.ndimage import filters, interpolation, morphology\nfrom scipy import stats\nimport ocrolib\nfrom ..utils import parseXML, write_to_xml, print_info, parse_params_with_defaults\nfrom ..constants import OCRD_TOOL\n\nclass OcrdAnybaseocrDeskewer():\n\n def __init__(self, param):\n self.param = param\n\n def estimate_skew_angle(self, image, angles):\n param = self.param\n estimates = []\n\n for a in angles:\n v = mean(interpolation.rotate(image, a, order=0, mode='constant'), axis=1)\n v = var(v)\n estimates.append((v, a))\n if param['debug'] > 0:\n plot([y for x, y in estimates], [x for x, y in estimates])\n ginput(1, param['debug'])\n _, a = max(estimates)\n return a\n\n\n def run(self, fpath, job):\n param = self.param\n base, _ = ocrolib.allsplitext(fpath)\n basefile = ocrolib.allsplitext(os.path.basename(fpath))[0]\n\n if param['parallel'] < 2:\n print_info(\"=== %s %-3d\" % (fpath, job))\n raw = ocrolib.read_image_gray(fpath)\n\n flat = raw\n # estimate skew angle and rotate\n if param['maxskew'] > 0:\n if param['parallel'] < 2:\n print_info(\"estimating skew angle\")\n d0, d1 = flat.shape\n o0, o1 = int(param['bignore']*d0), int(param['bignore']*d1)\n flat = amax(flat)-flat\n flat -= amin(flat)\n est = flat[o0:d0-o0, o1:d1-o1]\n ma = param['maxskew']\n ms = int(2*param['maxskew']*param['skewsteps'])\n angle = self.estimate_skew_angle(est, linspace(-ma, ma, ms+1))\n flat = interpolation.rotate(flat, angle, mode='constant', reshape=0)\n flat = amax(flat)-flat\n else:\n angle = 0\n\n # estimate low and high thresholds\n if param['parallel'] < 2:\n print_info(\"estimating thresholds\")\n d0, d1 = flat.shape\n o0, o1 = int(param['bignore']*d0), int(param['bignore']*d1)\n est = flat[o0:d0-o0, o1:d1-o1]\n if param['escale'] > 0:\n # by default, we use only 
regions that contain\n # significant variance; this makes the percentile\n # based low and high estimates more reliable\n e = param['escale']\n v = est-filters.gaussian_filter(est, e*20.0)\n v = filters.gaussian_filter(v**2, e*20.0)**0.5\n v = (v > 0.3*amax(v))\n v = morphology.binary_dilation(v, structure=ones((int(e*50), 1)))\n v = morphology.binary_dilation(v, structure=ones((1, int(e*50))))\n if param['debug'] > 0:\n imshow(v)\n ginput(1, param['debug'])\n est = est[v]\n lo = stats.scoreatpercentile(est.ravel(), param['lo'])\n hi = stats.scoreatpercentile(est.ravel(), param['hi'])\n # rescale the image to get the gray scale image\n if param['parallel'] < 2:\n print_info(\"rescaling\")\n flat -= lo\n flat /= (hi-lo)\n flat = clip(flat, 0, 1)\n if param['debug'] > 0:\n imshow(flat, vmin=0, vmax=1)\n ginput(1, param['debug'])\n deskewed = 1*(flat > param['threshold'])\n\n # output the normalized grayscale and the thresholded images\n print_info(\"%s lo-hi (%.2f %.2f) angle %4.1f\" % (basefile, lo, hi, angle))\n if param['parallel'] < 2:\n print_info(\"writing\")\n ocrolib.write_image_binary(base+\".ds.png\", deskewed)\n return base+\".ds.png\"\n\n\ndef main():\n parser = argparse.ArgumentParser(\"\"\"\n Image deskewing using non-linear processing.\n\n python ocrd-anyBaseOCR-deskew.py -m (mets input file path) -I (input-file-grp name) -O (output-file-grp name) -w (Working directory)\n\n This is a compute-intensive deskew method that works on degraded and historical book pages.\n \"\"\")\n\n parser.add_argument('-p', '--parameter', type=str, help=\"Parameter file location\")\n parser.add_argument('-O', '--Output', default=None, help=\"output directory\")\n parser.add_argument('-w', '--work', type=str, help=\"Working directory location\", default=\".\")\n parser.add_argument('-I', '--Input', default=None, help=\"Input directory\")\n parser.add_argument('-m', '--mets', default=None, help=\"METs input file\")\n parser.add_argument('-o', '--OutputMets', default=None, help=\"METs output file\")\n parser.add_argument('-g', '--group', default=None, help=\"METs image group id\")\n\n args = parser.parse_args()\n\n #args.files = ocrolib.glob_all(args.files)\n\n # Read parameter values from json file\n param = {}\n if args.parameter:\n with open(args.parameter, 'r') as param_file:\n param = json.loads(param_file.read())\n param = parse_params_with_defaults(param, OCRD_TOOL['tools']['ocrd-anybaseocr-deskew']['parameters'])\n print(\"%s\" % param)\n # End to read parameters\n\n # mendatory parameter check\n if not args.mets or not args.Input or not args.Output or not args.work:\n parser.print_help()\n print(\"Example: ocrd-anybaseocr-deskew -m (mets input file path) -I (input-file-grp name) -O (output-file-grp name) -w (Working directory)\")\n sys.exit(0)\n\n if args.work:\n if not os.path.exists(args.work):\n os.mkdir(args.work)\n\n deskewer = OcrdAnybaseocrDeskewer(param)\n files = parseXML(args.mets, args.Input)\n fnames = []\n for i, fname in enumerate(files):\n fnames.append(deskewer.run(str(fname), i+1))\n write_to_xml(fnames, args.mets, args.Output, args.OutputMets, args.work)\n", "id": "12250339", "language": "Python", "matching_score": 1.40524160861969, "max_stars_count": 0, "path": "ocrd_anybaseocr/cli/deskew.py" }, { "content": "# -*- coding: utf-8 -*-\n# taken from keras/examples/recurrent_attention_machine_translation.py\n# (keras PR #11421 by andhus [<NAME>])\nfrom __future__ import print_function\n\nfrom keras import backend as K\nfrom keras import initializers, regularizers, 
constraints\nfrom keras.engine.base_layer import _collect_previous_mask\nfrom keras.layers import Layer, InputSpec\nfrom keras.layers import concatenate\nfrom keras.utils.generic_utils import has_arg, to_list\n\n\nclass AttentionCellWrapper(Layer):\n \"\"\"Base class for recurrent attention mechanisms.\n\n This base class implements the RNN cell interface and defines a standard\n way for attention mechanisms to interact with a wrapped RNNCell\n (such as the `SimpleRNNCell`, `GRUCell` or `LSTMCell`).\n\n The main idea is that the attention mechanism, implemented by\n `attention_call` in extensions of this class, computes an \"attention\n encoding\", based on the attended input as well as the input and the wrapped\n cell state(s) at the current time step, which will be used as modified\n input for the wrapped cell.\n\n # Arguments\n cell: A RNN cell instance. The cell to wrap by the attention mechanism.\n See docs of `cell` argument in the `RNN` Layer for further details.\n attend_after: Boolean (default False). If True, the attention\n transformation defined by `attention_call` will be applied after\n the wrapped cell transformation (and the attention encoding will be\n used as input for wrapped cell transformation next time step).\n input_mode: String, one of `\"replace\"` (default) or `\"concatenate\"`.\n `\"replace\"`: only the attention encoding will be used as input for\n the wrapped cell.\n `\"concatenate\"` the concatenation of the original input and the\n attention encoding will be used as input to the wrapped cell.\n TODO set \"concatenate\" to default?\n output_mode: String, one of `\"cell_output\"` (default) or `\"concatenate\"`.\n `\"cell_output\"`: the output from the wrapped cell will be used.\n `\"concatenate\"`: the attention encoding will be concatenated to the\n output of the wrapped cell.\n TODO set \"concatenate\" to default?\n\n # Abstract Methods and Properties\n Extension of this class must implement:\n - `attention_build` (method): Builds the attention transformation\n based on input shapes.\n - `attention_call` (method): Defines the attention transformation\n returning the attention encoding.\n - `attention_size` (property): After `attention_build` has been\n called, this property should return the size (int) of the\n attention encoding. Do this by setting `_attention_size` in scope\n of `attention_build` or by implementing `attention_size`\n property.\n Extension of this class can optionally implement:\n - `attention_state_size` (property): Default [`attention_size`].\n If the attention mechanism has it own internal states (besides\n the attention encoding which is by default the only part of\n `attention_states`) override this property accordingly.\n See docs of the respective method/property for further details.\n\n # Details of interaction between attention and cell transformations\n Let \"cell\" denote wrapped RNN cell and \"att(cell)\" the complete\n attentive RNN cell defined by this class. 
We write the wrapped cell\n transformation as:\n\n y{t}, s_cell{t+1} = cell.call(x{t}, s_cell{t})\n\n where y{t} denotes the output, x{t} the input at and s_cell{t} the wrapped\n cell state(s) at time t and s_cell{t+1} the updated state(s).\n\n We can then write the complete \"attentive\" cell transformation as:\n\n y{t}, s_att(cell){t+1} = att(cell).call(x{t}, s_att(cell){t},\n constants=attended)\n\n where s_att(cell) denotes the complete states of the attentive cell,\n which consists of the wrapped cell state(s) followed but the attention\n state(s), and attended denotes the tensor attended to (note: no time\n indexing as this is the same constant input at each time step).\n\n Internally, this is how the attention transformation, implemented by\n `attention_call`, interacts with the wrapped cell transformation\n `cell.call`:\n\n - with `attend_after=False` (default):\n a{t}, s_att{t+1} = att(cell).attention_call(x_t, s_cell{t},\n attended, s_att{t})\n with `input_mode=\"replace\"` (default):\n x'{t} = a{t}\n with `input_mode=\"concatenate\"`:\n x'{t} = [x{t}, a{t}]\n\n y{t}, s_cell{t+1} = cell.call(x'{t}, s_cell{t})\n\n - with `attend_after=True`:\n with `input_mode=\"replace\"` (default):\n x'{t} = a{t}\n with `input_mode=\"concatenate\"`:\n x'{t} = [x{t}, a{t}]\n\n y{t}, s_cell{t+1} = cell.call(x'{t}, s_cell{t})\n a{t}, s_att{t+1} = att(cell).attention_call(x_t, s_cell{t+1},\n attended, s_att{t})\n\n where a{t} denotes the attention encoding, s_att{t} the attention\n state(s), x'{t} the modified wrapped cell input and [x{.}, a{.}] the\n (tensor) concatenation of the input and attention encoding.\n \"\"\"\n # in/output modes\n _REPLACE = \"replace\"\n _CELL_OUTPUT = \"cell_output\"\n _CONCATENATE = \"concatenate\"\n _input_modes = [_REPLACE, _CONCATENATE]\n _output_modes = [_CELL_OUTPUT, _CONCATENATE]\n\n def __init__(self, cell,\n attend_after=False,\n input_mode=\"replace\",\n output_mode=\"cell_output\",\n **kwargs):\n self.cell = cell # must be set before calling super\n super(AttentionCellWrapper, self).__init__(**kwargs)\n self.attend_after = attend_after\n if input_mode not in self._input_modes:\n raise ValueError(\n \"input_mode must be one of {}\".format(self._input_modes))\n self.input_mode = input_mode\n if output_mode not in self._output_modes:\n raise ValueError(\n \"output_mode must be one of {}\".format(self._output_modes))\n self.output_mode = output_mode\n self.attended_spec = None\n self._attention_size = None\n\n def attention_call(self,\n inputs,\n cell_states,\n attended,\n attention_states,\n attended_mask,\n training=None):\n \"\"\"The main logic for computing the attention encoding.\n\n # Arguments\n inputs: The input at current time step.\n cell_states: States for the wrapped RNN cell.\n attended: The constant tensor(s) to attend at each time step.\n attention_states: States dedicated for the attention mechanism.\n attended_mask: Collected masks for the attended.\n training: Whether run in training mode or not.\n\n # Returns\n attention_h: The computed attention encoding at current time step.\n attention_states: States to be passed to next `attention_call`. By\n default this should be [`attention_h`].\n NOTE: if additional states are used, these should be appended\n after `attention_h`, i.e. 
`attention_states[0]` should always\n                be `attention_h`.\n        \"\"\"\n        raise NotImplementedError(\n            '`attention_call` must be implemented by extensions of `{}`'.format(\n                self.__class__.__name__))\n\n    def attention_build(self, input_shape, cell_state_size, attended_shape):\n        \"\"\"Build the attention mechanism.\n\n        NOTE: `self._attention_size` should be set in this method to the size\n        of the attention encoding (i.e. size of first `attention_states`)\n        unless `attention_size` property is implemented in another way.\n\n        # Arguments\n            input_shape: Tuple of integers. Shape of the input at a single time\n                step.\n            cell_state_size: List of tuple of integers.\n            attended_shape: List of tuple of integers.\n\n            NOTE: both `cell_state_size` and `attended_shape` will always be\n                lists - for simplicity. For example: even if (wrapped)\n                `cell.state_size` is an integer, `cell_state_size` will be a list\n                of this one element.\n        \"\"\"\n        raise NotImplementedError(\n            '`attention_build` must be implemented by extensions of `{}`'.format(\n                self.__class__.__name__))\n\n    @property\n    def attention_size(self):\n        \"\"\"Size of the attention encoding, an integer.\n        \"\"\"\n        if self._attention_size is None and self.built:\n            raise NotImplementedError(\n                'extensions of `{}` must either set property `_attention_size`'\n                ' in `attention_build` or implement'\n                ' `attention_size` in some other way'.format(\n                    self.__class__.__name__))\n\n        return self._attention_size\n\n    @property\n    def attention_state_size(self):\n        \"\"\"Size of attention states, defaults to `attention_size`, an integer.\n\n        Modify this property to return a list of integers if the attention\n        mechanism has several internal states. Note that the first size should\n        always be the size of the attention encoding, i.e.:\n        `attention_state_size[0]` = `attention_size`\n        \"\"\"\n        return self.attention_size\n\n    @property\n    def state_size(self):\n        \"\"\"Size of states of the complete attentive cell, a tuple of integers.\n\n        The attentive cell's states consist of the wrapped RNN cell state size(s)\n        followed by attention state size(s). 
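For example (an illustrative configuration, not one fixed by this class):\n        wrapping `LSTMCell(64)` with an attention mechanism whose `attention_size`\n        is 32 yields `state_size == (64, 64, 32)`.\n        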
NOTE it is important that the wrapped\n        cell states are first, as the first state of any RNN cell should be the same\n        as the cell's output.\n        \"\"\"\n        state_size_s = []\n        for state_size in [self.cell.state_size, self.attention_state_size]:\n            if hasattr(state_size, '__len__'):\n                state_size_s += list(state_size)\n            else:\n                state_size_s.append(state_size)\n\n        return tuple(state_size_s)\n\n    @property\n    def output_size(self):\n        if self.output_mode == self._CELL_OUTPUT:\n            return self._wrapped_cell_output_size\n        if self.output_mode == self._CONCATENATE:\n            return self._wrapped_cell_output_size + self.attention_size\n        raise RuntimeError( # already validated in __init__\n            \"got unexpected output_mode: {}\".format(self.output_mode))\n\n    def call(self, inputs, states, constants, training=None):\n        \"\"\"Complete attentive cell transformation.\n        \"\"\"\n        attended = to_list(constants, allow_tuple=True)\n        # NOTE: `K.rnn` will pass constants as a tuple and `_collect_previous_mask`\n        # returns `None` if passed a tuple of tensors, hence `to_list` above!\n        # We also make `attended` and `attended_mask` always lists for uniformity:\n        attended_mask = to_list(_collect_previous_mask(attended))\n        cell_states = states[:self._num_wrapped_states]\n        attention_states = states[self._num_wrapped_states:]\n\n        if self.attend_after:\n            call = self._call_attend_after\n        else:\n            call = self._call_attend_before\n\n        return call(inputs=inputs,\n                    cell_states=cell_states,\n                    attended=attended,\n                    attention_states=attention_states,\n                    attended_mask=attended_mask,\n                    training=training)\n\n    def _call_attend_before(self,\n                            inputs,\n                            cell_states,\n                            attended,\n                            attention_states,\n                            attended_mask,\n                            training=None):\n        \"\"\"Complete attentive cell transformation, if `attend_after=False`.\n        \"\"\"\n        attention_h, new_attention_states, alignment = self.attention_call(\n            inputs=inputs,\n            cell_states=cell_states,\n            attended=attended,\n            attention_states=attention_states,\n            attended_mask=attended_mask,\n            training=training)\n\n        cell_input = self._get_cell_input(inputs, attention_h)\n\n        if has_arg(self.cell.call, 'training'):\n            cell_output, new_cell_states = self.cell.call(\n                cell_input, cell_states, training=training)\n        else:\n            cell_output, new_cell_states = self.cell.call(cell_input, cell_states)\n\n        output = self._get_output(cell_output, attention_h)\n        # append the attention weights (alignment) to the cell output so that\n        # they can be recovered from the RNN output downstream\n        output = concatenate([output, alignment])\n\n        return output, new_cell_states + new_attention_states\n\n    def _call_attend_after(self,\n                           inputs,\n                           cell_states,\n                           attended,\n                           attention_states,\n                           attended_mask,\n                           training=None):\n        \"\"\"Complete attentive cell transformation, if `attend_after=True`.\n        \"\"\"\n        attention_h_previous = attention_states[0]\n\n        cell_input = self._get_cell_input(inputs, attention_h_previous)\n\n        if has_arg(self.cell.call, 'training'):\n            cell_output, new_cell_states = self.cell.call(\n                cell_input, cell_states, training=training)\n        else:\n            cell_output, new_cell_states = self.cell.call(cell_input, cell_states)\n\n        # NOTE: this path expects the two-value return documented for\n        # `attention_call` and does not handle the extra alignment output\n        # consumed by `_call_attend_before`\n        attention_h, new_attention_states = self.attention_call(\n            inputs=inputs,\n            cell_states=new_cell_states,\n            attended=attended,\n            attention_states=attention_states,\n            attended_mask=attended_mask,\n            training=training)\n\n        output = self._get_output(cell_output, attention_h)\n\n        return output, new_cell_states + new_attention_states\n\n    def _get_cell_input(self, inputs, attention_h):\n        if self.input_mode == self._REPLACE:\n            return attention_h\n        if self.input_mode == self._CONCATENATE:\n            return concatenate([inputs, attention_h])\n        raise RuntimeError( # already validated in __init__\n            
\"got unexpected input_mode: {}\".format(self.input_mode))\n\n def _get_output(self, cell_output, attention_h):\n if self.output_mode == self._CELL_OUTPUT:\n return cell_output\n if self.output_mode == self._CONCATENATE:\n return concatenate([cell_output, attention_h])\n raise RuntimeError( # already validated in __init__\n \"got unexpected output_mode: {}\".format(self.output_mode))\n\n @staticmethod\n def _num_elements(x):\n if hasattr(x, '__len__'):\n return len(x)\n else:\n return 1\n\n @property\n def _num_wrapped_states(self):\n return self._num_elements(self.cell.state_size)\n\n @property\n def _num_attention_states(self):\n return self._num_elements(self.attention_state_size)\n\n @property\n def _wrapped_cell_output_size(self):\n if hasattr(self.cell, \"output_size\"):\n return self.cell.output_size\n if hasattr(self.cell.state_size, '__len__'):\n return self.cell.state_size[0]\n return self.cell.state_size\n\n def build(self, input_shape):\n \"\"\"Builds attention mechanism and wrapped cell (if keras layer).\n\n Arguments:\n input_shape: list of tuples of integers, the input feature shape\n (inputs sequence shape without time dimension) followed by\n constants (i.e. attended) shapes.\n \"\"\"\n if not isinstance(input_shape, list):\n raise ValueError('input shape should contain shape of both cell '\n 'inputs and constants (attended)')\n\n attended_shape = input_shape[1:]\n input_shape = input_shape[0]\n self.attended_spec = [InputSpec(shape=shape) for shape in attended_shape]\n if isinstance(self.cell.state_size, int):\n cell_state_size = [self.cell.state_size]\n else:\n cell_state_size = list(self.cell.state_size)\n self.attention_build(\n input_shape=input_shape,\n cell_state_size=cell_state_size,\n attended_shape=attended_shape,\n )\n\n if isinstance(self.cell, Layer):\n if self.input_mode == self._REPLACE:\n cell_input_size = self._attention_size\n elif self.input_mode == self._CONCATENATE:\n cell_input_size = self.attention_size + input_shape[-1]\n else:\n raise RuntimeError( # already validated in __init__\n \"got unexpected input_mode: {}\".format(self.input_mode))\n\n cell_input_shape = (input_shape[0], cell_input_size)\n self.cell.build(cell_input_shape)\n\n self.built = True\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], self.output_size\n\n @property\n def trainable_weights(self):\n return (super(AttentionCellWrapper, self).trainable_weights +\n self.cell.trainable_weights)\n\n @property\n def non_trainable_weights(self):\n return (super(AttentionCellWrapper, self).non_trainable_weights +\n self.cell.non_trainable_weights)\n\n def get_config(self):\n config = {'attend_after': self.attend_after,\n 'input_mode': self.input_mode,\n 'output_mode': self.output_mode}\n\n cell_config = self.cell.get_config()\n config['cell'] = {'class_name': self.cell.__class__.__name__,\n 'config': cell_config}\n base_config = super(AttentionCellWrapper, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass DenseAnnotationAttention(AttentionCellWrapper):\n \"\"\"Recurrent attention mechanism for attending sequences.\n\n This class implements the attention mechanism used in [1] for machine\n translation. It is, however, a generic sequence attention mechanism that can be\n used for other sequence-to-sequence problems.\n\n As any recurrent attention mechanism extending `_RNNAttentionCell`, this class\n should be used in conjunction with a wrapped (non attentive) RNN Cell, such as\n the `SimpleRNNCell`, `LSTMCell` or `GRUCell`. 
It modifies the input of the\n wrapped cell by attending to a constant sequence (i.e. independent of the time\n step of the recurrent application of the attention mechanism). The attention\n encoding is obtained by computing a scalar weight for each time step of the\n attended by applying two stacked Dense layers to the concatenation of the\n attended feature vector at the respective time step with the previous state of\n the RNN Cell. The attention encoding is the weighted sum of the attended feature\n vectors using these weights.\n\n Half of the first Dense transformation is independent of the RNN Cell state and\n can be computed once for the attended sequence. Therefore this transformation\n should be computed externally of the attentive RNN Cell (for efficiency) and this\n layer expects both the attended sequence and the output of a Dense transformation\n of the attended sequence (see Example below). The number of hidden units of the\n attention mechanism is subsequently defined by the number of units of this\n (external) dense transformation.\n\n # Arguments\n cell: A RNN cell instance. The wrapped RNN cell wrapped by this attention\n mechanism. See docs of `cell` argument in the `RNN` Layer for further\n details.\n kernel_initializer: Initializer for all weights matrices\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for all bias vectors\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n all weights matrices. (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to all biases\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n all weights matrices. (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to all bias vectors\n (see [constraints](../constraints.md)).\n\n # Example\n\n ```python\n # machine translation (similar to the architecture used in [1])\n x = Input((None,), name=\"input_sequences\")\n y = Input((None,), name=\"target_sequences\")\n x_emb = Embedding(INPUT_NUM_WORDS, 256, mask_zero=True)(x)\n y_emb = Embedding(TARGET_NUM_WORDS, 256, mask_zero=True)(y)\n encoder = Bidirectional(GRU(512, return_sequences=True))\n x_enc = encoder(x_emb)\n\n # first part of the dense annotation, independent of the decoder time step\n u = TimeDistributed(Dense(128, use_bias=False))(x_enc)\n decoder = RNN(cell=DenseAnnotationAttention(cell=GRUCell(512)),\n return_sequences=True)\n h = decoder(y_emb, constants=[x_enc, u])\n y_pred = TimeDistributed(Dense(TARGET_NUM_WORDS, activation='softmax'))(h)\n model = Model([y, x], y_pred)\n model.compile(loss='sparse_categorical_crossentropy', optimizer=OPTIMIZER)\n ```\n\n # References\n [1] Neural Machine Translation by Jointly Learning to Align and Translate\n https://arxiv.org/abs/1409.0473\n \"\"\"\n def __init__(self, cell,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(DenseAnnotationAttention, self).__init__(cell, **kwargs)\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n\n def 
attention_call(self,\n inputs,\n cell_states,\n attended,\n attention_states,\n attended_mask,\n training=None):\n # there must be two attended sequences (verified in build)\n [attended, u] = attended\n attended_mask = attended_mask[0]\n h_cell_tm1 = cell_states[0]\n\n # compute attention weights\n w = K.repeat(K.dot(h_cell_tm1, self.W_a) + self.b_UW, K.shape(attended)[1])\n e = K.exp(K.dot(K.tanh(w + u), self.v_a) + self.b_v)\n\n if attended_mask is not None:\n e = e * K.cast(K.expand_dims(attended_mask, -1), K.dtype(e))\n\n a = e / K.sum(e, axis=1, keepdims=True)\n c = K.sum(a * attended, axis=1, keepdims=False)\n\n return c, [c], K.squeeze(a, -1)\n\n def attention_build(self, input_shape, cell_state_size, attended_shape):\n if not len(attended_shape) == 2:\n raise ValueError('There must be two attended tensors')\n for a in attended_shape:\n if not len(a) == 3:\n raise ValueError('only support attending tensors with dim=3')\n [attended_shape, u_shape] = attended_shape\n\n # NOTE _attention_size must always be set in `attention_build`\n self._attention_size = attended_shape[-1]\n units = u_shape[-1]\n\n kernel_kwargs = dict(initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.W_a = self.add_weight(shape=(cell_state_size[0], units),\n name='W_a', **kernel_kwargs)\n self.v_a = self.add_weight(shape=(units, 1),\n name='v_a', **kernel_kwargs)\n\n bias_kwargs = dict(initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n self.b_UW = self.add_weight(shape=(units,),\n name=\"b_UW\", **bias_kwargs)\n self.b_v = self.add_weight(shape=(1,),\n name=\"b_v\", **bias_kwargs)\n\n def get_config(self):\n config = {\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(DenseAnnotationAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "id": "2320871", "language": "Python", "matching_score": 1.677994728088379, "max_stars_count": 0, "path": "ocrd_cor_asv_ann/lib/attention.py" }, { "content": "import logging\nimport signal\nfrom keras.callbacks import Callback\n\nclass StopSignalCallback(Callback):\n '''Keras callback for graceful interruption of training.\n\n Halts training prematurely at the end of the current batch\n when the given signal was received once. 
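Typical usage (a minimal sketch): pass an instance via the `callbacks`\n    argument, e.g. `model.fit(x, y, callbacks=[StopSignalCallback()])`, so that\n    a single Ctrl-C (SIGINT by default) ends training gracefully after the\n    current batch.\n    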
If the callback\n gets to receive the signal again, exits immediately.\n '''\n\n def __init__(self, sig=signal.SIGINT, logger=None):\n super(StopSignalCallback, self).__init__()\n self.received = False\n self.sig = sig\n self.logger = logger or logging.getLogger(__name__)\n def stopper(sig, _frame):\n if sig == self.sig:\n if self.received: # called again?\n self.logger.critical('interrupting')\n exit(0)\n else:\n self.logger.critical('stopping training')\n self.received = True\n self.action = signal.signal(self.sig, stopper)\n\n def __del__(self):\n signal.signal(self.sig, self.action)\n\n def on_batch_end(self, batch, logs=None):\n if self.received:\n self.model.stop_training = True\n\nclass ResetStatesCallback(Callback):\n '''Keras callback for stateful models to reset state between files.\n\n Callback to be called by `fit_generator()` or even `evaluate_generator()`:\n do `model.reset_states()` whenever generator sees EOF (on_batch_begin with self.eof),\n and between training and validation (on_batch_end with batch>=steps_per_epoch-1).\n '''\n def __init__(self, callback_model, logger=None):\n super(ResetStatesCallback, self).__init__()\n self.eof = False\n self.here = ''\n self.next = ''\n self.callback_model = callback_model # different than self.model set by set_model()\n self.logger = logger or logging.getLogger(__name__)\n\n def reset(self, where):\n self.eof = True\n self.next = where\n\n def on_batch_begin(self, batch, logs={}):\n if self.eof:\n #self.logger.debug('resetting model at batch %d for %s',\n # batch, \"training\" if self.params['do_validation'] else \"validation\")\n # called between training files,\n # reset only encoder (training does not converge if applied to complete encoder-decoder)\n self.callback_model.reset_states()\n self.eof = False\n self.here = self.next\n\n def on_batch_end(self, batch, logs={}):\n if logs.get('loss') > 10:\n pass # print(u'huge loss in', self.here, u'at', batch)\n", "id": "11172696", "language": "Python", "matching_score": 1.2100318670272827, "max_stars_count": 10, "path": "ocrd_cor_asv_ann/lib/callbacks.py" }, { "content": "# -*- coding: utf-8\nimport os\nimport logging\nimport click\n\nfrom ..lib.seq2seq import Sequence2Sequence\n\[email protected]()\[email protected]('-m', '--save-model', default=\"model.h5\", help='model file for saving',\n type=click.Path(dir_okay=False, writable=True))\[email protected]('--load-model', help='model file for loading (incremental/pre-training)',\n type=click.Path(dir_okay=False, exists=True))\[email protected]('--init-model', help='model file for initialisation (transfer from LM or shallower model)',\n type=click.Path(dir_okay=False, exists=True))\[email protected]('--reset-encoder', is_flag=True, help='reset encoder weights after load/init')\[email protected]('-w', '--width', default=128, help='number of nodes per hidden layer',\n type=click.IntRange(min=1, max=9128))\[email protected]('-d', '--depth', default=2, help='number of stacked hidden layers',\n type=click.IntRange(min=1, max=10))\[email protected]('-v', '--valdata', multiple=True, help='file to use for validation (instead of random split)',\n type=click.Path(dir_okay=False, exists=True))\n# click.File is impossible since we do not now a priori whether\n# we have to deal with pickle dumps (mode 'rb', includes confidence)\n# or plain text files (mode 'r')\[email protected]('data', nargs=-1, type=click.Path(dir_okay=False, exists=True))\ndef cli(save_model, load_model, init_model, reset_encoder, width, depth, valdata, data):\n \"\"\"Train a 
correction model.\n \n Configure a sequence-to-sequence model with the given parameters.\n \n If given `load_model`, and its configuration matches the current parameters,\n then load its weights.\n If given `init_model`, then transfer its mapping and matching layer weights.\n (Also, if its configuration has 1 less hidden layers, then fixate the loaded\n weights afterwards.)\n If given `reset_encoder`, re-initialise the encoder weights afterwards.\n \n Then, regardless, train on the file paths `data` using early stopping.\n If no `valdata` were given, split off a random fraction of lines for\n validation. Otherwise, use only those files for validation.\n \n If the training has been successful, save the model under `save_model`.\n \"\"\"\n if not 'TF_CPP_MIN_LOG_LEVEL' in os.environ:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n logging.basicConfig()\n logging.getLogger(__name__).setLevel(logging.DEBUG)\n \n s2s = Sequence2Sequence(logger=logging.getLogger(__name__), progbars=True)\n s2s.width = width\n s2s.depth = depth\n s2s.configure()\n \n # there could be both, a full pretrained model to load,\n # and a model to initialise parts from (e.g. only decoder for LM)\n if load_model:\n s2s.load_config(load_model)\n if s2s.width == width and s2s.depth == depth:\n logging.info('loading weights from existing model for incremental training')\n s2s.configure()\n s2s.load_weights(load_model)\n else:\n logging.warning('ignoring existing model due to different topology (width=%d, depth=%d)',\n s2s.width, s2s.depth)\n if init_model:\n s2s.configure()\n s2s.load_transfer_weights(init_model)\n \n if reset_encoder:\n # reset weights of pretrained encoder (i.e. keep only decoder weights as initialization):\n from keras import backend as K\n session = K.get_session()\n for layer in s2s.encoder_model.layers:\n for var in layer.__dict__:\n var_arg = getattr(layer, var)\n if hasattr(var_arg, 'initializer'):\n initializer_method = getattr(var_arg, 'initializer')\n initializer_method.run(session=session)\n \n s2s.train(data, valdata or None)\n if s2s.status > 1:\n s2s.save(save_model)\n \n", "id": "2981158", "language": "Python", "matching_score": 5.9395036697387695, "max_stars_count": 0, "path": "ocrd_cor_asv_ann/scripts/train.py" }, { "content": "# -*- coding: utf-8\nimport os\nimport logging\nimport click\n\nfrom ..lib.seq2seq import Sequence2Sequence\n\[email protected]()\[email protected]('-m', '--load-model', default=\"model.h5\", help='model file to load',\n type=click.Path(dir_okay=False, exists=True))\n# click.File is impossible since we do not now a priori whether\n# we have to deal with pickle dumps (mode 'rb', includes confidence)\n# or plain text files (mode 'r')\[email protected]('--fast', is_flag=True, help='only decode greedily')\[email protected]('--rejection', default=0.5, type=click.FloatRange(0,1.0),\n help='probability of the input characters in all hypotheses (set 0 to use raw predictions)')\[email protected]('data', nargs=-1, type=click.Path(dir_okay=False, exists=True))\ndef cli(load_model, fast, rejection, data):\n \"\"\"Evaluate a correction model.\n \n Load a sequence-to-sequence model from the given path.\n \n Then apply on the file paths `data`, comparing predictions\n (both greedy and beamed) with GT target, and measuring\n error rates.\n \"\"\"\n if not 'TF_CPP_MIN_LOG_LEVEL' in os.environ:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n logging.basicConfig()\n logging.getLogger(__name__).setLevel(logging.DEBUG)\n \n s2s = Sequence2Sequence(logger=logging.getLogger(__name__), 
progbars=True)\n s2s.load_config(load_model)\n s2s.configure()\n s2s.load_weights(load_model)\n s2s.rejection_threshold = rejection\n \n s2s.evaluate(data, fast)\n", "id": "10130017", "language": "Python", "matching_score": 0.8118248581886292, "max_stars_count": 0, "path": "ocrd_cor_asv_ann/scripts/eval.py" }, { "content": "import click\n\nfrom ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor\nfrom .shell import ShellPreprocessor\nfrom .skimage_binarize import SkimageBinarize\nfrom .skimage_denoise import SkimageDenoise\nfrom .skimage_denoise_raw import SkimageDenoiseRaw\nfrom .skimage_normalize import SkimageNormalize\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_preprocess_image(*args, **kwargs):\n return ocrd_cli_wrap_processor(ShellPreprocessor, *args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_skimage_normalize(*args, **kwargs):\n return ocrd_cli_wrap_processor(SkimageNormalize, *args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_skimage_denoise_raw(*args, **kwargs):\n return ocrd_cli_wrap_processor(SkimageDenoiseRaw, *args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_skimage_binarize(*args, **kwargs):\n return ocrd_cli_wrap_processor(SkimageBinarize, *args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_skimage_denoise(*args, **kwargs):\n return ocrd_cli_wrap_processor(SkimageDenoise, *args, **kwargs)\n", "id": "10645002", "language": "Python", "matching_score": 2.5958240032196045, "max_stars_count": 0, "path": "ocrd_wrap/cli.py" }, { "content": "\"\"\"\nInstalls:\n - ocrd-preprocess-image\n - ocrd-skimage-normalize\n - ocrd-skimage-denoise-raw\n - ocrd-skimage-binarize\n - ocrd-skimage-denoise\n\"\"\"\n\nimport codecs\nimport json\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nwith codecs.open('README.md', encoding='utf-8') as f:\n README = f.read()\n\nwith open('./ocrd-tool.json', 'r') as f:\n version = json.load(f)['version']\n \nsetup(\n name='ocrd_wrap',\n version=version,\n description='OCR-D wrapper for arbitrary coords-preserving image operations',\n long_description=README,\n long_description_content_type='text/markdown',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/bertsky/ocrd_wrap',\n license='MIT',\n packages=find_packages(),\n include_package_data=True,\n install_requires=open('requirements.txt').read().split('\\n'),\n package_data={\n '': ['*.json', '*.yml', '*.yaml', '*.csv.gz', '*.jar', '*.zip'],\n },\n entry_points={\n 'console_scripts': [\n 'ocrd-preprocess-image=ocrd_wrap.cli:ocrd_preprocess_image',\n 'ocrd-skimage-binarize=ocrd_wrap.cli:ocrd_skimage_binarize',\n 'ocrd-skimage-denoise=ocrd_wrap.cli:ocrd_skimage_denoise',\n 'ocrd-skimage-denoise-raw=ocrd_wrap.cli:ocrd_skimage_denoise_raw',\n 'ocrd-skimage-normalize=ocrd_wrap.cli:ocrd_skimage_normalize',\n ]\n },\n)\n", "id": "1999861", "language": "Python", "matching_score": 3.63596248626709, "max_stars_count": 0, "path": "setup.py" }, { "content": "\"\"\"\nInstalls:\n - ocrd-cis-align\n\"\"\"\n\nfrom setuptools import setup, find_packages\n\nsetup(\n name='cis-ocrd',\n version='0.0.1',\n description='description',\n long_description='long description',\n author='<NAME>, <NAME>',\n author_email='<EMAIL>, <EMAIL>',\n url='https://github.com/cisocrgroup/cis-ocrd-py',\n license='MIT',\n packages=find_packages(),\n install_requires=[\n 'ocrd >= 0.4.0',\n 'click',\n ],\n package_data={\n '': ['*.json', '*.yml', '*.yaml'],\n },\n entry_points={\n 'console_scripts': [\n 
'ocrd-cis-align=align.cli:cis_ocrd_align'\n ]\n },\n)\n", "id": "460708", "language": "Python", "matching_score": 3.5371460914611816, "max_stars_count": 0, "path": "setup.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup, find_packages\nfrom ocrd_utils import VERSION\n\ninstall_requires = open('requirements.txt').read().split('\\n')\ninstall_requires.append('ocrd_utils == %s' % VERSION)\ninstall_requires.append('ocrd_models == %s' % VERSION)\ninstall_requires.append('ocrd_modelfactory == %s' % VERSION)\ninstall_requires.append('ocrd_validators == %s' % VERSION)\n\nsetup(\n name='ocrd',\n version=VERSION,\n description='OCR-D framework',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/OCR-D/core',\n license='Apache License 2.0',\n packages=find_packages(exclude=('tests', 'docs')),\n include_package_data=True,\n install_requires=install_requires,\n package_data={\n '': ['*.json', '*.yml', '*.yaml', '*.bash', '*.xml'],\n },\n entry_points={\n 'console_scripts': [\n 'ocrd=ocrd.cli:cli',\n 'ocrd-dummy=ocrd.cli.dummy_processor:cli',\n ]\n },\n)\n", "id": "9917315", "language": "Python", "matching_score": 3.5429182052612305, "max_stars_count": 0, "path": "ocrd/setup.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nsetup(\n name='ocrd_anybaseocr',\n version='0.0.1',\n description='Tweaked ocropus scripts',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/kba/ocrd_dfkitools',\n license='Apache License 2.0',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n install_requires=open('requirements.txt').read().split('\\n'),\n packages=['ocrd_anybaseocr'],\n package_data={\n '': ['*.json']\n },\n entry_points={\n 'console_scripts': [\n 'ocrd-anybaseocr-binarize = ocrd_anybaseocr.cli.binarize:main',\n 'ocrd-anybaseocr-crop = ocrd_anybaseocr.cli.cropping:main',\n 'ocrd-anybaseocr-deskew = ocrd_anybaseocr.cli.deskew:main',\n ]\n },\n)\n", "id": "249259", "language": "Python", "matching_score": 0.1831251084804535, "max_stars_count": 0, "path": "setup.py" }, { "content": "from __future__ import absolute_import\nimport json\nfrom ocrd import Processor\nfrom ocrd import MIMETYPE_PAGE\nfrom ocrd.utils import getLogger\nfrom ocrd.model.ocrd_page import from_file\nfrom ocrd.model.ocrd_page import to_xml\nfrom ocrd.model.ocrd_page_generateds import TextEquivType\nfrom lib.javaprocess import JavaProcess\nfrom align.ocrd_tool import get_ocrd_tool\n\n\nclass Aligner(Processor):\n def __init__(self, *args, **kwargs):\n ocrd_tool = get_ocrd_tool()\n kwargs['ocrd_tool'] = ocrd_tool['tools']['ocrd-cis-align']\n kwargs['version'] = ocrd_tool['version']\n super(Aligner, self).__init__(*args, **kwargs)\n self.log = getLogger('Processor.Aligner')\n\n def process(self):\n ifgs = self.input_file_grp.split(\",\") # input file groups\n ifts = self.zip_input_files(ifgs) # input file tuples\n page_alignments = list()\n for ift in ifts:\n page_alignments.append(PageAlignment(self, ifgs, ift))\n for pa in page_alignments:\n for la in pa.line_alignments:\n self.log.info(\"%s\", la)\n pa.write_alignment_to_xml()\n self.workspace.save_mets()\n\n def zip_input_files(self, ifgs):\n \"\"\"Zip files of the given input file groups\"\"\"\n files = list()\n for ifg in ifgs:\n self.log.info(\"input file group: %s\", ifg)\n ifiles = sorted(\n self.workspace.mets.find_files(fileGrp=ifg),\n key=lambda ifile: 
ifile.ID)\n for i in ifiles:\n self.log.info(\"sorted file: %s %s\", i.url, i.ID)\n self.log.info(\"input files: %s\", ifiles)\n files.append(ifiles)\n return zip(*files)\n\n\nclass PageAlignment:\n \"\"\"PageAlignment holds a list of LineAlignments.\"\"\"\n def __init__(self, process, ifgs, ifs):\n \"\"\"Create a page alignment form a list of input files.\"\"\"\n self.process = process\n self.ifgs = ifgs\n self.ifs = ifs\n self.log = getLogger('PageAlignment')\n self.align_lines()\n\n def align_lines(self):\n lines = list()\n for ifile in self.ifs:\n lines.append(self.read_lines_from_input_file(ifile))\n lines = zip(*lines)\n _input = [x for t in lines for x in t]\n for i in _input:\n self.log.debug(\"input line: %s\", i)\n n = len(self.ifs)\n p = JavaProcess(\n jar=self.process.parameter['cisOcrdJar'],\n main=\"de.lmu.cis.ocrd.cli.Align\",\n input_str=\"\\n\".join(_input),\n args=[str(n)])\n p.run()\n lines = p.output.split(\"\\n\")\n self.line_alignments = list()\n for i in range(0, len(lines), n):\n self.line_alignments.append(LineAlignment(lines[i:i+n]))\n\n def read_lines_from_input_file(self, ifile):\n self.log.info(\"reading input file: %s\", ifile.url)\n lines = list()\n pcgts = from_file(self.process.workspace.download_file(ifile))\n for region in pcgts.get_Page().get_TextRegion():\n for line in region.get_TextLine():\n lines.append(line.get_TextEquiv()[0].Unicode)\n return lines\n\n def write_alignment_to_xml(self):\n \"\"\"\n Write the alignments into new output-file-group.\n The alignment is done by the master file (first index)\n \"\"\"\n self.log.info(\"writing alignment to %s\", self.process.output_file_grp)\n master = self.ifs[0]\n pcgts = from_file(self.process.workspace.download_file(master))\n ilist = iter(self.line_alignments)\n for region in pcgts.get_Page().get_TextRegion():\n for line in region.get_TextLine():\n self.log.info(\"line: %s\", line.get_TextEquiv()[0].Unicode)\n line.get_TextEquiv()[0].set_index(0)\n current = next(ilist)\n self.add_line_alignments(line, current)\n self.add_word_alignments(line, current)\n self.log.debug(\"master basename: %s\", master.basename)\n self.process.add_output_file(\n ID=\"{}_{}\".format(master.ID, self.process.output_file_grp),\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts),\n file_grp=self.process.output_file_grp,\n basename=master.basename,\n )\n\n def add_word_alignments(self, page_xml_line, alignment_line):\n \"\"\"\n Add word alignments to the words of the given page XML line.\n We iterate over the master-OCR words, so the first word of the\n tuple must be contained in the given page XML word.\n \"\"\"\n k = 0\n for word in page_xml_line.get_Word():\n page_xml_word = word.get_TextEquiv()[0].Unicode\n # skip words that do not contain current (e.g. '
§.')\n if alignment_line.tokens[k][0] in page_xml_word:\n self.log.debug(\"word: %s\", page_xml_word)\n for (i, w) in enumerate(alignment_line.tokens[k]):\n self.log.debug(\" - word: %s (%s)\", w, self.ifgs[i])\n eq = TextEquivType(\n index=i+1,\n dataType='alignment-token-{}'.format(self.ifgs[i]),\n Unicode=w,\n )\n word.add_TextEquiv(eq)\n k += 1\n\n def add_line_alignments(self, page_xml_line, alignment_line):\n \"\"\"\n Add alignment TextEquivs to the given page XML line.\n \"\"\"\n page_xml_line.get_TextEquiv()[0].set_index(0)\n self.log.debug(\"line %s\", page_xml_line.get_TextEquiv()[0].Unicode)\n self.log.debug(\" - line: %s (%s)\", alignment_line.pairwise[0][0], self.ifgs[0])\n eq = TextEquivType(\n index=1,\n dataType=\"alignment-line-{}\".format(self.ifgs[0]),\n Unicode=alignment_line.pairwise[0][0],\n )\n page_xml_line.add_TextEquiv(eq)\n for i in range(1, len(alignment_line.pairwise)):\n self.log.debug(\" - line: %s (%s)\", alignment_line.pairwise[i][1], self.ifgs[i])\n eq = TextEquivType(\n index=i+1,\n dataType=\"alignment-line-{}\".format(self.ifgs[i]),\n Unicode=alignment_line.pairwise[i][1],\n )\n page_xml_line.add_TextEquiv(eq)\n\n\nclass LineAlignment:\n \"\"\"\n LineAlignment holds a line alignment.\n A line alignment of n lines holds n-1 pairwise alignments\n and a list of token alignments of n-tuples.\n\n Each pairwise alignment represents the alignment of the\n master line with another. Pairwise aligned lines have always\n the same length. Underscores ('_') mark deletions or insertions.\n \"\"\"\n def __init__(self, lines):\n \"\"\"\n Create a LineAlignment from n-1 pairwise\n alignments an one token alignment at pos n-1.\n \"\"\"\n self.n = len(lines)\n self.pairwise = list()\n for i in range(0, self.n-1):\n self.pairwise.append(tuple(lines[i].split(\",\")))\n self.tokens = list()\n for ts in lines[self.n-1].split(\",\"):\n self.tokens.append(tuple(ts.split(\":\")))\n\n def __str__(self):\n data = {}\n data['pairwise'] = self.pairwise\n data['tokens'] = self.tokens\n return json.dumps(data)\n", "id": "8686775", "language": "Python", "matching_score": 4.198946952819824, "max_stars_count": 0, "path": "align/aligner.py" }, { "content": "from __future__ import absolute_import\n\nimport os\nimport math\n\nfrom ocrd import Processor\nfrom ocrd_utils import getLogger, concat_padded\nfrom ocrd_modelfactory import page_from_file\n\nfrom .config import OCRD_TOOL\nfrom ..lib.alignment import Alignment\n\nLOG = getLogger('processor.EvaluateLines')\nTOOL_NAME = 'ocrd-cor-asv-ann-evaluate'\n\nclass EvaluateLines(Processor):\n \n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL_NAME]\n kwargs['version'] = OCRD_TOOL['version']\n super(EvaluateLines, self).__init__(*args, **kwargs)\n if not hasattr(self, 'workspace') or not self.workspace:\n # no parameter/workspace for --dump-json or --version (no processing)\n return\n self.alignment = Alignment(logger=LOG)\n \n def process(self):\n \"\"\"Align textlines of multiple file groups and calculate distances.\n \n Find files in all input file groups of the workspace for the same\n pageIds (or, as a fallback, the same pageIds at their imageFilename).\n The first file group serves as reference annotation (ground truth).\n \n Open and deserialise PAGE input files, then iterative over the element\n hierarchy down to the TextLine level, looking at each first TextEquiv.\n Align character sequences in all pairs of lines for the same TextLine IDs,\n and calculate the distances using the error metric `metric`. 
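(For example, line distances of 3 and 1 over character counts of 40 and 10\n        accumulate to an error rate of (3 + 1) / (40 + 10) = 0.08.)\n        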
Accumulate\n distances and sequence lengths per file group globally and per file,\n and show each fraction as a CER rate in the log.\n \"\"\"\n metric = self.parameter['metric']\n if metric == 'Levenshtein':\n metric = self.alignment.get_levenshtein_distance\n else:\n metric = (lambda ocr, gt, normalization=metric\n if metric != 'combining-e-umlauts'\n else None: # NFC / NFKC / historic_latin\n self.alignment.get_adjusted_distance(\n ocr, gt, normalization=normalization))\n \n ifgs = self.input_file_grp.split(\",\") # input file groups\n if len(ifgs) < 2:\n raise Exception(\"need multiple input file groups to compare\")\n \n dists = [0 for _ in ifgs]\n total = [0 for _ in ifgs]\n # get input files:\n ifts = self.zip_input_files(ifgs) # input file tuples\n for ift in ifts:\n file_dists = [0 for _ in ifgs] # sum distances for this file\n file_total = [0 for _ in ifgs] # num characters for this file\n # get input lines:\n file_lines = [{} for _ in ifgs] # line dicts for this file\n for i, input_file in enumerate(ift):\n if not i:\n LOG.info(\"processing page %s\", input_file.pageId)\n if not input_file:\n # file/page was not found in this group\n continue\n LOG.info(\"INPUT FILE for %s: %s\", ifgs[i], input_file.ID)\n pcgts = page_from_file(self.workspace.download_file(input_file))\n file_lines[i] = _page_get_lines(pcgts)\n for line_id in file_lines[0].keys():\n for i in range(1, len(ift)):\n if not ift[i]:\n # file/page was not found in this group\n continue\n elif line_id not in file_lines[i]:\n LOG.error('line \"%s\" in file %s is missing from input %d / %s',\n line_id, ift[i].ID, i, ifgs[i])\n continue\n gt_line = file_lines[0][line_id]\n ocr_line = file_lines[i][line_id]\n gt_len = len(gt_line)\n ocr_len = len(ocr_line)\n if 0.2 * (gt_len + ocr_len) < math.fabs(gt_len - ocr_len) > 5:\n LOG.warning('line length differs significantly (%d vs %d) for line %s',\n gt_len, ocr_len, line_id)\n dist, chars = metric(ocr_line, gt_line)\n file_dists[i] += dist\n file_total[i] += chars\n for i in range(1, len(ift)):\n if not ift[i]:\n # file/page was not found in this group\n continue\n LOG.info(\"CER %s / %s vs %s: %.3f\",\n ift[i].pageId, ifgs[0], ifgs[i], file_dists[i] / file_total[i])\n dists[i] += file_dists[i]\n total[i] += file_total[i]\n for i in range(1, len(ifgs)):\n if not total[i]:\n LOG.warning('%s had no textlines whatsoever', ifgs[i])\n continue\n LOG.info(\"CER overall / %s vs %s: %.3f\",\n ifgs[0], ifgs[i], dists[i] / total[i])\n \n def zip_input_files(self, ifgs):\n ifts = list() # file tuples\n for page_id in self.workspace.mets.physical_pages:\n ifiles = list()\n for ifg in ifgs:\n LOG.debug(\"adding input file group %s to page %s\", ifg, page_id)\n files = self.workspace.mets.find_files(pageId=page_id, fileGrp=ifg)\n if not files:\n # fall back for missing pageId via Page imageFilename:\n all_files = self.workspace.mets.find_files(fileGrp=ifg)\n for file_ in all_files:\n pcgts = page_from_file(self.workspace.download_file(file_))\n image_url = pcgts.get_Page().get_imageFilename()\n img_files = self.workspace.mets.find_files(url=image_url)\n if img_files and img_files[0].pageId == page_id:\n files = [file_]\n break\n if not files:\n # other fallback options?\n LOG.error('found no page %s in file group %s',\n page_id, ifg)\n ifiles.append(None)\n else:\n ifiles.append(files[0])\n if ifiles[0]:\n ifts.append(tuple(ifiles))\n return ifts\n\ndef _page_get_lines(pcgts):\n '''Get all TextLines in the page.\n \n Iterate the element hierarchy of the page `pcgts` down\n to the TextLine 
level. For each line, store the element\n ID and its first TextEquiv annotation.\n \n Return the stored dictionary.\n '''\n result = dict()\n regions = pcgts.get_Page().get_TextRegion()\n if not regions:\n LOG.warning(\"Page contains no text regions\")\n for region in regions:\n lines = region.get_TextLine()\n if not lines:\n LOG.warning(\"Region '%s' contains no text lines\", region.id)\n continue\n for line in lines:\n textequivs = line.get_TextEquiv()\n if not textequivs:\n LOG.warning(\"Line '%s' contains no text results\", line.id)\n continue\n result[line.id] = textequivs[0].Unicode\n return result\n \n", "id": "8779738", "language": "Python", "matching_score": 3.13592529296875, "max_stars_count": 0, "path": "ocrd_cor_asv_ann/wrapper/evaluate.py" }, { "content": "from __future__ import absolute_import\n\nimport os.path\nfrom PIL import Image\nimport numpy as np\nfrom skimage.filters import (\n threshold_niblack,\n threshold_li,\n threshold_local,\n threshold_otsu,\n threshold_sauvola,\n threshold_yen\n)\n\nfrom ocrd import Processor\nfrom ocrd_utils import (\n getLogger,\n make_file_id,\n assert_file_grp_cardinality,\n MIMETYPE_PAGE\n)\nfrom ocrd_modelfactory import page_from_file\nfrom ocrd_models.ocrd_page import (\n LabelType, LabelsType,\n MetadataItemType,\n AlternativeImageType,\n to_xml\n)\nfrom .config import OCRD_TOOL\n\nTOOL = 'ocrd-skimage-binarize'\nLOG = getLogger('processor.SkimageBinarize')\n\nclass SkimageBinarize(Processor):\n\n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]\n kwargs['version'] = OCRD_TOOL['version']\n super(SkimageBinarize, self).__init__(*args, **kwargs)\n \n def process(self):\n \"\"\"Performs binarization of segment or page images with scikit-image on the workspace.\n \n Open and deserialize PAGE input files and their respective images,\n then iterate over the element hierarchy down to the requested\n ``level-of-operation`` in the element hierarchy.\n \n For each segment element, retrieve a segment image according to\n the layout annotation (from an existing AlternativeImage, or by\n cropping via coordinates into the higher-level image, and -\n when applicable - deskewing).\n \n Next, binarize the image according to ``method`` with skimage.\n \n Then write the new image to the workspace along with the output fileGrp,\n and using a file ID with suffix ``.IMG-BIN`` with further identification\n of the input element.\n \n Produce a new PAGE output file by serialising the resulting hierarchy.\n \"\"\"\n oplevel = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n \n for (n, input_file) in enumerate(self.input_files):\n file_id = make_file_id(input_file, self.output_file_grp)\n page_id = input_file.pageId or input_file.ID\n LOG.info(\"INPUT FILE %i / %s\", n, page_id)\n \n pcgts = page_from_file(self.workspace.download_file(input_file))\n page = pcgts.get_Page()\n metadata = pcgts.get_Metadata() # ensured by from_file()\n metadata.add_MetadataItem(\n MetadataItemType(type_=\"processingStep\",\n name=self.ocrd_tool['steps'][0],\n value=TOOL,\n Labels=[LabelsType(\n externalModel=\"ocrd-tool\",\n externalId=\"parameters\",\n Label=[LabelType(type_=name,\n value=self.parameter[name])\n for name in self.parameter.keys()])]))\n \n for page in [page]:\n page_image, page_coords, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n dpi = 
self.parameter['dpi']\n LOG.info(\"Page '%s' images will use %d DPI from parameter override\", page_id, dpi)\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi = round(dpi * 2.54)\n LOG.info(\"Page '%s' images will use %d DPI from image meta-data\", page_id, dpi)\n else:\n dpi = 300\n LOG.info(\"Page '%s' images will use 300 DPI from fall-back\", page_id)\n # guess a useful window size if not given\n if not self.parameter['window_size']:\n def odd(n):\n return int(n) + int((n+1)%2)\n # use 1x1 inch square\n self.parameter['window_size'] = odd(dpi)\n if not self.parameter['k']:\n self.parameter['k'] = 0.34\n \n if oplevel == 'page':\n self._process_segment(page, page_image, page_coords,\n \"page '%s'\" % page_id, input_file.pageId,\n file_id + '.IMG-BIN')\n continue\n regions = page.get_AllRegions(classes=['Text'])\n if not regions:\n LOG.warning(\"Page '%s' contains no text regions\", page_id)\n for region in regions:\n region_image, region_coords = self.workspace.image_from_segment(\n region, page_image, page_coords, feature_filter='binarized')\n if oplevel == 'region':\n self._process_segment(region, region_image, region_coords,\n \"region '%s'\" % region.id, None,\n file_id + '.IMG-BIN_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n LOG.warning(\"Region '%s' contains no text lines\", region.id)\n for line in lines:\n line_image, line_coords = self.workspace.image_from_segment(\n line, region_image, region_coords, feature_filter='binarized')\n if oplevel == 'line':\n self._process_segment(line, line_image, line_coords,\n \"line '%s'\" % line.id, None,\n file_id + '.IMG-BIN_' + line.id)\n continue\n words = line.get_Word()\n if not words:\n LOG.warning(\"Line '%s' contains no words\", line.id)\n for word in words:\n word_image, word_coords = self.workspace.image_from_segment(\n word, line_image, line_coords, feature_filter='binarized')\n if oplevel == 'word':\n self._process_segment(word, word_image, word_coords,\n \"word '%s'\" % word.id, None,\n file_id + '.IMG-BIN_' + word.id)\n continue\n glyphs = word.get_Glyph()\n if not glyphs:\n LOG.warning(\"Word '%s' contains no glyphs\", word.id)\n for glyph in glyphs:\n glyph_image, glyph_coords = self.workspace.image_from_segment(\n glyph, word_image, word_coords, feature_filter='binarized')\n self._process_segment(glyph, glyph_image, glyph_coords,\n \"glyph '%s'\" % glyph.id, None,\n file_id + '.IMG-BIN_' + glyph.id)\n \n pcgts.set_pcGtsId(file_id)\n self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n mimetype=MIMETYPE_PAGE,\n local_filename=os.path.join(self.output_file_grp,\n file_id + '.xml'),\n content=to_xml(pcgts))\n \n def _process_segment(self, segment, image, coords, where, page_id, file_id):\n features = coords['features'] # features already applied to image\n features += ',binarized'\n method = self.parameter['method']\n array = np.array(image.convert('L'))\n if method == 'otsu':\n thres = threshold_otsu(array)\n elif method == 'li':\n thres = threshold_li(array)\n elif method == 'yen':\n thres = threshold_yen(array)\n elif method == 'gauss':\n thres = threshold_local(array, self.parameter['window_size'])\n elif method == 'niblack':\n thres = threshold_niblack(array, self.parameter['window_size'], self.parameter['k'])\n elif method == 'sauvola':\n thres = threshold_sauvola(array, self.parameter['window_size'], self.parameter['k'])\n array = array > thres\n image = 
Image.fromarray(array)\n # annotate results\n file_path = self.workspace.save_image_file(\n image,\n file_id,\n file_grp=self.output_file_grp,\n page_id=page_id)\n segment.add_AlternativeImage(AlternativeImageType(\n filename=file_path, comments=features))\n LOG.debug(\"Binarized image for %s saved as '%s'\", where, file_path)\n", "id": "3419124", "language": "Python", "matching_score": 8.434163093566895, "max_stars_count": 0, "path": "ocrd_wrap/skimage_binarize.py" }, { "content": "from __future__ import absolute_import\n\nimport os.path\nfrom PIL import Image\nimport numpy as np\nfrom skimage import img_as_float, img_as_uint, img_as_ubyte\nfrom skimage.color.adapt_rgb import adapt_rgb, hsv_value\nfrom skimage.exposure import rescale_intensity, equalize_adapthist\n\nfrom ocrd import Processor\nfrom ocrd_utils import (\n getLogger,\n make_file_id,\n assert_file_grp_cardinality,\n MIMETYPE_PAGE\n)\nfrom ocrd_modelfactory import page_from_file\nfrom ocrd_models.ocrd_page import (\n LabelType, LabelsType,\n MetadataItemType,\n AlternativeImageType,\n to_xml\n)\nfrom .config import OCRD_TOOL\n\nTOOL = 'ocrd-skimage-normalize'\nLOG = getLogger('processor.SkimageNormalize')\n\nclass SkimageNormalize(Processor):\n\n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]\n kwargs['version'] = OCRD_TOOL['version']\n super(SkimageNormalize, self).__init__(*args, **kwargs)\n \n def process(self):\n \"\"\"Performs contrast-enhancing equalization of segment or page images with scikit-image on the workspace.\n \n Open and deserialize PAGE input files and their respective images,\n then iterate over the element hierarchy down to the requested\n ``level-of-operation`` in the element hierarchy.\n \n For each segment element, retrieve a segment image according to\n the layout annotation (from an existing AlternativeImage, or by\n cropping via coordinates into the higher-level image, and -\n when applicable - deskewing), in raw (non-binarized) form.\n \n Next, normalize the image according to ``method`` in skimage.\n \n Then write the new image to the workspace along with the output fileGrp,\n and using a file ID with suffix ``.IMG-NRM`` with further identification\n of the input element.\n \n Produce a new PAGE output file by serialising the resulting hierarchy.\n \"\"\"\n oplevel = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n \n for (n, input_file) in enumerate(self.input_files):\n file_id = make_file_id(input_file, self.output_file_grp)\n page_id = input_file.pageId or input_file.ID\n LOG.info(\"INPUT FILE %i / %s\", n, page_id)\n \n pcgts = page_from_file(self.workspace.download_file(input_file))\n page = pcgts.get_Page()\n metadata = pcgts.get_Metadata() # ensured by from_file()\n metadata.add_MetadataItem(\n MetadataItemType(type_=\"processingStep\",\n name=self.ocrd_tool['steps'][0],\n value=TOOL,\n Labels=[LabelsType(\n externalModel=\"ocrd-tool\",\n externalId=\"parameters\",\n Label=[LabelType(type_=name,\n value=self.parameter[name])\n for name in self.parameter.keys()])]))\n \n for page in [page]:\n page_image, page_coords, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n dpi = self.parameter['dpi']\n LOG.info(\"Page '%s' images will use %d DPI from parameter override\", page_id, dpi)\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if 
page_image_info.resolutionUnit == 'cm':\n dpi = round(dpi * 2.54)\n LOG.info(\"Page '%s' images will use %d DPI from image meta-data\", page_id, dpi)\n else:\n dpi = 300\n LOG.info(\"Page '%s' images will use 300 DPI from fall-back\", page_id)\n \n if oplevel == 'page':\n self._process_segment(page, page_image, page_coords,\n \"page '%s'\" % page_id, input_file.pageId,\n file_id + '.IMG-NRM')\n continue\n regions = page.get_AllRegions(classes=['Text'])\n if not regions:\n LOG.warning(\"Page '%s' contains no text regions\", page_id)\n for region in regions:\n region_image, region_coords = self.workspace.image_from_segment(\n region, page_image, page_coords, feature_filter='binarized')\n if oplevel == 'region':\n self._process_segment(region, region_image, region_coords,\n \"region '%s'\" % region.id, None,\n file_id + '.IMG-NRM_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n LOG.warning(\"Region '%s' contains no text lines\", region.id)\n for line in lines:\n line_image, line_coords = self.workspace.image_from_segment(\n line, region_image, region_coords, feature_filter='binarized')\n if oplevel == 'line':\n self._process_segment(line, line_image, line_coords,\n \"line '%s'\" % line.id, None,\n file_id + '.IMG-NRM_' + line.id)\n continue\n words = line.get_Word()\n if not words:\n LOG.warning(\"Line '%s' contains no words\", line.id)\n for word in words:\n word_image, word_coords = self.workspace.image_from_segment(\n word, line_image, line_coords, feature_filter='binarized')\n if oplevel == 'word':\n self._process_segment(word, word_image, word_coords,\n \"word '%s'\" % word.id, None,\n file_id + '.IMG-NRM_' + word.id)\n continue\n glyphs = word.get_Glyph()\n if not glyphs:\n LOG.warning(\"Word '%s' contains no glyphs\", word.id)\n for glyph in glyphs:\n glyph_image, glyph_coords = self.workspace.image_from_segment(\n glyph, word_image, word_coords, feature_filter='binarized')\n self._process_segment(glyph, glyph_image, glyph_coords,\n \"glyph '%s'\" % glyph.id, None,\n file_id + '.IMG-NRM_' + glyph.id)\n \n pcgts.set_pcGtsId(file_id)\n self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n mimetype=MIMETYPE_PAGE,\n local_filename=os.path.join(self.output_file_grp,\n file_id + '.xml'),\n content=to_xml(pcgts))\n \n def _process_segment(self, segment, image, coords, where, page_id, file_id):\n features = coords['features'] # features already applied to image\n features += ',normalized'\n method = self.parameter['method']\n LOG.debug(\"processing %s image size %s mode %s with method %s\",\n coords['features'], str(image.size), str(image.mode), method)\n if image.mode == 'RGBA':\n image = image.convert('RGB')\n elif image.mode == 'LA':\n image = image.convert('L')\n rgb = image.mode == 'RGB'\n array = img_as_float(image)\n # Estimate the noise standard deviation across color channels.\n pctiles = np.percentile(array, (0.2, 99.8), axis=(0, 1))\n LOG.debug(\"2‰ percentiles before: %s\", pctiles)\n if method == 'stretch':\n @adapt_rgb(hsv_value)\n def normalize(a):\n # defaults: stretch from in_range='image' to out_range='dtype'\n v_min, v_max = np.percentile(a, (1.0, 99.0))\n return rescale_intensity(a, in_range=(v_min, v_max))\n array = normalize(array)\n elif method == 'adapthist':\n # (implicitly does hsv_value when RGB)\n # defaults: tiles with kernel_size 1/8 width and height\n array = equalize_adapthist(array)\n pctiles = np.percentile(array, (0.2, 99.8), axis=(0, 1))\n LOG.debug(\"2‰ percentiles after: %s\", 
pctiles)\n if image.mode in ['F', 'I']:\n array = img_as_uint(array)\n else:\n array = img_as_ubyte(array)\n image = Image.fromarray(array)\n # annotate results\n file_path = self.workspace.save_image_file(\n image,\n file_id,\n file_grp=self.output_file_grp,\n page_id=page_id)\n segment.add_AlternativeImage(AlternativeImageType(\n filename=file_path, comments=features))\n LOG.debug(\"Normalized image for %s saved as '%s'\", where, file_path)\n", "id": "364388", "language": "Python", "matching_score": 7.950172424316406, "max_stars_count": 0, "path": "ocrd_wrap/skimage_normalize.py" }, { "content": "from __future__ import absolute_import\n\nimport os.path\nfrom os import close, unlink, makedirs\nfrom tempfile import mkstemp\nimport subprocess\nfrom PIL import Image\n\nfrom ocrd import Processor\nfrom ocrd_utils import (\n getLogger,\n make_file_id,\n assert_file_grp_cardinality,\n MIMETYPE_PAGE,\n MIME_TO_PIL,\n MIME_TO_EXT\n)\nfrom ocrd_modelfactory import page_from_file\nfrom ocrd_models.ocrd_page import (\n LabelType, LabelsType,\n MetadataItemType,\n AlternativeImageType,\n to_xml\n)\nfrom .config import OCRD_TOOL\n\nTOOL = 'ocrd-preprocess-image'\nLOG = getLogger('processor.ShellPreprocessor')\n\nclass ShellPreprocessor(Processor):\n\n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]\n kwargs['version'] = OCRD_TOOL['version']\n super(ShellPreprocessor, self).__init__(*args, **kwargs)\n \n def process(self):\n \"\"\"Performs coords-preserving image operations via runtime shell calls anywhere.\n \n Open and deserialize PAGE input files and their respective images,\n then iterate over the element hierarchy down to the requested\n ``level-of-operation`` in the element hierarchy.\n \n For each segment element, retrieve a segment image according to\n the layout annotation (from an existing AlternativeImage, or by\n cropping via coordinates into the higher-level image, and -\n when applicable - deskewing.\n \n If ``input_feature_selector`` and/or ``input_feature_filter`` is\n non-empty, then select/filter among the @imageFilename image and\n the available AlternativeImages the last one which contains all\n of the selected, but none of the filtered features (i.e. 
@comments\n classes), or raise an error.\n \n Then write that image into a temporary PNG file, create a new METS file ID\n for the result image (based on the segment ID and the operation to be run),\n along with a local path for it, and pass ``command`` to the shell\n after replacing:\n - the string ``@INFILE`` with that input image path, and\n - the string ``@OUTFILE`` with that output image path.\n \n If the shell returns with a failure, skip that segment with an\n approriate error message.\n Otherwise, add the new image to the workspace along with the\n output fileGrp, and using a file ID with suffix ``.IMG-``,\n and further identification of the input element.\n \n Reference it as AlternativeImage in the element,\n adding ``output_feature_added`` to its @comments.\n \n Produce a new PAGE output file by serialising the resulting hierarchy.\n \"\"\"\n oplevel = self.parameter['level-of-operation']\n feature_selector = self.parameter['input_feature_selector']\n feature_filter = self.parameter['input_feature_filter']\n command = self.parameter['command']\n if '@INFILE' not in command:\n raise Exception(\"command parameter requires @INFILE pattern\")\n if '@OUTFILE' not in command:\n raise Exception(\"command parameter requires @OUTFILE pattern\")\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n \n for (n, input_file) in enumerate(self.input_files):\n file_id = make_file_id(input_file, self.output_file_grp)\n page_id = input_file.pageId or input_file.ID\n LOG.info(\"INPUT FILE %i / %s\", n, page_id)\n pcgts = page_from_file(self.workspace.download_file(input_file))\n page = pcgts.get_Page()\n metadata = pcgts.get_Metadata() # ensured by from_file()\n metadata.add_MetadataItem(\n MetadataItemType(type_=\"processingStep\",\n name=self.ocrd_tool['steps'][0],\n value=TOOL,\n Labels=[LabelsType(\n externalModel=\"ocrd-tool\",\n externalId=\"parameters\",\n Label=[LabelType(type_=name,\n value=self.parameter[name])\n for name in self.parameter.keys()])]))\n\n for page in [page]:\n page_image, page_coords, _ = self.workspace.image_from_page(\n page, page_id,\n feature_filter=feature_filter, feature_selector=feature_selector)\n if oplevel == 'page':\n self._process_segment(page, page_image, page_coords,\n \"page '%s'\" % page_id, input_file.pageId,\n file_id)\n continue\n regions = page.get_AllRegions(classes=['Text'])\n if not regions:\n LOG.warning(\"Page '%s' contains no text regions\", page_id)\n for region in regions:\n region_image, region_coords = self.workspace.image_from_segment(\n region, page_image, page_coords,\n feature_filter=feature_filter, feature_selector=feature_selector)\n if oplevel == 'region':\n self._process_segment(region, region_image, region_coords,\n \"region '%s'\" % region.id, None,\n file_id + '_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n LOG.warning(\"Region '%s' contains no text lines\", region.id)\n for line in lines:\n line_image, line_coords = self.workspace.image_from_segment(\n line, region_image, region_coords,\n feature_filter=feature_filter, feature_selector=feature_selector)\n if oplevel == 'line':\n self._process_segment(line, line_image, line_coords,\n \"line '%s'\" % line.id, None,\n file_id + '_' + line.id)\n continue\n words = line.get_Word()\n if not words:\n LOG.warning(\"Line '%s' contains no words\", line.id)\n for word in words:\n word_image, word_coords = self.workspace.image_from_segment(\n word, line_image, line_coords,\n feature_filter=feature_filter, 
feature_selector=feature_selector)\n if oplevel == 'word':\n self._process_segment(word, word_image, word_coords,\n \"word '%s'\" % word.id, None,\n file_id + '_' + word.id)\n continue\n glyphs = word.get_Glyph()\n if not glyphs:\n LOG.warning(\"Word '%s' contains no glyphs\", word.id)\n for glyph in glyphs:\n glyph_image, glyph_coords = self.workspace.image_from_segment(\n glyph, word_image, word_coords,\n feature_filter=feature_filter, feature_selector=feature_selector)\n self._process_segment(glyph, glyph_image, glyph_coords,\n \"glyph '%s'\" % glyph.id, None,\n file_id + '_' + glyph.id)\n \n pcgts.set_pcGtsId(file_id)\n self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n mimetype=MIMETYPE_PAGE,\n local_filename=os.path.join(self.output_file_grp,\n file_id + '.xml'),\n content=to_xml(pcgts))\n \n def _process_segment(self, segment, image, coords, where, page_id, file_id):\n features = coords['features'] # features already applied to image\n feature_added = self.parameter['output_feature_added']\n if feature_added:\n features += ',' + feature_added\n command = self.parameter['command']\n input_mime = self.parameter['input_mimetype']\n output_mime = self.parameter['output_mimetype']\n # save retrieved segment image to temporary file\n in_fd, in_fname = mkstemp(suffix=file_id + MIME_TO_EXT[input_mime])\n image.save(in_fname, format=MIME_TO_PIL[input_mime])\n # prepare output file name\n out_id = file_id + '.IMG-' + feature_added.upper().replace(',', '-')\n out_fname = os.path.join(self.output_file_grp,\n out_id + MIME_TO_EXT[output_mime])\n if not os.path.exists(self.output_file_grp):\n makedirs(self.output_file_grp)\n # remove quotation around filename patterns, if any\n command = command.replace('\"@INFILE\"', '@INFILE')\n command = command.replace('\"@OUTFILE\"', '@OUTFILE')\n command = command.replace(\"'@INFILE'\", '@INFILE')\n command = command.replace(\"'@OUTFILE'\", '@OUTFILE')\n # replace filename patterns with actual paths, quoted\n command = command.replace('@INFILE', '\"' + in_fname + '\"')\n command = command.replace('@OUTFILE', '\"' + out_fname + '\"')\n # execute command pattern\n LOG.debug(\"Running command: '%s'\", command)\n # pylint: disable=subprocess-run-check\n result = subprocess.run(command, shell=True,\n universal_newlines=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n close(in_fd)\n unlink(in_fname)\n LOG.debug(\"Command for %s returned: %d\", where, result.returncode)\n if result.stdout:\n LOG.info(\"Command for %s stdout: %s\", where, result.stdout)\n if result.stderr:\n LOG.warning(\"Command for %s stderr: %s\", where, result.stderr)\n if result.returncode != 0:\n LOG.error(\"Command for %s failed\", where)\n if os.path.exists(out_fname):\n unlink(out_fname)\n return\n # check resulting image\n with Image.open(out_fname) as image2:\n if image.size != image2.size:\n LOG.error(\"Command for %s produced image of different size (%s vs %s)\",\n where, str(image.size), str(image2.size))\n return\n # annotate results\n self.workspace.add_file(\n ID=out_id,\n local_filename=out_fname,\n file_grp=self.output_file_grp,\n pageId=page_id,\n mimetype=output_mime)\n LOG.info(\"created file ID: %s, file_grp: %s, path: %s\",\n out_id, self.output_file_grp, out_fname)\n segment.add_AlternativeImage(AlternativeImageType(\n filename=out_fname, comments=features))\n", "id": "6454564", "language": "Python", "matching_score": 4.426897048950195, "max_stars_count": 0, "path": "ocrd_wrap/shell.py" }, { "content": "from 
__future__ import absolute_import\n\nfrom tesserocr import PyTessBaseAPI, PSM, get_languages\nfrom ocrd.utils import getLogger, mets_file_id, xywh_from_points\nfrom ocrd.model.ocrd_page import from_file, to_xml, TextEquivType\nfrom ocrd import Processor, MIMETYPE_PAGE\nfrom ocrd_tesserocr.config import TESSDATA_PREFIX\n\nlog = getLogger('processor.TesserocrRecognize')\n\nDEFAULT_MODEL = get_languages()[1][-1]\n\nclass TesserocrRecognize(Processor):\n\n def process(self):\n \"\"\"\n Performs the (text) recognition.\n \"\"\"\n print(self.parameter)\n with PyTessBaseAPI(path=TESSDATA_PREFIX, lang=DEFAULT_MODEL) as tessapi:\n log.info(\"Using model %s in %s for recognition\", get_languages()[0], get_languages()[1][-1])\n for (n, input_file) in enumerate(self.input_files):\n log.info(\"INPUT FILE %i / %s\", n, input_file)\n pcgts = from_file(self.workspace.download_file(input_file))\n # TODO use binarized / gray\n pil_image = self.workspace.resolve_image_as_pil(pcgts.get_Page().imageFilename)\n tessapi.SetImage(pil_image)\n # TODO slow\n # tessapi.SetPageSegMode(PSM.SINGLE_LINE)\n log.info(\"page %s\", pcgts)\n for region in pcgts.get_Page().get_TextRegion():\n textlines = region.get_TextLine()\n log.info(\"About to recognize text in %i lines of region '%s'\", len(textlines), region.id)\n for line in textlines:\n log.debug(\"Recognizing text in line '%s'\", line.id)\n xywh = xywh_from_points(line.get_Coords().points)\n tessapi.SetRectangle(xywh['x'], xywh['y'], xywh['w'], xywh['h'])\n # log.debug(\"xywh: %s\", xywh)\n line.add_TextEquiv(TextEquivType(Unicode=tessapi.GetUTF8Text()))\n # tessapi.G\n # print(tessapi.AllWordConfidences())\n ID = mets_file_id(self.output_file_grp, n)\n self.add_output_file(\n ID=ID,\n file_grp=self.output_file_grp,\n basename=ID + '.xml',\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts).encode('utf-8'),\n )\n", "id": "3297177", "language": "Python", "matching_score": 7.1028971672058105, "max_stars_count": 0, "path": "ocrd_tesserocr/recognize.py" }, { "content": "from __future__ import absolute_import\nfrom tesserocr import RIL, PyTessBaseAPI, OEM, PSM\nfrom ocrd import Processor, MIMETYPE_PAGE\nfrom ocrd_tesserocr.config import TESSDATA_PREFIX\nfrom ocrd.utils import getLogger, mets_file_id, points_from_xywh, polygon_from_points, xywh_from_points\nfrom ocrd.model.ocrd_page import (\n CoordsType,\n WordType,\n from_file,\n to_xml\n)\n\nlog = getLogger('processor.TesserocrSegmentWord')\n\nclass TesserocrSegmentWord(Processor):\n\n def process(self):\n \"\"\"\n Performs the line segmentation.\n \"\"\"\n with PyTessBaseAPI(\n psm=PSM.SINGLE_LINE,\n path=TESSDATA_PREFIX,\n ) as tessapi:\n for (n, input_file) in enumerate(self.input_files):\n pcgts = from_file(self.workspace.download_file(input_file))\n image_url = pcgts.get_Page().imageFilename\n for region in pcgts.get_Page().get_TextRegion():\n for line in region.get_TextLine():\n log.debug(\"Detecting words in line '%s'\", line.id)\n image = self.workspace.resolve_image_as_pil(image_url, polygon_from_points(line.get_Coords().points))\n tessapi.SetImage(image)\n offset = xywh_from_points(line.get_Coords().points)\n for (word_no, component) in enumerate(tessapi.GetComponentImages(RIL.WORD, True)):\n word_id = '%s_word%04d' % (line.id, word_no)\n word_xywh = component[1]\n word_xywh['x'] += offset['x']\n word_xywh['y'] += offset['y']\n line.add_Word(WordType(id=word_id, Coords=CoordsType(points_from_xywh(word_xywh))))\n ID = mets_file_id(self.output_file_grp, n)\n self.add_output_file(\n ID=ID,\n 
file_grp=self.output_file_grp,\n basename=ID + '.xml',\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts).encode('utf-8'),\n )\n", "id": "12748722", "language": "Python", "matching_score": 7.9628753662109375, "max_stars_count": 0, "path": "ocrd_tesserocr/segment_word.py" }, { "content": "from __future__ import absolute_import\nfrom tesserocr import PyTessBaseAPI, RIL\nfrom ocrd import Processor, MIMETYPE_PAGE\nfrom ocrd_tesserocr.config import TESSDATA_PREFIX\nfrom ocrd.utils import getLogger, mets_file_id, points_from_xywh, polygon_from_points, xywh_from_points\nfrom ocrd.model.ocrd_page import (\n CoordsType,\n TextLineType,\n from_file,\n to_xml\n)\n\nlog = getLogger('processor.TesserocrSegmentLine')\n\nclass TesserocrSegmentLine(Processor):\n\n def process(self):\n \"\"\"\n Performs the line segmentation.\n \"\"\"\n with PyTessBaseAPI(path=TESSDATA_PREFIX) as tessapi:\n for (n, input_file) in enumerate(self.input_files):\n pcgts = from_file(self.workspace.download_file(input_file))\n image_url = pcgts.get_Page().imageFilename\n for region in pcgts.get_Page().get_TextRegion():\n log.debug(\"Detecting lines in %s with tesseract\", region.id)\n image = self.workspace.resolve_image_as_pil(image_url, polygon_from_points(region.get_Coords().points))\n tessapi.SetImage(image)\n offset = xywh_from_points(region.get_Coords().points)\n for (line_no, component) in enumerate(tessapi.GetComponentImages(RIL.TEXTLINE, True)):\n line_id = '%s_line%04d' % (region.id, line_no)\n line_xywh = component[1]\n line_xywh['x'] += offset['x']\n line_xywh['y'] += offset['y']\n line_points = points_from_xywh(line_xywh)\n region.add_TextLine(TextLineType(id=line_id, Coords=CoordsType(line_points)))\n ID = mets_file_id(self.output_file_grp, n)\n self.add_output_file(\n ID=ID,\n file_grp=self.output_file_grp,\n basename=ID + '.xml',\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts).encode('utf-8'),\n )\n", "id": "7129251", "language": "Python", "matching_score": 6.520130157470703, "max_stars_count": 0, "path": "ocrd_tesserocr/segment_line.py" }, { "content": "from __future__ import absolute_import\nimport tesserocr\nfrom ocrd.utils import getLogger, mets_file_id, points_from_xywh\nfrom ocrd.model.ocrd_page import (\n ReadingOrderType,\n RegionRefIndexedType,\n TextRegionType,\n CoordsType,\n OrderedGroupType,\n from_file,\n to_xml\n)\nfrom ocrd import Processor, MIMETYPE_PAGE\nfrom ocrd_tesserocr.config import TESSDATA_PREFIX\n\nlog = getLogger('processor.TesserocrSegmentRegion')\n\nclass TesserocrSegmentRegion(Processor):\n\n def process(self):\n \"\"\"\n Performs the region segmentation.\n \"\"\"\n with tesserocr.PyTessBaseAPI(path=TESSDATA_PREFIX) as tessapi:\n print(self.input_file_grp)\n for (n, input_file) in enumerate(self.input_files):\n pcgts = from_file(self.workspace.download_file(input_file))\n image = self.workspace.resolve_image_as_pil(pcgts.get_Page().imageFilename)\n log.debug(\"Detecting regions with tesseract\")\n tessapi.SetImage(image)\n for component in tessapi.GetComponentImages(tesserocr.RIL.BLOCK, True):\n points, index = points_from_xywh(component[1]), component[2]\n\n #\n # the region reference in the reading order element\n #\n ID = \"region%04d\" % index\n log.debug(\"Detected region '%s': %s\", ID, points)\n # <pg:ReadingOrder>\n ro = pcgts.get_Page().get_ReadingOrder()\n if ro is None:\n ro = ReadingOrderType()\n pcgts.get_Page().set_ReadingOrder(ro)\n # <pg:OrderedGroup>\n og = ro.get_OrderedGroup()\n if og is None:\n og = OrderedGroupType(id=\"reading-order\")\n 
ro.set_OrderedGroup(og)\n # <pg:RegionRefIndexed>\n og.add_RegionRefIndexed(RegionRefIndexedType(regionRef=ID, index=index))\n\n #\n # text region\n #\n pcgts.get_Page().add_TextRegion(TextRegionType(id=ID, Coords=CoordsType(points=points)))\n\n ID = mets_file_id(self.output_file_grp, n)\n self.add_output_file(\n ID=ID,\n file_grp=self.output_file_grp,\n basename=ID + '.xml',\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts).encode('utf-8'),\n )\n", "id": "6358084", "language": "Python", "matching_score": 1.9493286609649658, "max_stars_count": 0, "path": "ocrd_tesserocr/segment_region.py" }, { "content": "import os\nimport tesserocr\nTESSDATA_PREFIX = os.environ['TESSDATA_PREFIX'] if 'TESSDATA_PREFIX' in os.environ else tesserocr.get_languages()[0]\n", "id": "5210711", "language": "Python", "matching_score": 0.5659877061843872, "max_stars_count": 0, "path": "ocrd_tesserocr/config.py" }, { "content": "import os\n\nimport click\nfrom jinja2 import Environment, FileSystemLoader\n\n\nfrom qurator.dinglehopper import *\n\n\ndef gen_diff_report(gt_things, ocr_things, css_prefix, joiner, none, align):\n gtx = ''\n ocrx = ''\n\n def format_thing(t, css_classes=None):\n if t is None:\n t = none\n css_classes += ' ellipsis'\n if t == '\\n':\n t = '<br>'\n\n if css_classes:\n return '<span class=\"{css_classes}\">{t}</span>'.format(css_classes=css_classes, t=t)\n else:\n return '{t}'.format(t=t)\n\n for k, (g, o) in enumerate(align(gt_things, ocr_things)):\n if g == o:\n css_classes = None\n else:\n css_classes = '{css_prefix}diff{k} diff'.format(css_prefix=css_prefix, k=k)\n\n gtx += joiner + format_thing(g, css_classes)\n ocrx += joiner + format_thing(o, css_classes)\n\n return \\\n '''\n <div class=\"row\">\n <div class=\"col-md-6 gt\">{}</div>\n <div class=\"col-md-6 ocr\">{}</div>\n </div>\n '''.format(gtx, ocrx)\n\n\ndef process(gt, ocr, report_prefix):\n \"\"\"Check OCR result against GT.\n\n The @click decorators change the signature of the decorated functions, so we keep this undecorated version and use\n Click on a wrapper.\n \"\"\"\n\n gt_text = text(gt)\n ocr_text = text(ocr)\n\n gt_text = substitute_equivalences(gt_text)\n ocr_text = substitute_equivalences(ocr_text)\n\n cer = character_error_rate(gt_text, ocr_text)\n wer = word_error_rate(gt_text, ocr_text)\n uwer = unordered_word_error_rate(gt_text, ocr_text)\n\n char_diff_report = gen_diff_report(gt_text, ocr_text, css_prefix='c', joiner='', none='·', align=align)\n\n gt_words = words_normalized(gt_text)\n ocr_words = words_normalized(ocr_text)\n word_diff_report = gen_diff_report(gt_words, ocr_words, css_prefix='w', joiner=' ', none='⋯', align=seq_align)\n\n env = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')))\n for report_suffix in ('.html', '.json'):\n template_fn = 'report' + report_suffix + '.j2'\n out_fn = report_prefix + report_suffix\n\n template = env.get_template(template_fn)\n template.stream(\n gt=gt, ocr=ocr,\n cer=cer, wer=wer, uwer=uwer,\n char_diff_report=char_diff_report,\n word_diff_report=word_diff_report\n ).dump(out_fn)\n\n\[email protected]()\[email protected]('gt', type=click.Path(exists=True))\[email protected]('ocr', type=click.Path(exists=True))\[email protected]('report_prefix', type=click.Path(), default='report')\ndef main(gt, ocr, report_prefix):\n process(gt, ocr, report_prefix)\n\n\nif __name__ == '__main__':\n main()\n", "id": "5345030", "language": "Python", "matching_score": 0.8503869771957397, "max_stars_count": 0, "path": 
"qurator/dinglehopper/cli.py" }, { "content": "from ocrd_cor_asv_fst.lib.helper import transducer_from_dict\n\nimport pynini\nimport unittest\n\n\nclass HelperTest(unittest.TestCase):\n\n def test_transducer_from_dict(self):\n testdata_in = { 'abc' : 3.5, 'düh' : 5.7, 'a\\u0364hi' : 9.2 }\n tr = transducer_from_dict(testdata_in)\n self.assertIsInstance(tr, pynini.Fst)\n testdata_out = { str_in : float(weight) \\\n for str_in, str_out, weight in tr.paths().items() }\n for key in set(testdata_in.keys()) | set(testdata_out.keys()):\n self.assertAlmostEqual(\n testdata_in[key], testdata_out[key], places=5)\n\n", "id": "6791938", "language": "Python", "matching_score": 0.994768500328064, "max_stars_count": 11, "path": "tests/helper.py" }, { "content": "from collections import namedtuple\nimport logging\nimport networkx as nx\nfrom operator import itemgetter\nimport pynini\nimport time\n\nfrom ocrd_models.ocrd_page import TextEquivType\n\nfrom .helper import escape_for_pynini\n\n\ndef _print_paths(paths):\n paths_lst = [(output_str, float(weight)) \\\n for input_str, output_str, weight in paths.items()]\n paths_lst.sort(key=itemgetter(1))\n if not paths_lst:\n logging.debug('no paths!')\n else:\n for p in paths_lst:\n logging.debug('{}\\t{}'.format(*p))\n\n\ndef split_input_string(string):\n '''Split an input string around whitespaces.'''\n return string.split()\n\n\ndef create_window(tokens):\n '''\n Create a window for the given input tokens (supplied as a list of\n strings).\n '''\n result = pynini.acceptor(escape_for_pynini(' '.join(tokens)))\n return result\n\n\ndef process_window(input_str, window_fst, model,\n beam_width=5, rejection_weight=1.5):\n '''\n Compose a window input automaton with the model.\n '''\n t1 = time.time()\n window_fst.relabel_tables(\n new_isymbols=model[0].output_symbols(),\n new_osymbols=model[0].output_symbols())\n for fst in model:\n window_fst = pynini.compose(window_fst, fst)\n window_fst.project(project_output=True)\n window_fst.prune(weight=beam_width)\n window_fst.optimize()\n t3 = time.time()\n logging.debug('- composition: {}s'.format(t3-t1))\n # allow also identity for windows of length 1\n # (with weight `rejection_weight`)\n if ' ' not in input_str:\n # The formula:\n # rejection_weight*(len(input_str)+2)\n # means that rejection_weight*2 is the initial cost of having an OOV\n # word (which is than more expensive with increasing length).\n # While discovered by accident, this turned out to work well as\n # a very naive OOV word model.\n window_fst.union(\n pynini.acceptor(\n escape_for_pynini(input_str),\n weight=rejection_weight*(len(input_str)+2)))\n t2 = time.time()\n logging.debug('Total processing time: {}s'.format(t2-t1))\n return window_fst\n\n\ndef recombine_windows(window_fsts):\n '''\n Recombine processed window FSTs (containing hypotheses for a given\n window) to a lattice, which is also represented as an FST.\n '''\n\n def _label(pos, length):\n return 'WIN-{}-{}'.format(pos, length)\n \n t1 = time.time()\n space_tr = pynini.acceptor(' ')\n\n # determine the input string length and max. 
window size\n # (TODO without iterating!!!)\n num_tokens = max(i for (i, j) in window_fsts)+1\n max_window_size = max(j for (i, j) in window_fsts)\n\n root = pynini.Fst()\n for i in range(num_tokens+1):\n s = root.add_state()\n root.set_start(0)\n root.set_final(num_tokens, 0)\n\n # FIXME refactor the merging of symbol tables into a separate function\n symbol_table = pynini.SymbolTable()\n for window_fst in window_fsts.values():\n symbol_table = pynini.merge_symbol_table(symbol_table, window_fst.input_symbols())\n symbol_table = pynini.merge_symbol_table(symbol_table, window_fst.output_symbols())\n for (pos, length), window_fst in window_fsts.items():\n label = _label(pos, length)\n sym = symbol_table.add_symbol(label)\n\n root.set_input_symbols(symbol_table)\n root.set_output_symbols(symbol_table)\n\n replacements = []\n for (pos, length), window_fst in window_fsts.items():\n label = _label(pos, length)\n sym = root.output_symbols().find(label)\n if pos+length < num_tokens:\n # append a space if this is not the last token, so that the final\n # string consists of tokens separated by spaces\n window_fst.concat(space_tr)\n replacements.append((label, window_fst))\n root.add_arc(pos, pynini.Arc(0, sym, 0, pos+length))\n\n result = pynini.replace(root, replacements)\n result.optimize()\n\n t2 = time.time()\n logging.debug('Recombining time: {}s'.format(t2-t1))\n\n return result\n\n\ndef lexicon_to_window_fst(lexicon_fst, words_per_window=2):\n '''\n Concatenate the lexicon FST `words_per_window` times, inserting\n spaces in between. The resulting FST accepts up to\n `words_per_window` words from the lexicon.\n '''\n result = lexicon_fst.copy()\n if words_per_window == 1:\n return result\n result.concat(pynini.acceptor(' '))\n result.closure(0, words_per_window-1)\n result.concat(lexicon_fst)\n return result\n\n\ndef lattice_shortest_path(lattice_fst):\n '''\n Extract the shortest path (i.e. with the lowest weight) from a\n lattice of hypotheses represented as an FST.\n '''\n return pynini.shortestpath(lattice_fst).stringify()\n\n\ndef combine_windows_to_graph(windows):\n '''\n Combine windows FSTs containing hypotheses for given windows to a\n graph of hypotheses in `nx.DiGraph` format, with decoding\n alternatives represented as `TextEquivType` at the edges. This is\n suitable for decoding data supplied in PageXML input format.\n\n The windows are passed as a dictionary:\n (starting_position, length) -> window_fst\n '''\n graph = nx.DiGraph()\n line_end_node = max(i+j for i, j in windows)\n graph.add_nodes_from(range(line_end_node + 1))\n for (i, j), fst in windows.items():\n start_node = i\n end_node = i + j\n paths = [(output_str, float(weight)) \\\n for input_str, output_str, weight in \\\n fst.paths().items()]\n if paths:\n for path in paths:\n logging.debug('({}, {}, \\'{}\\', {})'.format(\\\n start_node, end_node, path[0], pow(2, -path[1])))\n graph.add_edge(\n start_node, end_node, element=None,\n alternatives=[\n TextEquivType(Unicode=path[0], conf=pow(2, -path[1])) \\\n for path in paths \\\n ])\n else:\n logging.warning('No path from {} to {}.'.format(i, i+j))\n return graph\n\n\nclass FSTLatticeGenerator:\n '''\n This is the class responsible for generating lattices from input\n strings using the FST error model and lexicon. The lattices may be\n returned in two different output formats:\n - FST -- This allows for very fast search of a best path. 
It is the\n preferred format if no rescoring (with a language model) is\n applied afterwards.\n - networkx -- This returns the lattice as a `networkx.DiGraph`. It\n is slower, but allows for rescoring.\n The output format has to be passed to the constructor, because\n working with two formats simultaneously is never necessary.\n '''\n\n def __init__(self, lexicon_file, error_model_file = None,\n lattice_format = 'fst', **kwargs):\n # load all transducers and build a model out of them\n self.lexicon_fst = pynini.Fst.read(lexicon_file)\n self.window_fst = lexicon_to_window_fst(\n self.lexicon_fst,\n kwargs['words_per_window'])\n self.window_fst.arcsort()\n self.error_fst = pynini.Fst.read(error_model_file) \\\n if error_model_file \\\n else None\n self.rejection_weight = kwargs['rejection_weight']\n self.beam_width = kwargs['beam_width']\n self.max_window_size = 2 # TODO expose as a parameter\n self.lattice_format = lattice_format\n\n def lattice_from_string(self, string):\n windows = {}\n tokens = split_input_string(string)\n for i in range(len(tokens)):\n for j in range(1, min(self.max_window_size+1, len(tokens)-i+1)):\n windows[(i,j)] = create_window(tokens[i:i+j])\n # compose each window with the model\n for (i, j) in windows:\n logging.debug('Processing window ({}, {})'.format(i, j))\n windows[(i,j)] = process_window(\n ' '.join(tokens[i:i+j]),\n windows[(i,j)],\n (self.error_fst, self.window_fst),\n beam_width=self.beam_width,\n rejection_weight=self.rejection_weight)\n _print_paths(windows[(i,j)].paths())\n\n # recombine the windows to a lattice represented in the desired format\n if self.lattice_format == 'fst':\n return recombine_windows(windows)\n elif self.lattice_format == 'networkx':\n return combine_windows_to_graph(windows)\n else:\n raise RuntimeError('Invaild lattice format: {}'\\\n .format(self.lattice_format))\n\n", "id": "11795990", "language": "Python", "matching_score": 2.864187240600586, "max_stars_count": 0, "path": "ocrd_cor_asv_fst/lib/latticegen.py" }, { "content": "from __future__ import absolute_import\n\nimport os\nfrom functools import reduce\nimport numpy as np\n\nfrom ocrd import Processor\nfrom ocrd_utils import getLogger, concat_padded, xywh_from_points, points_from_xywh, MIMETYPE_PAGE\nfrom ocrd_modelfactory import page_from_file\nfrom ocrd_models.ocrd_page import (\n to_xml,\n WordType, CoordsType, TextEquivType,\n MetadataItemType, LabelsType, LabelType\n)\n\nfrom .config import OCRD_TOOL\nfrom ..lib.seq2seq import Sequence2Sequence, GAP\n\nLOG = getLogger('processor.ANNCorrection')\nTOOL_NAME = 'ocrd-cor-asv-ann-process'\n\nclass ANNCorrection(Processor):\n \n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL_NAME]\n kwargs['version'] = OCRD_TOOL['version']\n super(ANNCorrection, self).__init__(*args, **kwargs)\n if not hasattr(self, 'workspace') or not self.workspace:\n # no parameter/workspace for --dump-json or --version (no processing)\n return\n \n if not 'TF_CPP_MIN_LOG_LEVEL' in os.environ:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n \n self.s2s = Sequence2Sequence(logger=LOG, progbars=True)\n self.s2s.load_config(self.parameter['model_file'])\n self.s2s.configure()\n self.s2s.load_weights(self.parameter['model_file'])\n \n def process(self):\n \"\"\"Perform OCR post-correction with encoder-attention-decoder ANN on the workspace.\n \n Open and deserialise PAGE input files, then iterate over the element hierarchy\n down to the requested `textequiv_level`, making sequences of TextEquiv objects\n as lists of 
lines. Concatenate their string values, obeying rules of implicit\n whitespace, and map the string positions where the objects start.\n \n Next, transcode the input lines into output lines in parallel, and use\n the retrieved soft alignment scores to calculate hard alignment paths\n between input and output string via Viterbi decoding. Then use those\n to map back the start positions and overwrite each TextEquiv with its\n new content, paying special attention to whitespace:\n \n Distribute edits such that whitespace objects cannot become more than whitespace\n (or be deleted) and that non-whitespace objects must not start of end with\n whitespace (but may contain new whitespace in the middle).\n \n Subsequently, unless processing on the `line` level, make the Word segmentation\n consistent with that result again: merge around deleted whitespace tokens and\n split at whitespace inside non-whitespace tokens.\n \n Finally, make the levels above `textequiv_level` consistent with that\n textual result (by concatenation joined by whitespace).\n \n Produce new output files by serialising the resulting hierarchy.\n \"\"\"\n # Dragging Word/TextLine references along in all lists besides TextEquiv\n # is necessary because the generateDS version of the PAGE-XML model\n # has no references upwards in the hierarchy (from TextEquiv to containing\n # elements, from Glyph/Word/TextLine to Word/TextLine/TextRegion), and\n # its classes are not hashable.\n level = self.parameter['textequiv_level']\n for n, input_file in enumerate(self.input_files):\n LOG.info(\"INPUT FILE %i / %s\", n, input_file)\n pcgts = page_from_file(self.workspace.download_file(input_file))\n LOG.info(\"Correcting text in page '%s' at the %s level\", pcgts.get_pcGtsId(), level)\n \n # annotate processing metadata:\n metadata = pcgts.get_Metadata() # ensured by from_file()\n metadata.add_MetadataItem(\n MetadataItemType(type_=\"processingStep\",\n name=OCRD_TOOL['tools'][TOOL_NAME]['steps'][0],\n value=TOOL_NAME,\n # FIXME: externalRef is invalid by pagecontent.xsd, but ocrd does not reflect this\n # what we want here is `externalModel=\"ocrd-tool\" externalId=\"parameters\"`\n Labels=[LabelsType(#externalRef=\"parameters\",\n Label=[LabelType(type_=name,\n value=self.parameter[name])\n for name in self.parameter.keys()])]))\n \n # get textequiv references for all lines:\n # FIXME: conf with TextEquiv alternatives\n line_sequences = _page_get_line_sequences_at(level, pcgts)\n\n # concatenate to strings and get dict of start positions to refs:\n input_lines, conf, textequiv_starts, word_starts, textline_starts = (\n _line_sequences2string_sequences(self.s2s.mapping[0], line_sequences))\n \n # correct string and get input-output alignment:\n # FIXME: split into self.batch_size chunks\n output_lines, output_probs, output_scores, alignments = (\n self.s2s.correct_lines(input_lines, conf, fast=True, greedy=True))\n \n # re-align (from alignment scores) and overwrite the textequiv references:\n for (input_line, output_line, output_prob,\n score, alignment,\n textequivs, words, textlines) in zip(\n input_lines, output_lines, output_probs,\n output_scores, alignments,\n textequiv_starts, word_starts, textline_starts):\n LOG.debug('\"%s\" -> \"%s\"', input_line.rstrip('\\n'), output_line.rstrip('\\n'))\n \n # convert soft scores (seen from output) to hard path (seen from input):\n realignment = _alignment2path(alignment, len(input_line), len(output_line),\n 1. 
/ self.s2s.voc_size)\n \n # overwrite TextEquiv references:\n new_sequence = _update_sequence(\n input_line, output_line, output_prob,\n score, realignment,\n textequivs, words, textlines)\n \n # update Word segmentation:\n if level != 'line':\n _resegment_sequence(new_sequence, level)\n \n LOG.info('corrected line with %d elements, ppl: %.3f', len(new_sequence), np.exp(score))\n \n # make higher levels consistent again:\n page_update_higher_textequiv_levels(level, pcgts)\n \n # write back result to new annotation:\n file_id = concat_padded(self.output_file_grp, n)\n self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n local_filename=os.path.join(self.output_file_grp, file_id + '.xml'), # with suffix or bare?\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n \ndef _page_get_line_sequences_at(level, pcgts):\n '''Get TextEquiv sequences for PAGE-XML hierarchy level including whitespace.\n \n Return a list of lines from the document `pcgts`,\n where each line is a list of 3-tuples containing\n TextEquiv / Word / TextLine objects from the given\n hierarchy `level`. This includes artificial objects\n for implicit whitespace between elements (marked by\n `index=-1`, which is forbidden in the XML Schema).\n \n (If `level` is `glyph`, then the Word reference\n will be the Word that contains the Glyph which\n contains the TextEquiv.\n If `level` is `word`, then the Word reference\n will be the Word which contains the TextEquiv.\n If `level` is `line`, then the Word reference\n will be None.)\n '''\n sequences = list()\n word = None # make accessible after loop\n line = None # make accessible after loop\n regions = pcgts.get_Page().get_TextRegion()\n if not regions:\n LOG.warning(\"Page contains no text regions\")\n first_region = True\n for region in regions:\n lines = region.get_TextLine()\n if not lines:\n LOG.warning(\"Region '%s' contains no text lines\", region.id)\n continue\n if not first_region:\n sequences[-1].append((TextEquivType(Unicode='\\n', conf=1.0, index=-1), word, line))\n first_region = False\n first_line = True\n for line in lines:\n if not first_line:\n sequences[-1].append((TextEquivType(Unicode='\\n', conf=1.0, index=-1), word, line))\n sequences.append([])\n first_line = False\n if level == 'line':\n #LOG.debug(\"Getting text in line '%s'\", line.id)\n textequivs = line.get_TextEquiv()\n if not textequivs:\n LOG.warning(\"Line '%s' contains no text results\", line.id)\n continue\n sequences[-1].append((textequivs[0], word, line))\n continue\n words = line.get_Word()\n if not words:\n LOG.warning(\"Line '%s' contains no word\", line.id)\n continue\n first_word = True\n for word in words:\n if not first_word:\n sequences[-1].append((TextEquivType(Unicode=' ', conf=1.0, index=-1), word, line))\n first_word = False\n if level == 'word':\n #LOG.debug(\"Getting text in word '%s'\", word.id)\n textequivs = word.get_TextEquiv()\n if not textequivs:\n LOG.warning(\"Word '%s' contains no text results\", word.id)\n continue\n sequences[-1].append((textequivs[0], word, line))\n continue\n glyphs = word.get_Glyph()\n if not glyphs:\n LOG.warning(\"Word '%s' contains no glyphs\", word.id)\n continue\n for glyph in glyphs:\n #LOG.debug(\"Getting text in glyph '%s'\", glyph.id)\n textequivs = glyph.get_TextEquiv()\n if not textequivs:\n LOG.warning(\"Glyph '%s' contains no text results\", glyph.id)\n continue\n sequences[-1].append((textequivs[0], word, line))\n if sequences:\n sequences[-1].append((TextEquivType(Unicode='\\n', conf=1.0, 
index=-1), word, line))\n # filter empty lines (containing only newline):\n return [line for line in sequences if len(line) > 1]\n\ndef _line_sequences2string_sequences(mapping, line_sequences):\n '''Concatenate TextEquiv / Word / TextLine sequences to line strings.\n \n Return a list of line strings, a list of confidence lists,\n a list of dicts from string positions to TextEquiv references,\n a list of dicts from string positions to Word references, and\n a list of dicts from string positions to TextLine references.\n '''\n input_lines, conf, textequiv_starts, word_starts, textline_starts = [], [], [], [], []\n for line_sequence in line_sequences:\n i = 0\n input_lines.append('')\n conf.append(list())\n textequiv_starts.append(dict())\n word_starts.append(dict())\n textline_starts.append(dict())\n for textequiv, word, textline in line_sequence:\n textequiv_starts[-1][i] = textequiv\n word_starts[-1][i] = word\n textline_starts[-1][i] = textline\n j = len(textequiv.Unicode)\n if not textequiv.Unicode:\n # empty element (OCR rejection):\n # this information is still valuable for post-correction,\n # and we reserved index zero for underspecified inputs,\n # therefore here we just need to replace the gap with some\n # unmapped character, like GAP:\n assert GAP not in mapping, (\n 'character \"%s\" must not be mapped (needed for gap repair)' % GAP)\n textequiv.Unicode = GAP\n j = 1\n input_lines[-1] += textequiv.Unicode\n # generateDS does not convert simpleType for attributes (yet?)\n conf[-1].extend([float(textequiv.conf or \"1.0\")] * j)\n i += j\n return input_lines, conf, textequiv_starts, word_starts, textline_starts\n\ndef _alignment2path(alignment, i_max, j_max, min_score):\n '''Find the best path through a soft alignment matrix via Viterbi search.\n \n The `alignment` is a list of vectors of scores (between 0..1).\n The list indexes are output positions (ignored above `j_max`),\n the vector indexes are input positions (ignored above `i_max`).\n Viterbi forward scores are only calculated where the alignment\n scores are larger than `min_score` (to save time).\n \n Return a dictionary mapping input positions to output positions\n (i.e. 
a realignment path).\n '''\n # compute Viterbi forward pass:\n viterbi_fw = np.zeros((i_max, j_max), dtype=np.float32)\n i, j = 0, 0\n while i < i_max and j < j_max:\n if i > 0:\n im1 = viterbi_fw[i - 1, j]\n else:\n im1 = 0\n if j > 0:\n jm1 = viterbi_fw[i, j - 1]\n else:\n jm1 = 0\n if i > 0 and j > 0:\n ijm1 = viterbi_fw[i - 1, j - 1]\n else:\n ijm1 = 0\n viterbi_fw[i, j] = alignment[j][i] + max(im1, jm1, ijm1)\n while True:\n i += 1\n if i == i_max:\n j += 1\n if j == j_max:\n break\n i = 0\n if alignment[j][i] > min_score:\n break\n # compute Viterbi backward pass:\n i = i_max - 1 if i_max <= j_max else j_max - 2 + int(\n np.argmax(viterbi_fw[j_max - i_max - 2:, j_max - 1]))\n j = j_max - 1 if j_max <= i_max else i_max - 2 + int(\n np.argmax(viterbi_fw[i_max - 1, i_max - j_max - 2:]))\n realignment = {i_max: j_max} # init end of line\n while i >= 0 and j >= 0:\n realignment[i] = j # (overwrites any previous assignment)\n if viterbi_fw[i - 1, j] > viterbi_fw[i, j - 1]:\n if viterbi_fw[i - 1, j] > viterbi_fw[i - 1, j - 1]:\n i -= 1\n else:\n i -= 1\n j -= 1\n elif viterbi_fw[i, j - 1] > viterbi_fw[i - 1, j - 1]:\n j -= 1\n else:\n j -= 1\n i -= 1\n realignment[0] = 0 # init start of line\n # LOG.debug('realignment: %s', str(realignment))\n # from matplotlib import pyplot\n # pyplot.imshow(viterbi_fw)\n # pyplot.show()\n return realignment\n\ndef _update_sequence(input_line, output_line, output_prob,\n score, realignment,\n textequivs, words, textlines):\n '''Apply correction across TextEquiv elements along alignment path of one line.\n \n Traverse the path `realignment` through `input_line` and `output_line`,\n looking up TextEquiv objects by their start positions via `textequivs`.\n Overwrite the string value of the objects (which equals the segment in\n `input_line`) with the corrected version (which equals the segment in\n `output_line`), and overwrite the confidence values from `output_prob`.\n \n Also, redistribute string parts bordering whitespace: make sure space\n only maps to space (or gets deleted, which necessitates merging Words),\n and non-space only maps to non-space (with space allowed only in the\n middle, which necessitates splitting Words). 
This is required in order\n to avoid loosing content: the implicit whitespace TextEquivs do not\n belong to the document hierarchy itself.\n (Merging and splitting can be done afterwards.)\n \n Return a list of TextEquiv / Word / TextLine tuples thus processed.\n '''\n i_max = len(input_line)\n j_max = len(output_line)\n textequivs.setdefault(i_max, None) # init end of line\n line = next(line for line in textlines.values() if line)\n last = None\n sequence = []\n for i in textequivs:\n if i in realignment:\n j = realignment[i]\n else:\n # this element was deleted\n j = last[1]\n #print(last, [i, j])\n if last:\n input_ = input_line[last[0]:i]\n output = output_line[last[1]:j]\n # try to distribute whitespace onto whitespace, i.e.\n # if input is Whitespace, move any Non-whitespace parts\n # in output to neighbours;\n # otherwise, move Whitespace parts to neighbours\n # if their input is Whitespace too;\n # input: N| W |N N| W | W| N |W\n # output: |<-N W N->| |<-W<-N W | |<-W N W->|\n if input_ in (\" \", \"\\n\"):\n if output and not output.startswith((\" \", \"\\n\")) and sequence:\n while output and not output.startswith((\" \", \"\\n\")):\n sequence[-1][0].Unicode += output[0]\n last[1] += 1\n output = output[1:]\n #print('corrected non-whitespace LHS: ', last, [i, j])\n if output and not output.endswith((\" \", \"\\n\")):\n j -= len(output.split(\" \")[-1])\n output = output_line[last[1]:j]\n #print('corrected non-whitespace RHS: ', last, [i, j])\n if output.split() and sequence:\n while output.split():\n sequence[-1][0].Unicode += output[0]\n last[1] += 1\n output = output[1:]\n #print('corrected non-whitespace middle: ', last, [i, j])\n else:\n if output.startswith(\" \") and sequence and sequence[-1][0].index == -1:\n while output.startswith(\" \"):\n sequence[-1][0].Unicode += output[0]\n last[1] += 1\n output = output[1:]\n #print('corrected whitespace LHS: ', last, [i, j])\n if output.endswith((\" \", \"\\n\")) and i < i_max and input_line[i] in (\" \", \"\\n\"):\n while output.endswith((\" \", \"\\n\")):\n j -= 1\n output = output[:-1]\n #print('corrected whitespace RHS: ', last, [i, j])\n textequiv = textequivs[last[0]]\n assert textequiv.Unicode == input_, (\n 'source element \"%s\" does not match input section \"%s\" in line \"%s\"' % (\n textequiv.Unicode, input_, line.id))\n #print(\"'\" + textequiv.Unicode + \"' -> '\" + output + \"'\")\n textequiv.Unicode = output\n #textequiv.conf = np.exp(-score)\n prob = output_prob[last[1]:j]\n textequiv.conf = np.mean(prob or [1.0])\n word = words[last[0]]\n textline = textlines[last[0]]\n sequence.append((textequiv, word, textline))\n last = [i, j]\n assert last == [i_max, j_max], (\n 'alignment path did not reach top: %d/%d vs %d/%d in line \"%s\"' % (\n last[0], last[1], i_max, j_max, line.id))\n for i, (textequiv, _, _) in enumerate(sequence):\n assert not textequiv.Unicode.split() or textequiv.index != -1, (\n 'output \"%s\" will be lost at (whitespace) element %d in line \"%s\"' % (\n textequiv.Unicode, i, line.id))\n return sequence\n\ndef _resegment_sequence(sequence, level):\n '''Merge and split Words among `sequence` after correction.\n \n At each empty whitespace TextEquiv, merge the neighbouring Words.\n At each non-whitespace TextEquiv which contains whitespace, split\n the containing Word at the respective positions.\n '''\n for i, (textequiv, word, textline) in enumerate(sequence):\n if textequiv.index == -1:\n if not textequiv.Unicode:\n # whitespace was deleted: merge adjacent words\n if i == 0 or i == len(sequence) 
- 1:\n LOG.error('cannot merge Words at the %s of line \"%s\"',\n 'end' if i else 'start', textline.id)\n else:\n prev_textequiv, prev_word, _ = sequence[i - 1]\n next_textequiv, next_word, _ = sequence[i + 1]\n if not prev_word or not next_word:\n LOG.error('cannot merge Words \"%s\" and \"%s\" in line \"%s\"',\n prev_textequiv.Unicode, next_textequiv.Unicode, textline.id)\n else:\n merged = _merge_words(prev_word, next_word)\n LOG.debug('merged %s and %s to %s in line %s',\n prev_word.id, next_word.id, merged.id, textline.id)\n textline.set_Word([merged if word is prev_word else word\n for word in textline.get_Word()\n if not word is next_word])\n elif \" \" in textequiv.Unicode:\n # whitespace was introduced: split word\n if not word:\n LOG.error('cannot split Word \"%s\" in line \"%s\"',\n textequiv.Unicode, textline.id)\n else:\n if level == 'glyph':\n glyph = next(glyph for glyph in word.get_Glyph()\n if textequiv in glyph.get_TextEquiv())\n prev_, next_ = _split_word_at_glyph(word, glyph)\n parts = [prev_, next_]\n else:\n parts = []\n next_ = word\n while True:\n prev_, next_ = _split_word_at_space(next_)\n if \" \" in next_.get_TextEquiv()[0].Unicode:\n parts.append(prev_)\n else:\n parts.append(prev_)\n parts.append(next_)\n break\n LOG.debug('split %s to %s in line %s',\n word.id, [w.id for w in parts], textline.id)\n textline.set_Word(reduce(lambda l, w, key=word, value=parts:\n l + value if w is key else l + [w],\n textline.get_Word(), []))\n \ndef _merge_words(prev_, next_):\n merged = WordType(id=prev_.id + '.' + next_.id)\n merged.set_Coords(CoordsType(points=points_from_xywh(xywh_from_points(\n prev_.get_Coords().points + ' ' + next_.get_Coords().points))))\n if prev_.get_language():\n merged.set_language(prev_.get_language())\n if prev_.get_TextStyle():\n merged.set_TextStyle(prev_.get_TextStyle())\n if prev_.get_Glyph() or next_.get_Glyph():\n merged.set_Glyph(prev_.get_Glyph() + next_.get_Glyph())\n if prev_.get_TextEquiv():\n merged.set_TextEquiv(prev_.get_TextEquiv())\n else:\n merged.set_TextEquiv([TextEquivType(Unicode='', conf=1.0)])\n if next_.get_TextEquiv():\n textequiv = merged.get_TextEquiv()[0]\n textequiv2 = next_.get_TextEquiv()[0]\n textequiv.Unicode += textequiv2.Unicode\n if textequiv.conf and textequiv2.conf:\n textequiv.conf *= textequiv2.conf\n return merged\n\ndef _split_word_at_glyph(word, glyph):\n prev_ = WordType(id=word.id + '_l')\n next_ = WordType(id=word.id + '_r')\n xywh_glyph = xywh_from_points(glyph.get_Coords().points)\n xywh_word = xywh_from_points(word.get_Coords().points)\n xywh_prev = xywh_word.copy()\n xywh_prev.update({'w': xywh_glyph['x'] - xywh_word['x']})\n prev_.set_Coords(CoordsType(points=points_from_xywh(\n xywh_prev)))\n xywh_next = xywh_word.copy()\n xywh_next.update({'x': xywh_glyph['x'] - xywh_glyph['w'],\n 'w': xywh_word['w'] - xywh_prev['w']})\n next_.set_Coords(CoordsType(points=points_from_xywh(\n xywh_next)))\n if word.get_language():\n prev_.set_language(word.get_language())\n next_.set_language(word.get_language())\n if word.get_TextStyle():\n prev_.set_TextStyle(word.get_TextStyle())\n next_.set_TextStyle(word.get_TextStyle())\n glyphs = word.get_Glyph()\n pos = glyphs.index(glyph)\n prev_.set_Glyph(glyphs[0:pos])\n next_.set_Glyph(glyphs[pos+1:])\n # TextEquiv: will be overwritten by page_update_higher_textequiv_levels\n return prev_, next_\n\ndef _split_word_at_space(word):\n prev_ = WordType(id=word.id + '_l')\n next_ = WordType(id=word.id + '_r')\n xywh = xywh_from_points(word.get_Coords().points)\n textequiv 
= word.get_TextEquiv()[0]\n pos = textequiv.Unicode.index(\" \")\n fract = pos / len(textequiv.Unicode)\n xywh_prev = xywh.copy()\n xywh_prev.update({'w': xywh['w'] * fract})\n prev_.set_Coords(CoordsType(points=points_from_xywh(\n xywh_prev)))\n xywh_next = xywh.copy()\n xywh_next.update({'x': xywh['x'] + xywh['w'] * fract,\n 'w': xywh['w'] * (1 - fract)})\n next_.set_Coords(CoordsType(points=points_from_xywh(\n xywh_next)))\n if word.get_language():\n prev_.set_language(word.get_language())\n next_.set_language(word.get_language())\n if word.get_TextStyle():\n prev_.set_TextStyle(word.get_TextStyle())\n next_.set_TextStyle(word.get_TextStyle())\n # Glyphs: irrelevant at this processing level\n textequiv_prev = TextEquivType(Unicode=textequiv.Unicode[0:pos],\n conf=textequiv.conf)\n textequiv_next = TextEquivType(Unicode=textequiv.Unicode[pos+1:],\n conf=textequiv.conf)\n prev_.set_TextEquiv([textequiv_prev])\n next_.set_TextEquiv([textequiv_next])\n return prev_, next_\n\ndef page_update_higher_textequiv_levels(level, pcgts):\n '''Update the TextEquivs of all PAGE-XML hierarchy levels above `level` for consistency.\n \n Starting with the hierarchy level chosen for processing,\n join all first TextEquiv (by the rules governing the respective level)\n into TextEquiv of the next higher level, replacing them.\n '''\n regions = pcgts.get_Page().get_TextRegion()\n if level != 'region':\n for region in regions:\n lines = region.get_TextLine()\n if level != 'line':\n for line in lines:\n words = line.get_Word()\n if level != 'word':\n for word in words:\n glyphs = word.get_Glyph()\n word_unicode = u''.join(glyph.get_TextEquiv()[0].Unicode\n if glyph.get_TextEquiv()\n else u'' for glyph in glyphs)\n word_conf = np.mean([glyph.get_TextEquiv()[0].conf\n if glyph.get_TextEquiv()\n else 1. for glyph in glyphs])\n word.set_TextEquiv( # remove old\n [TextEquivType(Unicode=word_unicode,\n conf=word_conf)])\n line_unicode = u' '.join(word.get_TextEquiv()[0].Unicode\n if word.get_TextEquiv()\n else u'' for word in words)\n line_conf = np.mean([word.get_TextEquiv()[0].conf\n if word.get_TextEquiv()\n else 1. for word in words])\n line.set_TextEquiv( # remove old\n [TextEquivType(Unicode=line_unicode,\n conf=line_conf)])\n region_unicode = u'\\n'.join(line.get_TextEquiv()[0].Unicode\n if line.get_TextEquiv()\n else u'' for line in lines)\n region_conf = np.mean([line.get_TextEquiv()[0].conf\n if line.get_TextEquiv()\n else 1. 
for line in lines])\n region.set_TextEquiv( # remove old\n [TextEquivType(Unicode=region_unicode,\n conf=region_conf)])\n", "id": "9882884", "language": "Python", "matching_score": 3.20343017578125, "max_stars_count": 0, "path": "ocrd_cor_asv_ann/wrapper/transcode.py" }, { "content": "from pathlib import Path\nfrom filecmp import cmp\n\nfrom ocrd import Resolver, Workspace\nfrom ocrd_models.ocrd_page import parse\nfrom ocrd_utils import pushd_popd\n\nfrom ocrd_vandalize.processor import OcrdVandalize\n\ndef test_vandalize_textequiv():\n \"\"\"Test whether text is changed\"\"\"\n resolver = Resolver()\n workspace_dir = str(Path(__file__).parent / 'assets/kant_aufklaerung_1784/data')\n with pushd_popd(workspace_dir):\n pcgts_before = parse('OCR-D-GT-PAGE/PAGE_0017_PAGE.xml')\n assert pcgts_before.get_Page().get_TextRegion()[0].get_TextLine()[0].get_TextEquiv()[0].Unicode == 'Berliniſche Monatsſchrift.'\n OcrdVandalize(\n Workspace(resolver, directory=workspace_dir),\n input_file_grp='OCR-D-GT-PAGE',\n output_file_grp='VANDALIZED').process()\n pcgts_after = parse('VANDALIZED/VANDALIZED_0001.xml')\n assert pcgts_after.get_Page().get_TextRegion()[0].get_TextLine()[0].get_TextEquiv()[0].Unicode != 'Berliniſche Monatsſchrift.'\n\ndef test_vandalize_image():\n \"\"\"Test whether image is changed\"\"\"\n resolver = Resolver()\n workspace_dir = str(Path(__file__).parent / 'assets/kant_aufklaerung_1784/data')\n with pushd_popd(workspace_dir):\n workspace = Workspace(resolver, directory=workspace_dir)\n file_before = list(workspace.mets.find_files(fileGrp='OCR-D-GT-PAGE',\n pageId='PHYS_0017'))\n img_before = list(workspace.mets.find_files(fileGrp='OCR-D-IMG',\n pageId='PHYS_0017'))\n assert len(file_before) == 1\n assert len(img_before) == 1\n pcgts_before = parse(file_before[0].url)\n OcrdVandalize(workspace,\n input_file_grp='OCR-D-GT-PAGE',\n output_file_grp='VANDALIZED').process()\n file_after = list(workspace.mets.find_files(fileGrp='VANDALIZED',\n pageId='PHYS_0017',\n mimetype='application/vnd.prima.page+xml'))\n assert len(file_after) == 1\n pcgts_after = parse(file_after[0].url)\n assert pcgts_after.get_Page().get_TextRegion()[0].get_TextLine()[0].get_TextEquiv()[0].Unicode != 'Berliniſche Monatsſchrift.'\n img_after = list(workspace.mets.find_files(fileGrp='VANDALIZED',\n pageId='PHYS_0017',\n mimetype='image/png'))\n assert len(img_after) == 1\n altimg_after = pcgts_after.get_Page().get_AlternativeImage()\n assert len(altimg_after) == 1\n assert 'watermarked' in altimg_after[-1].get_comments()\n assert img_after[0].url == altimg_after[-1].get_filename()\n assert not cmp(img_before[0].url, img_after[0].url, shallow=False)\n", "id": "2479607", "language": "Python", "matching_score": 2.7903823852539062, "max_stars_count": 8, "path": "tests/test_vandalize.py" }, { "content": "# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring\n# pylint: disable=invalid-name,line-too-long\n\nfrom tests.base import TestCase, assets, main, copy_of_directory # pylint: disable=import-error, no-name-in-module\nfrom ocrd import Resolver, Workspace\nfrom ocrd.processor.base import run_processor\nfrom ocrd.cli.dummy_processor import DummyProcessor\n\nclass TestDummyProcessor(TestCase):\n\n def test_copies_ok(self):\n with copy_of_directory(assets.url_of('SBB0000F29300010000/data')) as wsdir:\n workspace = Workspace(Resolver(), wsdir)\n input_files = workspace.mets.find_files(fileGrp='OCR-D-IMG')\n self.assertEqual(len(input_files), 3)\n output_files = 
workspace.mets.find_files(fileGrp='OUTPUT')\n self.assertEqual(len(output_files), 0)\n run_processor(\n DummyProcessor,\n input_file_grp='OCR-D-IMG',\n output_file_grp='OUTPUT',\n workspace=workspace\n )\n output_files = workspace.mets.find_files(fileGrp='OUTPUT')\n self.assertEqual(len(output_files), 3)\n self.assertEqual(len(workspace.mets.find_files(ID='//COPY_OF.*')), 3)\n\nif __name__ == \"__main__\":\n main()\n", "id": "10482507", "language": "Python", "matching_score": 2.7062671184539795, "max_stars_count": 0, "path": "tests/processor/test_ocrd_dummy.py" }, { "content": "# pylint: disable=missing-module-docstring,invalid-name\nfrom os.path import join, basename\n\nimport click\n\nfrom ocrd import Processor\nfrom ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor\nfrom ocrd_utils import getLogger\n\nDUMMY_TOOL = {\n 'executable': 'ocrd-dummy',\n 'description': 'Bare-bones processor that copies file from input group to output group',\n 'steps': ['preprocessing/optimization'],\n 'categories': ['Image preprocessing'],\n 'input_file_grp': 'DUMMY_INPUT',\n 'output_file_grp': 'DUMMY_OUTPUT',\n}\n\nLOG = getLogger('ocrd.dummy')\n\nclass DummyProcessor(Processor):\n \"\"\"\n Bare-bones processor that copies mets:file from input group to output group.\n \"\"\"\n\n def process(self):\n for n, input_file in enumerate(self.input_files):\n input_file = self.workspace.download_file(input_file)\n page_id = input_file.pageId or input_file.ID\n LOG.info(\"INPUT FILE %i / %s\", n, page_id)\n file_id = 'COPY_OF_%s' % input_file.ID\n local_filename = join(self.output_file_grp, basename(input_file.local_filename))\n with open(input_file.local_filename, 'rb') as f:\n content = f.read()\n self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n mimetype=input_file.mimetype,\n local_filename=local_filename,\n content=content)\n\n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = DUMMY_TOOL\n kwargs['version'] = '0.0.1'\n super(DummyProcessor, self).__init__(*args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef cli(*args, **kwargs):\n return ocrd_cli_wrap_processor(DummyProcessor, *args, **kwargs)\n", "id": "7717621", "language": "Python", "matching_score": 2.2998428344726562, "max_stars_count": 0, "path": "ocrd/ocrd/cli/dummy_processor.py" }, { "content": "import click\n\nfrom ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor\n\nfrom .transcode import ANNCorrection\nfrom .evaluate import EvaluateLines\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_cor_asv_ann_process(*args, **kwargs):\n return ocrd_cli_wrap_processor(ANNCorrection, *args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_cor_asv_ann_evaluate(*args, **kwargs):\n return ocrd_cli_wrap_processor(EvaluateLines, *args, **kwargs)\n", "id": "11228648", "language": "Python", "matching_score": 3.011193037033081, "max_stars_count": 10, "path": "ocrd_cor_asv_ann/wrapper/cli.py" }, { "content": "import click\n\nfrom ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor\n\nfrom .decode import PageXMLProcessor\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_cor_asv_fst(*args, **kwargs):\n return ocrd_cli_wrap_processor(PageXMLProcessor, *args, **kwargs)\n", "id": "2376277", "language": "Python", "matching_score": 3.1446316242218018, "max_stars_count": 0, "path": "ocrd_cor_asv_fst/wrapper/cli.py" }, { "content": "'''wrapper for OCR-D conformance (CLI and workspace processor)'''\n\nfrom .decode import PageXMLProcessor\nfrom .cli import 
ocrd_cor_asv_fst\nfrom .config import OCRD_TOOL\n", "id": "9034774", "language": "Python", "matching_score": 2.5608363151550293, "max_stars_count": 0, "path": "ocrd_cor_asv_fst/wrapper/__init__.py" }, { "content": "'''wrapper for OCR-D conformance (CLI and workspace processor)'''\n", "id": "4840450", "language": "Python", "matching_score": 0.8255061507225037, "max_stars_count": 10, "path": "ocrd_cor_asv_ann/wrapper/__init__.py" }, { "content": "\"\"\"\nOCR-D wrapper for arbitrary coords-preserving image operations\n\"\"\"\n", "id": "7750691", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ocrd_wrap/__init__.py" }, { "content": "#!/usr/bin/python -i\nimport shutil, h5py, sys\nsource = '../../tools/ocrd_keraslm/model_dta_l2_512s_512.h5'\ntarget = 'lm-char.d2.w0512.dta.h5'\nif len(sys.argv) > 1:\n if sys.argv[1] in ['-h', '-help', '--help']:\n print('usage: %s [source-file [target-file]]\\n\\ndefault source-file: %s\\ndefault target-file: %s\\n' %\n (sys.argv[0], source, target))\n exit()\n else:\n source = sys.argv[1]\n if len(sys.argv) > 2:\n target = sys.argv[2]\n\nshutil.copy(source, target)\n\nwith h5py.File(target, 'r+') as f:\n # default name in ocrd_keraslm vs name used by s2s (weight-tied to LM)\n f.copy('lstm_1', 'decoder_lstm_1')\n f.copy('lstm_2', 'decoder_lstm_2')\n #f.copy('dense_1', 'time_distributed_1')\n del f['lstm_1']\n del f['lstm_2']\n #del f['dense_1']\n # remove input weights for contexts:\n d = f['decoder_lstm_1/lstm_1/kernel:0'][:-10,:]\n del f['decoder_lstm_1/lstm_1/kernel:0']\n f['decoder_lstm_1/lstm_1'].create_dataset('kernel:0', data=d)\n #\n rename = {b'lstm_1': b'decoder_lstm_1', b'lstm_2': b'decoder_lstm_2'} #b'dense_1': b'time_distributed_1'}\n names = f.attrs['layer_names'].astype('|S20') # longer\n for i in range(names.shape[0]):\n names[i] = rename.get(names[i],names[i])\n #f.attrs.modify('layer_names', names)\n f.attrs['layer_names'] = names\n print(f.attrs['layer_names'])\n f.flush()\n\n\n\n\n \n", "id": "8154099", "language": "Python", "matching_score": 0.8848410248756409, "max_stars_count": 10, "path": "transfer-dta-lm.py" }, { "content": "import os\n\nDEFAULT_SEGMENTATION_MODEL_PATH = os.path.join(\n os.path.dirname(__file__), 'dta2-model', 'model.h5'\n)\n", "id": "10310849", "language": "Python", "matching_score": 0.013975878246128559, "max_stars_count": 0, "path": "ocrd_pc_segmentation/__init__.py" }, { "content": "#!/usr/bin/python\n\nimport argparse\nimport re\nimport unicodedata\nfrom PIL import Image\n\n#\n# command line arguments\n#\narg_parser = argparse.ArgumentParser('''Creates tesseract box files for given (line) image text pairs''')\n\n# clipping XML file\narg_parser.add_argument('-t', '--txt', type=argparse.FileType('r'), nargs='?', metavar='TXT', help='Line text (GT)', required=True)\n\n# Image file\narg_parser.add_argument('-i', '--image', nargs='?', metavar='IMAGE', help='Image file', required=True)\n\nargs = arg_parser.parse_args()\n\n#\n# main\n#\n\n# load image\nim = Image.open(file(args.image, \"r\"))\nimage = re.sub(\"\\\\.[^\\\\.]+$\", \"\", re.sub(\"^[^/]+/\", \"\", args.image))\nwidth, height = im.size\n\nfor line in args.txt:\n line = line.strip().decode(\"utf-8\")\n for i in range(1, len(line)):\n char = line[i]\n prev_char = line[i-1]\n if unicodedata.combining(char):\n print(\"%s %d %d %d %d 0\" % ((prev_char + char).encode(\"utf-8\"), 0, 0, width, height))\n elif not unicodedata.combining(prev_char):\n print(\"%s %d %d %d %d 0\" % (prev_char.encode(\"utf-8\"), 0, 0, width, height))\n if not 
unicodedata.combining(line[-1]):\n print(\"%s %d %d %d %d 0\" % (line[-1].encode(\"utf-8\"), 0, 0, width, height))\n print(\"%s %d %d %d %d 0\" % (\"\\t\", width, height, width+1, height+1))\n", "id": "918867", "language": "Python", "matching_score": 0.7671434879302979, "max_stars_count": 0, "path": "generate_line_box.py" }, { "content": "import os.path\nimport sys\nfrom xml.dom import minidom\n\nimport ocrolib\n\n__all__ = [\n 'parseXML',\n 'write_to_xml',\n 'print_error',\n 'print_info',\n]\n\ndef print_info(msg):\n print(\"INFO: %s\" % msg)\n\ndef print_error(msg):\n print(\"ERROR: %s\" % msg)\n\ndef parse_params_with_defaults(params_json, params_schema):\n \"\"\"\n Fill a parameters dict with the default values from param_schema. Dumbed down from ocr-d/core\n \"\"\"\n for param_name in params_schema:\n param_schema = params_schema[param_name]\n if param_name not in params_json and 'default' in param_schema:\n params_json[param_name] = param_schema['default']\n return params_json\n\ndef parseXML(fpath, Input):\n input_files = []\n xmldoc = minidom.parse(fpath)\n nodes = xmldoc.getElementsByTagName('mets:fileGrp')\n for attr in nodes:\n if attr.attributes['USE'].value == Input:\n childNodes = attr.getElementsByTagName('mets:FLocat')\n for f in childNodes:\n input_files.append(f.attributes['xlink:href'].value)\n return input_files\n\n\ndef write_to_xml(fpath, mets, Output, OutputMets, work):\n xmldoc = minidom.parse(mets)\n subRoot = xmldoc.createElement('mets:fileGrp')\n subRoot.setAttribute('USE', Output)\n\n for f in fpath:\n basefile = ocrolib.allsplitext(os.path.basename(f))[0]\n child = xmldoc.createElement('mets:file')\n child.setAttribute('ID', 'CROP_'+basefile)\n child.setAttribute('GROUPID', 'P_' + basefile)\n child.setAttribute('MIMETYPE', \"image/png\")\n\n subChild = xmldoc.createElement('mets:FLocat')\n subChild.setAttribute('LOCTYPE', \"URL\")\n subChild.setAttribute('xlink:href', f)\n\n subRoot.appendChild(child)\n child.appendChild(subChild)\n\n xmldoc.getElementsByTagName('mets:fileSec')[0].appendChild(subRoot)\n\n if not OutputMets:\n metsFileSave = open(os.path.join(\n work, os.path.basename(mets)), \"w\")\n else:\n metsFileSave = open(os.path.join(work, OutputMets if OutputMets.endswith(\n \".xml\") else OutputMets+'.xml'), \"w\")\n metsFileSave.write(xmldoc.toxml())\n", "id": "4738975", "language": "Python", "matching_score": 1.091442346572876, "max_stars_count": 0, "path": "ocrd_anybaseocr/utils.py" }, { "content": "import json\nimport glob\nimport re\nimport os\nfrom io import StringIO\nfrom pathlib import Path\n\nimport numpy as np\nimport click\nimport pandas as pd\nimport requests\nfrom lxml import etree as ET\n\nfrom ocrd_models.ocrd_page import parse\nfrom ocrd_utils import bbox_from_points\n\nfrom .ned import ned\nfrom .ner import ner\nfrom .tsv import read_tsv, write_tsv, extract_doc_links\nfrom .ocr import get_conf_color\n\n\[email protected]()\[email protected]('tsv-file', type=click.Path(exists=True), required=True, nargs=1)\[email protected]('url-file', type=click.Path(exists=False), required=True, nargs=1)\ndef extract_document_links(tsv_file, url_file):\n\n parts = extract_doc_links(tsv_file)\n\n urls = [part['url'] for part in parts]\n\n urls = pd.DataFrame(urls, columns=['url'])\n\n urls.to_csv(url_file, sep=\"\\t\", quoting=3, index=False)\n\n\[email protected]()\[email protected]('tsv-file', type=click.Path(exists=True), required=True, nargs=1)\[email protected]('annotated-tsv-file', type=click.Path(exists=False), required=True, nargs=1)\ndef 
annotate_tsv(tsv_file, annotated_tsv_file):\n\n parts = extract_doc_links(tsv_file)\n\n annotated_parts = []\n\n for part in parts:\n\n part_data = StringIO(part['header'] + part['text'])\n\n df = pd.read_csv(part_data, sep=\"\\t\", comment='#', quoting=3)\n\n df['url_id'] = len(annotated_parts)\n\n annotated_parts.append(df)\n\n df = pd.concat(annotated_parts)\n\n df.to_csv(annotated_tsv_file, sep=\"\\t\", quoting=3, index=False)\n\n\[email protected]()\[email protected]('page-xml-file', type=click.Path(exists=True), required=True, nargs=1)\[email protected]('tsv-out-file', type=click.Path(), required=True, nargs=1)\[email protected]('--purpose', type=click.Choice(['NERD', 'OCR'], case_sensitive=False), default=\"NERD\",\n help=\"Purpose of output tsv file. \"\n \"\\n\\nNERD: NER/NED application/ground-truth creation. \"\n \"\\n\\nOCR: OCR application/ground-truth creation. \"\n \"\\n\\ndefault: NERD.\")\[email protected]('--image-url', type=str, default='http://empty')\[email protected]('--ner-rest-endpoint', type=str, default=None,\n help=\"REST endpoint of sbb_ner service. See https://github.com/qurator-spk/sbb_ner for details. \"\n \"Only applicable in case of NERD.\")\[email protected]('--ned-rest-endpoint', type=str, default=None,\n help=\"REST endpoint of sbb_ned service. See https://github.com/qurator-spk/sbb_ned for details. \"\n \"Only applicable in case of NERD.\")\[email protected]('--noproxy', type=bool, is_flag=True, help='disable proxy. default: enabled.')\[email protected]('--scale-factor', type=float, default=1.0, help='default: 1.0')\[email protected]('--ned-threshold', type=float, default=None)\[email protected]('--min-confidence', type=float, default=None)\[email protected]('--max-confidence', type=float, default=None)\[email protected]('--ned-priority', type=int, default=1)\ndef page2tsv(page_xml_file, tsv_out_file, purpose, image_url, ner_rest_endpoint, ned_rest_endpoint,\n noproxy, scale_factor, ned_threshold, min_confidence, max_confidence, ned_priority):\n if purpose == \"NERD\":\n out_columns = ['No.', 'TOKEN', 'NE-TAG', 'NE-EMB', 'ID', 'url_id', 'left', 'right', 'top', 'bottom', 'conf']\n elif purpose == \"OCR\":\n out_columns = ['TEXT', 'url_id', 'left', 'right', 'top', 'bottom', 'conf', 'line_id']\n if min_confidence is not None and max_confidence is not None:\n out_columns += ['ocrconf']\n else:\n raise RuntimeError(\"Unknown purpose.\")\n\n if noproxy:\n os.environ['no_proxy'] = '*'\n\n urls = []\n if os.path.exists(tsv_out_file):\n parts = extract_doc_links(tsv_out_file)\n urls = [part['url'] for part in parts]\n else:\n pd.DataFrame([], columns=out_columns).to_csv(tsv_out_file, sep=\"\\t\", quoting=3, index=False)\n\n pcgts = parse(page_xml_file)\n tsv = []\n line_info = []\n\n for region_idx, region in enumerate(pcgts.get_Page().get_AllRegions(classes=['Text'], order='reading-order')):\n for text_line in region.get_TextLine():\n\n left, top, right, bottom = [int(scale_factor * x) for x in bbox_from_points(text_line.get_Coords().points)]\n\n if min_confidence is not None and max_confidence is not None:\n conf = np.max([textequiv.conf for textequiv in text_line.get_TextEquiv()])\n else:\n conf = np.nan\n\n line_info.append((len(urls), left, right, top, bottom, conf, text_line.id))\n\n for word in text_line.get_Word():\n for text_equiv in word.get_TextEquiv():\n # transform OCR coordinates using `scale_factor` to derive\n # correct coordinates for the web presentation image\n left, top, right, bottom = [int(scale_factor * x) for x in 
bbox_from_points(word.get_Coords().points)]\n\n tsv.append((region_idx, len(line_info) - 1, left + (right - left) / 2.0,\n text_equiv.get_Unicode(), len(urls), left, right, top, bottom, text_line.id))\n\n line_info = pd.DataFrame(line_info, columns=['url_id', 'left', 'right', 'top', 'bottom', 'conf', 'line_id'])\n\n if min_confidence is not None and max_confidence is not None:\n line_info['ocrconf'] = line_info.conf.map(lambda x: get_conf_color(x, min_confidence, max_confidence))\n\n tsv = pd.DataFrame(tsv, columns=['rid', 'line', 'hcenter'] +\n ['TEXT', 'url_id', 'left', 'right', 'top', 'bottom', 'line_id'])\n\n if len(tsv) == 0:\n return\n\n with open(tsv_out_file, 'a') as f:\n\n f.write('# ' + image_url + '\\n')\n\n vlinecenter = pd.DataFrame(tsv[['line', 'top']].groupby('line', sort=False).mean().top +\n (tsv[['line', 'bottom']].groupby('line', sort=False).mean().bottom -\n tsv[['line', 'top']].groupby('line', sort=False).mean().top) / 2,\n columns=['vlinecenter'])\n\n tsv = tsv.merge(vlinecenter, left_on='line', right_index=True)\n\n regions = [region.sort_values(['vlinecenter', 'hcenter']) for rid, region in tsv.groupby('rid', sort=False)]\n\n tsv = pd.concat(regions)\n\n if purpose == 'NERD':\n\n tsv['No.'] = 0\n tsv['NE-TAG'] = 'O'\n tsv['NE-EMB'] = 'O'\n tsv['ID'] = '-'\n tsv['conf'] = '-'\n\n tsv = tsv.rename(columns={'TEXT': 'TOKEN'})\n elif purpose == 'OCR':\n\n tsv = pd.DataFrame([(line, \" \".join(part.TEXT.to_list())) for line, part in tsv.groupby('line')],\n columns=['line', 'TEXT'])\n\n tsv = tsv.merge(line_info, left_on='line', right_index=True)\n\n tsv = tsv[out_columns].reset_index(drop=True)\n\n try:\n if purpose == 'NERD' and ner_rest_endpoint is not None:\n\n tsv, ner_result = ner(tsv, ner_rest_endpoint)\n\n if ned_rest_endpoint is not None:\n\n tsv, _ = ned(tsv, ner_result, ned_rest_endpoint, threshold=ned_threshold, priority=ned_priority)\n\n tsv.to_csv(tsv_out_file, sep=\"\\t\", quoting=3, index=False, mode='a', header=False)\n except requests.HTTPError as e:\n print(e)\n\n\[email protected]()\[email protected]('--output-filename', '-o', help=\"Output filename. \"\n \"If omitted, PAGE-XML filename with .corrected.xml extension\")\[email protected]('--keep-words', '-k', is_flag=True, help=\"Keep (out-of-date) Words of TextLines\")\[email protected]('page-file')\[email protected]('tsv-file')\ndef tsv2page(output_filename, keep_words, page_file, tsv_file):\n if not output_filename:\n output_filename = Path(page_file).stem + '.corrected.xml'\n ns = {'pc': 'http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15'}\n tsv = pd.read_csv(tsv_file, sep='\\t', comment='#', quoting=3)\n tree = ET.parse(page_file)\n for _, row in tsv.iterrows():\n el_textline = tree.find(f'//pc:TextLine[@id=\"{row.line_id}\"]', namespaces=ns)\n el_textline.find('pc:TextEquiv/pc:Unicode', namespaces=ns).text = row.TEXT\n if not keep_words:\n for el_word in el_textline.findall('pc:Word', namespaces=ns):\n el_textline.remove(el_word)\n with open(output_filename, 'w', encoding='utf-8') as f:\n f.write(ET.tostring(tree, pretty_print=True).decode('utf-8'))\n\n\[email protected]()\[email protected]('tsv-file', type=click.Path(exists=True), required=True, nargs=1)\[email protected]('tsv-out-file', type=click.Path(), required=True, nargs=1)\[email protected]('--ner-rest-endpoint', type=str, default=None,\n help=\"REST endpoint of sbb_ner service. 
See https://github.com/qurator-spk/sbb_ner for details.\")\[email protected]('--ned-rest-endpoint', type=str, default=None,\n help=\"REST endpoint of sbb_ned service. See https://github.com/qurator-spk/sbb_ned for details.\")\[email protected]('--ned-json-file', type=str, default=None)\[email protected]('--noproxy', type=bool, is_flag=True, help='disable proxy. default: proxy is enabled.')\[email protected]('--ned-threshold', type=float, default=None)\[email protected]('--ned-priority', type=int, default=1)\ndef find_entities(tsv_file, tsv_out_file, ner_rest_endpoint, ned_rest_endpoint, ned_json_file, noproxy, ned_threshold,\n ned_priority):\n\n if noproxy:\n os.environ['no_proxy'] = '*'\n\n tsv, urls = read_tsv(tsv_file)\n\n try:\n if ner_rest_endpoint is not None:\n\n tsv, ner_result = ner(tsv, ner_rest_endpoint)\n\n elif os.path.exists(tsv_file):\n\n print('Using NER information that is already contained in file: {}'.format(tsv_file))\n\n tmp = tsv.copy()\n tmp['sen'] = (tmp['No.'] == 0).cumsum()\n tmp.loc[~tmp['NE-TAG'].isin(['O', 'B-PER', 'B-LOC', 'B-ORG', 'I-PER', 'I-LOC', 'I-ORG']), 'NE-TAG'] = 'O'\n\n ner_result = [[{'word': str(row.TOKEN), 'prediction': row['NE-TAG']} for _, row in sen.iterrows()]\n for _, sen in tmp.groupby('sen')]\n else:\n raise RuntimeError(\"Either NER rest endpoint or NER-TAG information within tsv_file required.\")\n\n if ned_rest_endpoint is not None:\n\n tsv, ned_result = ned(tsv, ner_result, ned_rest_endpoint, json_file=ned_json_file, threshold=ned_threshold,\n priority=ned_priority)\n\n if ned_json_file is not None and not os.path.exists(ned_json_file):\n\n with open(ned_json_file, \"w\") as fp_json:\n json.dump(ned_result, fp_json, indent=2, separators=(',', ': '))\n\n write_tsv(tsv, urls, tsv_out_file)\n\n except requests.HTTPError as e:\n print(e)\n\n\[email protected]()\[email protected]('--xls-file', type=click.Path(exists=True), default=None,\n help=\"Read parameters from xls-file. Expected columns: Filename, iiif_url, scale_factor.\")\[email protected]('--directory', type=click.Path(exists=True), default=None,\n help=\"Search directory for PPN**/*.xml files. Extract PPN and file number into image-url.\")\[email protected]('--purpose', type=click.Choice(['NERD', 'OCR'], case_sensitive=False), default=\"NERD\",\n help=\"Purpose of output tsv file. \"\n \"\\n\\nNERD: NER/NED application/ground-truth creation. \"\n \"\\n\\nOCR: OCR application/ground-truth creation. 
\"\n \"\\n\\ndefault: NERD.\")\ndef make_page2tsv_commands(xls_file, directory, purpose):\n if xls_file is not None:\n\n if xls_file.endswith(\".xls\"):\n df = pd.read_excel(xls_file)\n else:\n df = pd.read_excel(xls_file, engine='openpyxl')\n\n df = df.dropna(how='all')\n\n for _, row in df.iterrows():\n print('page2tsv $(OPTIONS) {}.xml {}.tsv --image-url={} --scale-factor={} --purpose={}'.\n format(row.Filename, row.Filename, row.iiif_url.replace('/full/full', '/left,top,width,height/full'),\n row.scale_factor, purpose))\n\n elif directory is not None:\n for file in glob.glob('{}/**/*.xml'.format(directory), recursive=True):\n\n ma = re.match('(.*/(PPN[0-9X]+)/.*?([0-9]+).*?).xml', file)\n\n if ma:\n print('page2tsv {} {}.tsv '\n '--image-url=https://content.staatsbibliothek-berlin.de/dc/'\n '{}-{:08d}/left,top,width,height/full/0/default.jpg --scale-factor=1.0 --purpose={}'.\n format(file, ma.group(1), ma.group(2), int(ma.group(3)), purpose))\n\n", "id": "8435779", "language": "Python", "matching_score": 2.7614519596099854, "max_stars_count": 0, "path": "tsvtools/cli.py" }, { "content": "from __future__ import division, print_function\n\nfrom warnings import warn\n\nfrom lxml import etree as ET\nimport sys\n\nfrom lxml.etree import XMLSyntaxError\n\n\ndef alto_namespace(tree):\n \"\"\"Return the ALTO namespace used in the given ElementTree.\n\n This relies on the assumption that, in any given ALTO file, the root element has the local name \"alto\". We do not\n check if the files uses any valid ALTO namespace.\n \"\"\"\n root_name = ET.QName(tree.getroot().tag)\n if root_name.localname == 'alto':\n return root_name.namespace\n else:\n raise ValueError('Not an ALTO tree')\n\n\ndef alto_text(tree):\n \"\"\"Extract text from the given ALTO ElementTree.\"\"\"\n\n nsmap = {'alto': alto_namespace(tree)}\n\n lines = (\n ' '.join(string.attrib.get('CONTENT') for string in line.iterfind('alto:String', namespaces=nsmap))\n for line in tree.iterfind('.//alto:TextLine', namespaces=nsmap))\n text_ = '\\n'.join(lines)\n\n return text_\n\n\ndef page_namespace(tree):\n \"\"\"Return the PAGE content namespace used in the given ElementTree.\n\n This relies on the assumption that, in any given PAGE content file, the root element has the local name \"PcGts\". 
We\n do not check if the files uses any valid PAGE namespace.\n \"\"\"\n root_name = ET.QName(tree.getroot().tag)\n if root_name.localname == 'PcGts':\n return root_name.namespace\n else:\n raise ValueError('Not a PAGE tree')\n\n\ndef page_text(tree):\n \"\"\"Extract text from the given PAGE content ElementTree.\"\"\"\n\n nsmap = {'page': page_namespace(tree)}\n\n def region_text(region):\n try:\n return region.find('./page:TextEquiv/page:Unicode', namespaces=nsmap).text\n except AttributeError:\n return None\n\n region_texts = []\n reading_order = tree.find('.//page:ReadingOrder', namespaces=nsmap)\n if reading_order is not None:\n for group in reading_order.iterfind('./*', namespaces=nsmap):\n if ET.QName(group.tag).localname == 'OrderedGroup':\n region_ref_indexeds = group.findall('./page:RegionRefIndexed', namespaces=nsmap)\n for region_ref_indexed in sorted(region_ref_indexeds, key=lambda r: int(r.attrib['index'])):\n region_id = region_ref_indexed.attrib['regionRef']\n region = tree.find('.//page:TextRegion[@id=\"%s\"]' % region_id, namespaces=nsmap)\n if region is not None:\n region_texts.append(region_text(region))\n else:\n warn('Not a TextRegion: \"%s\"' % region_id)\n else:\n raise NotImplementedError\n else:\n for region in tree.iterfind('.//page:TextRegion', namespaces=nsmap):\n region_texts.append(region_text(region))\n\n # XXX Does a file have to have regions etc.? region vs lines etc.\n # Filter empty region texts\n region_texts = (t for t in region_texts if t)\n\n text_ = '\\n'.join(region_texts)\n\n return text_\n\n\ndef text(filename):\n \"\"\"Read the text from the given file.\n\n Supports PAGE, ALTO and falls back to plain text.\n \"\"\"\n\n try:\n tree = ET.parse(filename)\n except XMLSyntaxError:\n with open(filename, 'r') as f:\n return f.read()\n try:\n return page_text(tree)\n except ValueError:\n return alto_text(tree)\n\n\nif __name__ == '__main__':\n print(text(sys.argv[1]))\n", "id": "3531287", "language": "Python", "matching_score": 0.5174461603164673, "max_stars_count": 0, "path": "qurator/dinglehopper/ocr_files.py" }, { "content": "# -*- coding: utf-8\nimport logging\nimport bisect\nimport unicodedata\n\nclass Alignment(object):\n def __init__(self, gap_element=0, logger=None, confusion=False):\n self.confusion = dict() if confusion else None\n self.gap_element = gap_element\n self.logger = logger or logging.getLogger(__name__)\n # alignment for windowing...\n ## python-alignment is impractical with long or heavily deviating sequences (see github issues 9, 10, 11):\n #import alignment.sequence\n #alignment.sequence.GAP_ELEMENT = self.gap_element # override default\n #from alignment.sequence import Sequence, gap_element\n #from alignment.vocabulary import Vocabulary\n #from alignment.sequencealigner import SimpleScoring, StrictGlobalSequenceAligner\n # Levenshtein scoring:\n #self.scoring = SimpleScoring(2,-1) # match score, mismatch score\n #self.aligner = StrictGlobalSequenceAligner(scoring,-2) # gap score\n # Levenshtein-like scoring with 0.1 distance within typical OCR confusion classes (to improve alignment quality; to reduce error introduced by windowing):\n # class OCRScoring(SimpleScoring):\n # def __init__(self):\n # super(OCRScoring, self).__init__(0,-1) # match score, mismatch score (Levenshtein-like)\n # self.classes = [[u\"a\", u\"ä\", u\"á\", u\"â\", u\"à\", u\"ã\"],\n # [u\"o\", u\"ö\", u\"ó\", u\"ô\", u\"ò\", u\"õ\"],\n # [u\"u\", u\"ü\", u\"ú\", u\"û\", u\"ù\", u\"ũ\"],\n # [u\"A\", u\"Ä\", u\"Á\", u\"Â\", u\"À\", u\"Ã\"],\n # [u\"O\", 
u\"Ö\", u\"Ó\", u\"Ô\", u\"Ò\", u\"Õ\"],\n # [u\"U\", u\"Ü\", u\"Ú\", u\"Û\", u\"Ù\", u\"Ũ\"],\n # [0, u\"ͤ\"],\n # [u'\"', u\"“\", u\"‘\", u\"'\", u\"’\", u\"”\"],\n # [u',', u'‚', u'„'],\n # [u'-', u'‐', u'—', u'–', u'_'],\n # [u'=', u'⸗', u'⹀'],\n # [u'ſ', u'f', u'ß'], #s?\n # [u\"c\", u\"<\", u\"e\"]]\n # self.table = {}\n # for c in self.classes:\n # for i in c:\n # for j in c:\n # if i==j:\n # self.table[(i,j)] = 0.0\n # else:\n # self.table[(i,j)] = 0.1\n # def __call__(self, firstElement, secondElement):\n # if (firstElement,secondElement) in self.table:\n # return self.table[(firstElement,secondElement)]\n # else:\n # return super(OCRScoring, self).__call__(firstElement, secondElement)\n #\n # self.scoring = OCRScoring()\n # self.aligner = StrictGlobalSequenceAligner(scoring,-1) # gap score\n \n ## edlib does not work on Unicode (non-ASCII strings)\n # import edlib\n\n ## difflib is optimised for visual comparisons (Ratcliff-Obershelp), not minimal distance (Levenshtein):\n from difflib import SequenceMatcher\n self.matcher = SequenceMatcher(isjunk=None, autojunk=False)\n \n ## edit_distance is impractical with long sequences, even if very similar (GT lines > 1000 characters, see github issue 6)\n # from edit_distance.code import SequenceMatcher # similar API to difflib.SequenceMatcher\n # def char_similar(a, b):\n # return (a == b or (a,b) in table)\n # self.matcher = SequenceMatcher(test=char_similar)\n \n self.source_text = []\n self.target_text = []\n \n def set_seqs(self, source_text, target_text):\n ## code for python_alignment:\n #vocabulary = Vocabulary() # inefficient, but helps keep search space smaller if independent for each line\n #self.source_seq = vocabulary.encodeSequence(Sequence(source_text))\n #self.target_seq = vocabulary.encodeSequence(Sequence(target_text))\n \n ## code for edlib:\n #self.edres = edlib.align(source_text, target_text, mode='NW', task='path',\n # k=max(len(source_text),len(target_text))*2)\n \n ## code for difflib/edit_distance:\n self.matcher.set_seqs(source_text, target_text)\n \n self.source_text = source_text\n self.target_text = target_text\n \n \n def is_bad(self):\n ## code for python_alignment:\n #score = self.aligner.align(self.source_seq, self.target_seq)\n #if score < -10 and score < 5-len(source_text):\n # return True\n \n ## code for edlib:\n # assert self.edres\n # if self.edres['editDistance'] < 0:\n # return True\n\n ## code for difflib/edit_distance:\n # self.matcher = difflib_matcher if len(source_text) > 4000 or len(target_text) > 4000 else editdistance_matcher\n \n # if self.matcher.distance() > 10 and self.matcher.distance() > len(self.source_text)-5:\n return bool(self.matcher.quick_ratio() < 0.5 and len(self.source_text) > 5)\n \n def get_best_alignment(self):\n ## code for identity alignment (for GT-only training; faster, no memory overhead)\n # alignment1 = zip(source_text, target_text)\n \n ## code for python_alignment:\n #score, alignments = self.aligner.align(self.source_seq, self.target_seq, backtrace=True)\n #alignment1 = vocabulary.decodeSequenceAlignment(alignments[0])\n #alignment1 = zip(alignment1.first, alignment1.second)\n #print ('alignment score:', alignment1.score)\n #print ('alignment rate:', alignment1.percentIdentity())\n \n ## code for edlib:\n # assert self.edres\n # alignment1 = []\n # n = \"\"\n # source_k = 0\n # target_k = 0\n # for c in self.edres['cigar']:\n # if c.isdigit():\n # n = n + c\n # else:\n # i = int(n)\n # n = \"\"\n # if c in \"=X\": # identity/substitution\n # 
alignment1.extend(zip(self.source_text[source_k:source_k+i], self.target_text[target_k:target_k+i]))\n # source_k += i\n # target_k += i\n # elif c == \"I\": # insert into target\n # alignment1.extend(zip(self.source_text[source_k:source_k+i], [self.gap_element]*i))\n # source_k += i\n # elif c == \"D\": # delete from target\n # alignment1.extend(zip([self.gap_element]*i, self.target_text[target_k:target_k+i]))\n # target_k += i\n # else:\n # raise Exception(\"edlib returned invalid CIGAR opcode\", c)\n # assert source_k == len(self.source_text)\n # assert target_k == len(self.target_text)\n \n ## code for difflib/edit_distance:\n alignment1 = []\n source_end = len(self.source_text)\n target_end = len(self.target_text)\n try:\n opcodes = self.matcher.get_opcodes()\n except Exception as err:\n self.logger.exception('alignment of \"%s\" and \"%s\" failed',\n self.source_text, self.target_text)\n raise err\n for opcode, source_begin, source_end, target_begin, target_end in opcodes:\n if opcode == 'equal':\n alignment1.extend(zip(self.source_text[source_begin:source_end],\n self.target_text[target_begin:target_end]))\n elif opcode == 'replace': # not really substitution:\n delta = source_end-source_begin-target_end+target_begin\n #alignment1.extend(zip(self.source_text[source_begin:source_end] + [self.gap_element]*(-delta),\n # self.target_text[target_begin:target_end] + [self.gap_element]*(delta)))\n if delta > 0: # replace+delete\n alignment1.extend(zip(self.source_text[source_begin:source_end-delta],\n self.target_text[target_begin:target_end]))\n alignment1.extend(zip(self.source_text[source_end-delta:source_end],\n [self.gap_element]*(delta)))\n if delta <= 0: # replace+insert\n alignment1.extend(zip(self.source_text[source_begin:source_end],\n self.target_text[target_begin:target_end+delta]))\n alignment1.extend(zip([self.gap_element]*(-delta),\n self.target_text[target_end+delta:target_end]))\n elif opcode == 'insert':\n alignment1.extend(zip([self.gap_element]*(target_end-target_begin),\n self.target_text[target_begin:target_end]))\n elif opcode == 'delete':\n alignment1.extend(zip(self.source_text[source_begin:source_end],\n [self.gap_element]*(source_end-source_begin)))\n else:\n raise Exception(\"difflib returned invalid opcode\", opcode, \"in\", self.source_text, self.target_text)\n assert source_end == len(self.source_text), \\\n 'alignment does not span full sequence \"%s\" - %d' % (self.source_text, source_end)\n assert target_end == len(self.target_text), \\\n 'alignment does not span full sequence \"%s\" - %d' % (self.target_text, target_end)\n\n if not self.confusion is None:\n for pair in alignment1:\n if pair[0] == pair[1]:\n continue\n count = self.confusion.setdefault(pair, 0)\n self.confusion[pair] = count + 1\n \n return alignment1\n\n def get_confusion(self, limit=None):\n if self.confusion is None:\n raise Exception(\"aligner was not configured to count confusion\")\n table = []\n class Confusion(object):\n def __init__(self, count, pair):\n self.count = count\n self.pair = pair\n def __repr__(self):\n return str((self.count, self.pair))\n def __lt__(self, other):\n return self.count > other.count\n def __le__(self, other):\n return self.count >= other.count\n def __eq__(self, other):\n return self.count == other.count\n def __ne__(self, other):\n return self.count != other.count\n def __gt__(self, other):\n return self.count < other.count\n def __ge__(self, other):\n return self.count <= other.count\n for pair, count in self.confusion.items():\n conf = 
Confusion(count, pair)\n length = len(table)\n idx = bisect.bisect_left(table, conf, hi=min(limit or length, length))\n if limit and idx >= limit:\n continue\n table.insert(idx, conf)\n if limit:\n table = table[:limit]\n return table\n \n def get_levenshtein_distance(self, source_text, target_text):\n # alignment for evaluation only...\n import editdistance\n dist = editdistance.eval(source_text, target_text)\n return dist, max(len(source_text), len(target_text))\n \n def get_adjusted_distance(self, source_text, target_text, normalization=None):\n import unicodedata\n def normalize(seq):\n if normalization in ['NFC', 'NFKC']:\n if isinstance(seq, list):\n return [unicodedata.normalize(normalization, tok) for tok in seq]\n else:\n return unicodedata.normalize(normalization, seq)\n else:\n return seq\n self.set_seqs(normalize(source_text), normalize(target_text))\n alignment = self.get_best_alignment()\n dist = 0 # distance\n \n umlauts = {u\"ä\": \"a\", u\"ö\": \"o\", u\"ü\": \"u\"} # for combination with U+0363 (not in NFKC)\n #umlauts = {}\n if normalization == 'historic_latin':\n equivalences = [\n # some of these are not even in NFKC:\n {\"s\", \"ſ\"},\n {\"r\", \"ꝛ\"},\n {\"0\", \"⁰\"},\n {\"1\", \"¹\"},\n {\"2\", \"²\"},\n {\"3\", \"³\"},\n {\"4\", \"⁴\"},\n {\"5\", \"⁵\"},\n {\"6\", \"⁶\"},\n {\"7\", \"⁷\"},\n {\"8\", \"⁸\"},\n {\"9\", \"⁹\", \"ꝰ\"},\n {\"„\", \"»\", \"›\", \"〟\"},\n {\"“\", \"«\", \"‹\", \"〞\"},\n {\"'\", \"ʹ\", \"ʼ\", \"′\", \"‘\", \"’\", \"‛\", \"᾽\"},\n {\",\", \"‚\"},\n {\"-\", \"−\", \"—\", \"‐\", \"‑\", \"‒\", \"–\", \"⁃\", \"﹘\", \"―\", \"─\"},\n {\"‟\", \"〃\", \"”\", \"″\"}, # ditto signs\n {\"~\", \"∼\", \"˜\", \"῀\", \"⁓\"},\n {\"(\", \"⟨\", \"⁽\"},\n {\")\", \"⟩\", \"⁾\"},\n {\"/\", \"⧸\", \"⁄\", \"∕\"},\n {\"\\\\\", \"⧹\", \"∖\", \"⧵\"}\n ]\n else:\n equivalences = []\n def equivalent(x, y):\n for equivalence in equivalences:\n if x in equivalence and y in equivalence:\n return True\n return False\n\n # FIXME: cover all combining character sequences here (not just umlauts)\n # idea: assign all combining codepoints to previous position, leaving gap\n # (but do not add to gap)\n source_umlaut = ''\n target_umlaut = ''\n for source_sym, target_sym in alignment:\n #print(source_sym, target_sym)\n \n if source_sym == target_sym or equivalent(source_sym, target_sym):\n if source_umlaut: # previous source is umlaut non-error\n source_umlaut = False # reset\n dist += 1.0 # one full error (mismatch)\n elif target_umlaut: # previous target is umlaut non-error\n target_umlaut = False # reset\n dist += 1.0 # one full error (mismatch)\n else:\n if source_umlaut: # previous source is umlaut non-error\n source_umlaut = False # reset\n if (source_sym == self.gap_element and\n target_sym == u\"\\u0364\"): # diacritical combining e\n dist += 1.0 # umlaut error (umlaut match)\n #print('source umlaut match', a)\n else:\n dist += 2.0 # two full errors (mismatch)\n elif target_umlaut: # previous target is umlaut non-error\n target_umlaut = False # reset\n if (target_sym == self.gap_element and\n source_sym == u\"\\u0364\"): # diacritical combining e\n dist += 1.0 # umlaut error (umlaut match)\n #print('target umlaut match', a)\n else:\n dist += 2.0 # two full errors (mismatch)\n elif source_sym in umlauts and umlauts[source_sym] == target_sym:\n source_umlaut = True # umlaut non-error\n elif target_sym in umlauts and umlauts[target_sym] == source_sym:\n target_umlaut = True # umlaut non-error\n else:\n dist += 1.0 # one full error (non-umlaut mismatch)\n if source_umlaut or 
target_umlaut: # previous umlaut error\n dist += 1.0 # one full error\n\n # FIXME: determine WER as well\n # idea: assign all non-spaces to previous position, leaving gap\n # collapse gap-gap pairs, \n \n #length_reduction = max(source_text.count(u\"\\u0364\"), target_text.count(u\"\\u0364\"))\n return dist, max(len(source_text), len(target_text))\n", "id": "11949537", "language": "Python", "matching_score": 1.76280677318573, "max_stars_count": 0, "path": "ocrd_cor_asv_ann/lib/alignment.py" }, { "content": "'''backend library interface\n\nSequence2Sequence - encapsulates ANN model definition and application\nNode - tree data type for beam search\nAlignment - encapsulates global sequence alignment and distance metrics\n'''\n\nfrom .alignment import Alignment\nfrom .seq2seq import Sequence2Sequence, Node, GAP\n", "id": "6651441", "language": "Python", "matching_score": 1.7205442190170288, "max_stars_count": 10, "path": "ocrd_cor_asv_ann/lib/__init__.py" }, { "content": "'''backend library interface'''\n", "id": "1656499", "language": "Python", "matching_score": 1, "max_stars_count": 11, "path": "ocrd_cor_asv_fst/lib/__init__.py" }, { "content": "'''stand-alone command-line interface'''\n", "id": "11083858", "language": "Python", "matching_score": 0, "max_stars_count": 10, "path": "ocrd_cor_asv_ann/scripts/__init__.py" }, { "content": "import subprocess\nfrom ocrd.utils import getLogger\n\n\nclass JavaProcess:\n def __init__(self, jar, main, input_str, args):\n self.jar = jar\n self.main = main\n self.input_str = input_str\n self.args = args\n self.log = getLogger('JavaProcess')\n\n def run(self):\n cmd = self.get_cmd()\n self.log.info('command: %s', cmd)\n with subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n encoding='utf-8',\n # stderr=subprocess.DEVNULL,\n ) as p:\n self.output, err = p.communicate(input=self.input_str)\n self.output = self.output\n retval = p.wait()\n self.log.info(\"retval: %i\", retval)\n if retval != 0:\n raise ValueError(\n \"cannot execute {}: {}\\nreturned: {}\"\n .format(cmd, err, retval))\n\n def get_cmd(self):\n cmd = ['java', '-cp', self.jar, self.main]\n cmd.extend(self.args)\n return cmd\n", "id": "11245397", "language": "Python", "matching_score": 1.01227605342865, "max_stars_count": 0, "path": "lib/javaprocess.py" }, { "content": "import logging\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\nQUIET = True\n", "id": "6637528", "language": "Python", "matching_score": 0.06701003015041351, "max_stars_count": 3, "path": "settings.quiet.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom future import standard_library\nstandard_library.install_aliases()\n\nfrom builtins import str\nfrom builtins import map\nfrom builtins import object\n\nimport re\n\nclass HocrSpecProperties(object):\n\n class HocrSpecProperty(object):\n \"\"\"\n Definition of a 'title' property\n\n Args:\n name (str): Name of the property\n type (type): str, int, float\n deprecated (Optional[Tuple[int,str]]): Version and message at which\n the property was removed from the specs\n obsolete (Optional[Tuple[int,str]]): Version and message at which\n the property was removed from the specs\n not_checked (bool): Whether the check is not currently tested in-depth\n list (bool): Whether the values are a list\n required_properties (Optional[List[str]]): Names of title\n properties that must be present in the same title\n required_capabilities (Optional[List[str]]): Names of capabilities\n that must be enabled for the document in order to use this\n 
property.\n split_pattern (List[str]): List of regexes to split list values.\n Specify mutiple regexes for multi-dimensional. Default: ['\\s+']\n range (Optional[List[int]]): Minimum and maximum value of property\n \"\"\"\n\n def __init__(self, name, type,\n deprecated=False,\n obsolete=False,\n not_checked=False,\n required_properties=None,\n range=None,\n required_capabilities=[],\n split_pattern=[r\"\\s+\"],\n list=False):\n self.name = name\n self.type = type\n self.deprecated = deprecated\n self.obsolete = obsolete\n self.not_checked = not_checked\n self.required_properties = required_properties\n self.required_capabilities = required_capabilities\n self.range = range\n self.split_pattern = split_pattern\n self.list = list\n def __repr__(self):\n return '<* title=\"%s\">' % self.name\n\n # General Properties\n bbox = HocrSpecProperty('bbox', int, list=True)\n textangle = HocrSpecProperty('textangle', float)\n poly = HocrSpecProperty('poly', int, list=True,\n required_capabilities=['ocrp_poly'])\n order = HocrSpecProperty('order', int)\n presence = HocrSpecProperty('presence', str)\n cflow = HocrSpecProperty('cflow', str)\n baseline = HocrSpecProperty('baseline', float, list=True)\n\n # Recommended Properties for typesetting elements\n image = HocrSpecProperty('image', str)\n imagemd5 = HocrSpecProperty('imagemd5', str)\n ppageno = HocrSpecProperty('ppageno', int)\n lpageno = HocrSpecProperty('lpageno', int)\n\n # Optional Properties for typesetting elements\n scan_res = HocrSpecProperty('scan_res', int, list=True)\n x_scanner = HocrSpecProperty('x_scanner', str)\n x_source = HocrSpecProperty('x_source', str)\n hardbreak = HocrSpecProperty('hardbreak', int)\n\n # 7 Character Information\n cuts = HocrSpecProperty(\n 'cuts',\n int,\n list=True,\n split_pattern=[r'\\s+', ','],\n required_properties=['bbox'])\n nlp = HocrSpecProperty(\n 'nlp',\n float,\n list=True,\n required_properties=['bbox', 'cuts'],\n required_capabilities=['ocrp_nlp'])\n\n # 8 OCR Engine-Specific Markup\n x_font = HocrSpecProperty(\n 'xfont_s', str, required_capabilities=['ocrp_font'])\n x_fsize = HocrSpecProperty(\n 'xfont_s', int, required_capabilities=['ocrp_font'])\n x_bboxes = HocrSpecProperty('x_bboxes', int, list=True)\n x_confs = HocrSpecProperty('x_confs', float, list=True, range=[0, 100])\n x_wconf = HocrSpecProperty('x_wconf', float, range=[0, 100])\n\nclass HocrSpecAttributes(object):\n\n class HocrSpecAttribute(object):\n \"\"\"\n HTML Attributes that have special meaning in hOCR.\n\n Note: 'title', 'class', 'name' and 'content' are handled elsewhere,\n this is for attributes that require special capabilities.\n \"\"\"\n def __init__(self, name, required_capabilities=[]):\n self.name = name\n self.required_capabilities = required_capabilities\n\n attr_lang = HocrSpecAttribute('lang', required_capabilities=['ocrp_lang'])\n attr_dir = HocrSpecAttribute('dir', required_capabilities=['ocrp_dir'])\n\nclass HocrSpecCapabilities(object):\n\n class HocrSpecCapability(object):\n \"\"\"\n Definition of hOCR capabilities.\n \"\"\"\n def __init__(self, name):\n self.name = name\n\n ocrp_lang = HocrSpecCapability('ocrp_lang')\n ocrp_dir = HocrSpecCapability('ocrp_dir')\n ocrp_poly = HocrSpecCapability('ocrp_poly')\n ocrp_font = HocrSpecCapability('ocrp_font')\n ocrp_nlp = HocrSpecCapability('ocrp_nlp')\n\nclass HocrSpecMetadataFields(object):\n\n class HocrSpecMetadataField(object):\n \"\"\"\n Definition of hOCR metadata.\n \"\"\"\n def __init__(self, name, required=False, recommended=False, known=[]):\n self.name = 
name\n self.required = required\n self.recommended = recommended\n self.known = known\n\n ocr_system = HocrSpecMetadataField(\n 'ocr-system',\n required=True,\n known=['tesseract 3.03', 'OCRopus Revision: 312'])\n ocr_capabilities = HocrSpecMetadataField('ocr-capabilities', required=True)\n ocr_number_of_pages = HocrSpecMetadataField(\n 'ocr-number-of-pages', recommended=True)\n ocr_langs = HocrSpecMetadataField('ocr-langs', recommended=True)\n ocr_scripts = HocrSpecMetadataField('ocr-scripts', recommended=True)\n\n\nclass HocrSpecClasses(object):\n\n class HocrSpecClass(object):\n \"\"\"\n Definition of an element defined by its 'class' name\n\n Args:\n name (str): Name of the class\n deprecated (Optional[Tuple[int,str]]): Version and message at which\n the element was removed from the specs\n obsolete (Optional[Tuple[int,str]]): Version and message at which the element was\n removed from the specs\n not_checked (bool): Whether the validation is not currently tested in-depth\n tagnames (List[str]): Tag names elements with this class\n may have.\n must_exist (bool): Whether at least one elment of this class must\n be present in document\n must_not_contain (List[str]): Classes of elements that must not be\n descendants of this element\n required_attrib (Optional[List[str]]): Names of attributes that\n must be present on the element\n required_properties (Optional[List[str]]): Names of title\n properties that must be present in the title attribute\n required_capabilities (Optional[List[str]]): Names of capabilities\n that must be enabled for the document in order to use this\n class. Will always imply the class itself.\n one_ancestor: (Optional[List[str]]): Classes of elements\n of which there must be exactly one ancestor (think: every line\n must be in exactly one page)\n allowed_descendants: (Optional[List[str]]): Classes of elements\n that are allowed as descendants\n \"\"\"\n\n def __init__(self, name,\n deprecated=False,\n obsolete=False,\n not_checked=False,\n tagnames=[],\n must_exist=False,\n must_not_contain=[],\n required_attrib=[],\n required_capabilities=[],\n required_properties=[],\n one_ancestor=[],\n allowed_descendants=None):\n self.name = name\n self.deprecated = deprecated\n self.obsolete = obsolete\n self.not_checked = not_checked\n self.tagnames = tagnames\n self.must_exist = must_exist\n self.must_not_contain = must_not_contain\n self.required_attrib = required_attrib\n self.required_properties = required_properties\n self.required_capabilities = [self.name] + required_capabilities\n self.one_ancestor = one_ancestor\n self.allowed_descendants = allowed_descendants\n def __repr__(self):\n return '<* class=\"%s\">' % self.name\n\n # 4 Logical Structuring Elements\n ocr_document = HocrSpecClass('ocr_document', not_checked=True)\n ocr_title = HocrSpecClass('ocr_title', not_checked=True)\n ocr_author = HocrSpecClass('ocr_author', not_checked=True)\n ocr_abstract = HocrSpecClass('ocr_abstract', not_checked=True)\n ocr_part = HocrSpecClass('ocr_part', tagnames=['h1'])\n ocr_chapter = HocrSpecClass('ocr_chapter', tagnames=['h1'])\n ocr_section = HocrSpecClass('ocr_section', tagnames=['h2'])\n ocr_subsection = HocrSpecClass('ocr_subsection', tagnames=['h3'])\n ocr_subsubsection = HocrSpecClass('ocr_subsubsection', tagnames=['h4'])\n ocr_display = HocrSpecClass('ocr_display', not_checked=True)\n ocr_blockquote = HocrSpecClass('ocr_blockquote', tagnames=['<blockquote>'])\n ocr_par = HocrSpecClass(\n 'ocr_par', one_ancestor=['ocr_page'], tagnames=['p'])\n ocr_linear = 
HocrSpecClass('ocr_linear', not_checked=True)\n ocr_caption = HocrSpecClass('ocr_caption', not_checked=True)\n\n # 5 Typesetting Related Elements\n ocr_page = HocrSpecClass('ocr_page', must_exist=True)\n ocr_column = HocrSpecClass(\n 'ocr_column',\n one_ancestor=['ocr_page'],\n obsolete=('1.1', \"Please use ocr_carea instead of ocr_column\"))\n ocr_carea = HocrSpecClass(\n 'ocr_carea',\n one_ancestor=['ocr_page'])\n ocr_line = HocrSpecClass(\n 'ocr_line',\n must_not_contain=['ocr_line'],\n one_ancestor=['ocr_page'],\n required_properties=['bbox'])\n ocr_separator = HocrSpecClass('ocr_separator', not_checked=True)\n ocr_noise = HocrSpecClass('ocr_noise', not_checked=True)\n\n # Classes for floats\n __FLOATS = [\n 'ocr_float'\n 'ocr_textfloat',\n 'ocr_textimage',\n 'ocr_image',\n 'ocr_linedrawing',\n 'ocr_photo',\n 'ocr_header',\n 'ocr_footer',\n 'ocr_pageno',\n 'ocr_table',\n ]\n ocr_float = HocrSpecClass('ocr_float', must_not_contain=__FLOATS)\n ocr_textfloat = HocrSpecClass('ocr_textfloat', must_not_contain=__FLOATS)\n ocr_textimage = HocrSpecClass('ocr_textimage', must_not_contain=__FLOATS)\n ocr_image = HocrSpecClass('ocr_image', must_not_contain=__FLOATS)\n ocr_linedrawing = HocrSpecClass('ocr_linedrawing', must_not_contain=__FLOATS)\n ocr_photo = HocrSpecClass('ocr_photo', must_not_contain=__FLOATS)\n ocr_header = HocrSpecClass('ocr_header', must_not_contain=__FLOATS)\n ocr_footer = HocrSpecClass('ocr_footer', must_not_contain=__FLOATS)\n ocr_pageno = HocrSpecClass('ocr_pageno', must_not_contain=__FLOATS)\n ocr_table = HocrSpecClass('ocr_table', must_not_contain=__FLOATS)\n\n # 6 Inline Representation\n ocr_glyph = HocrSpecClass('ocr_glyph', not_checked=True)\n ocr_glyphs = HocrSpecClass('ocr_glyphs', not_checked=True)\n ocr_dropcap = HocrSpecClass('ocr_dropcap', not_checked=True)\n ocr_glyphs = HocrSpecClass('ocr_glyphs', not_checked=True)\n ocr_chem = HocrSpecClass('ocr_chem', not_checked=True)\n ocr_math = HocrSpecClass('ocr_math', not_checked=True)\n # 7 Character Information\n ocr_cinfo = HocrSpecClass('ocr_cinfo', not_checked=True)\n\nclass HocrSpecProfile(object):\n \"\"\"\n Restricts how the spec is checked.\n\n Args:\n version (str): Version to check against\n description (str): Descibe the profile\n implicit_capabilities (List[str]): Assume these capabilities were\n specified in <meta name=ocr-capabilities'>\n skip_check (List[str]): Specify a list of checks to skip.\n \"\"\"\n\n def __init__(self, version='1.1', description=None,\n implicit_capabilities=[], skip_check=[]):\n self.version = version\n self.description = description\n self.implicit_capabilities = implicit_capabilities\n self.skip_check = skip_check\n\nclass HocrSpec(object):\n \"\"\"\n The constraints of the HOCR HTML application profile.\n\n Checks:\n - metadata\n - attributes\n - properties\n - classes\n \"\"\"\n profiles = {\n 'standard': HocrSpecProfile(\n description=\"Full validation of current spec [Default]\"),\n 'relaxed': HocrSpecProfile(\n description=\"Validation without any capability and attribute checks\",\n implicit_capabilities=['*'],\n skip_check=['attribute']),\n }\n checks = ['attributes', 'classes', 'metadata', 'properties']\n\n def __init__(self, profile='standard', **kwargs):\n self.profile = self.__class__.profiles[profile]\n for arg in kwargs:\n if kwargs[arg]:\n setattr(self.profile, arg, kwargs[arg])\n self.checks = []\n for check in self.__class__.checks:\n if not check in self.profile.skip_check:\n self.checks.append(check)\n\n 
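# Illustrative usage sketch (assumption, not authoritative): the skip_check\n    # keyword above prunes the class-level checks list at construction time, e.g.\n    #   HocrSpec('standard', skip_check=['metadata']).checks\n    #   # -> ['attributes', 'classes', 'properties']\n\n    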
#=========================================================================\n #\n # Private methods\n #\n #=========================================================================\n def __elem_name(self, el):\n \"\"\"\n Stringify an element with its attributes\n \"\"\"\n attrib = \" \".join(['%s=\"%s\"'%(k, el.attrib[k]) for k in el.attrib])\n # attrib = str(el.attrib).replace(': ', ':').replace(', ', ',')\n # attrib = attrib.replace(\"{'\", '{').replace(\"':'\", \":'\").replace(\"'}\", '}')\n return \"<%s %s>\" %(el.tag, attrib)\n\n def __get_capabilities(self, root):\n \"\"\"\n List all capabilities of the document.\n \"\"\"\n try:\n caps = root.xpath('//meta[@name=\"ocr-capabilities\"]/@content')[0]\n return re.split(r'\\s+', caps)\n except IndexError as e: return []\n\n def __has_capability(self, report, el, cap):\n \"\"\"\n Check whether the document of `el` has capability `cap`.\n \"\"\"\n if '*' in self.profile.implicit_capabilities:\n return True\n if not cap in self.__get_capabilities(el) + self.profile.implicit_capabilities:\n report.add('ERROR',\n el.sourceline,\n '%s: Requires the \"%s\" capability but it is not specified'\n % (self.__elem_name(el), cap))\n\n def __not_contains_class(self, report, el, contains_classes):\n \"\"\"\n el must not contain any elements with a class from `contains_classes`\n \"\"\"\n for contains_class in contains_classes:\n contained = el.xpath(\".//*[@class='%s']\" % contains_class)\n if len(contained) > 0:\n report.add('ERROR', el.sourceline,\n \"%s must not contain '%s', but does contain %s in line %d\" %\n (self.__elem_name(el),\n contains_class,\n self.__elem_name(contained[0]),\n contained[0].sourceline))\n\n def __exactly_one_ancestor_class(self, report, el, ancestor_class):\n \"\"\"\n There must be exactly one element matching xpath.\n \"\"\"\n nr = len(el.xpath(\"./ancestor::*[@class='%s']\" % ancestor_class))\n if 1 != nr:\n report.add('ERROR', el.sourceline,\n \"%s must be descendant of exactly one '%s', but found %d\" %\n (self.__elem_name(el), ancestor_class, nr))\n\n def __has_tagname(self, report, el, tagnames):\n \"\"\"\n Element el must have one of the tag names tagnames\n \"\"\"\n if tagnames and not el.tag in tagnames:\n report.add('ERROR', el.sourceline,\n \"%s must have a tag name from %s, not '%s'\" %\n (self.__elem_name(el), tagnames, el.tag))\n\n def __has_attrib(self, report, el, attrib):\n \"\"\"\n Elements el must have attribute attrib\n \"\"\"\n if not attrib in el.attrib:\n report.add('ERROR', el.sourceline, \"%s must have attribute '%s'\"\n %(self.__elem_name(el), attrib))\n\n def __has_property(self, report, el, prop):\n \"\"\"\n Test whether an element el has a property prop in its title field\n \"\"\"\n try:\n props = self.parse_properties(el)\n except KeyError as e:\n report.add('ERROR', el.sourceline, '%s Cannot parse properties, missing atttribute: %s'\n %(self.__elem_name(el), e))\n return\n except Exception as e:\n report.add('ERROR', el.sourceline, 'Error parsing properties for \"%s\" : %s'\n %(self.__elem_name(el), e))\n return\n if not prop in props:\n report.add('ERROR', el.sourceline, \"Element %s must have title prop '%s'\"\n %(self.__elem_name(el), prop))\n\n def __check_version(self, report, el, spec):\n if spec.deprecated and self.profile.version >= spec.deprecated[0]:\n report.add(\n 'WARN', el.sourceline,\n '%s %s has been deprecated since version %s: %s' %\n (self.__elem_name(el), spec, spec.deprecated[0], spec.deprecated[1]))\n if spec.obsolete and self.profile.version >= spec.obsolete[0]:\n 
report.add('ERROR', el.sourceline,\n '%s %s has been obsolete since version %s: %s' %\n (self.__elem_name(el), spec, spec.obsolete[0], spec.obsolete[1]))\n\n def __check_against_ocr_class(self, report, el, c):\n \"\"\"\n check an element against its hOCR class.\n \"\"\"\n if c.not_checked:\n return report.add(\"WARN\", el.sourceline,\n \"Validation of %s not tested in-depth\" % c)\n self.__check_version(report, el, c)\n self.__has_tagname(report, el, c.tagnames)\n self.__not_contains_class(report, el, c.must_not_contain)\n for ancestor_class in c.one_ancestor:\n self.__exactly_one_ancestor_class(report, el, ancestor_class)\n for attrib in c.required_attrib:\n self.__has_attrib(report, el, attrib)\n for prop in c.required_properties:\n self.__has_property(report, el, prop)\n for cap in c.required_capabilities:\n self.__has_capability(report, el, cap)\n\n def __check_against_prop_spec(self, report, el, k, v):\n \"\"\"\n check a property value against its spec.\n\n Most structural validation must happen at parse-time to ensure\n syntactical correctness. Here we check value constraints.\n \"\"\"\n prop_spec = getattr(HocrSpecProperties, k)\n prop_str = str(prop_spec).replace('*', el.tag)\n for cap in prop_spec.required_capabilities:\n self.__has_capability(report, el, cap)\n if prop_spec.deprecated and self.profile.version >= prop_spec.deprecated[0]:\n report.add(\n 'WARN', el.sourceline,\n '%s %s has been deprecated since version %s: %s' %\n (self.__elem_name(el), prop_spec, prop_spec.deprecated[0], prop_spec.deprecated[1]))\n if prop_spec.obsolete and self.profile.version >= prop_spec.obsolete[0]:\n report.add('WARN', el.sourceline,\n '%s %s has been obsolete since version %s: %s' %\n (self.__elem_name(el), prop_spec, prop_spec.obsolete[0], prop_spec.obsolete[1]))\n # primitives\n if not prop_spec.list:\n if prop_spec.range:\n if not prop_spec.range[0] <= v <= prop_spec.range[1]:\n report.add(\n 'ERROR',\n el.sourceline,\n \"%s : Value out of range: %d not in %s\"\n % (prop_str, v, prop_spec.range))\n return\n # lists\n if prop_spec.range:\n for i, vv in enumerate(v):\n if 1 == len(prop_spec.split_pattern):\n if not prop_spec.range[0] <= vv <= prop_spec.range[1]:\n report.add(\n 'ERROR',\n el.sourceline,\n \"%s : List value [%d] out of range (%d not in %s\"\n % (prop_str, i, vv, prop_spec.range))\n if 2 == len(prop_spec.split_pattern):\n for ii, vv in enumerate(v):\n if not prop_spec.range[0] <= vv <= prop_spec.range[1]:\n report.add(\n 'ERROR',\n el.sourceline,\n \"%s : List value [%d][%d] out of range (%d not in %s\"\n % (prop_str, i, ii, vv, prop_spec.range))\n\n #=========================================================================\n #\n # Class methods\n #\n #=========================================================================\n @classmethod\n def list(cls, category):\n if category == 'profiles':\n return list(cls.profiles.keys())\n elif category == 'checks':\n return [k[len('check_'):] for k in dir(cls)]\n elif category == 'capabilities':\n return [k for k in dir(HocrSpecCapabilities) if re.match(r'^[a-z].*', k)]\n else:\n raise ValueError(\"Unknown category %s\" % category)\n\n #=========================================================================\n #\n # Public methods\n #\n #=========================================================================\n\n def parse_properties(self, title):\n \"\"\"\n Parse the 'title' attribute of an element.\n \"\"\"\n ret = {}\n # if it's an lxml node, take the 'title' attribute or die trying\n if hasattr(title, 'attrib'):\n title = 
title.attrib['title']\n # Split on semicolon, optionally preceded and followed by whitespace\n for kv in re.split(r'\\s*;\\s*', title):\n # Split key and value at first whitespace\n (k, v) = re.split(r'\\s+', kv, 1)\n # Make sure the property is from the list of known properties\n try:\n prop_spec = getattr(HocrSpecProperties, k)\n # If the property is a list value, split the value at the\n # property's 'split_pattern' and apply the type to its values\n if prop_spec.list:\n if 1 == len(prop_spec.split_pattern):\n v = list(map(prop_spec.type,\n re.split(prop_spec.split_pattern[0], v)))\n elif 2 == len(prop_spec.split_pattern):\n # lambda vv: map(prop_spec.type, re.split(prop_spec.split_pattern[1], vv)),\n v = [list(map(prop_spec.type, re.split(prop_spec.split_pattern[1], vv))) for vv in re.split(prop_spec.split_pattern[0], v)]\n # If the property is a scalar value, apply the type to the value\n else:\n v = prop_spec.type(v)\n except Exception as e:\n raise type(e)(str(e) + ' (%s on \"%s\")' % (type(e).__name__, k))\n ret[k] = v\n return ret\n\n def check_properties(self, report, root):\n \"\"\"\n Parse and check all properties.\n \"\"\"\n # print __method__\n # if self.profile.implicit_capabilities\n for el in root.xpath('//*[starts-with(@class, \"ocr\")][@title]'):\n try:\n props = self.parse_properties(el.attrib['title'])\n except Exception as e:\n return report.add('ERROR', el.sourceline,\n 'Error parsing properties for \"%s\" : (property %s)' %\n (self.__elem_name(el), e))\n for k in props:\n self.__check_against_prop_spec(report, el, k, props[k])\n\n def check_classes(self, report, root):\n \"\"\"\n check all elements by their class\n \"\"\"\n for class_spec in [getattr(HocrSpecClasses, k)\n for k in dir(HocrSpecClasses)\n if k.startswith('ocr')]:\n els = root.xpath('//*[@class=\"%s\"]' % class_spec.name)\n if class_spec.must_exist and len(els) == 0:\n report.add('ERROR', 0,\n 'At least one %s must exist' % class_spec)\n for el in els:\n self.__check_against_ocr_class(report, el, class_spec)\n\n def check_attributes(self, report, root):\n \"\"\"\n check attributes according to the spec.\n \"\"\"\n for attr_spec in [getattr(HocrSpecAttributes, k)\n for k in dir(HocrSpecAttributes)\n if k.startswith('attr_')]:\n els = root.xpath('//*[starts-with(@class, \"ocr\")][@%s]' %\n attr_spec.name)\n for el in els:\n if '' == el.attrib[attr_spec.name]:\n report.add(\n 'ERROR', el.sourceline,\n \"%s: Attribute '%s' is empty. 
\"\n \"Either use 'unknown' or don't specify the attribtue\"\n % (self.__elem_name(el), attr_spec.name))\n for cap in attr_spec.required_capabilities:\n self.__has_capability(report, el, cap)\n\n def check_metadata(self, report, root):\n \"\"\"\n check metadata tags.\n \"\"\"\n # Check for unknown fields\n for el in root.xpath(\"//meta[starts-with(@name, 'ocr')]\"):\n name = el.attrib['name']\n if not getattr(HocrSpecMetadataFields, name.replace('-', '_'), None):\n report.add('ERROR', el.sourceline, \"%s Unknown metadata field '%s'\"\n % (self.__elem_name(el), name))\n for field_spec in [getattr(HocrSpecMetadataFields, k)\n for k in dir(HocrSpecMetadataFields)\n if k.startswith('ocr')]:\n els = root.xpath(\"//meta[@name='%s']\" % field_spec.name)\n # Cardinality checks\n if len(els) > 1:\n report.add('ERROR', els[1].sourceline,\n \"Metadata fields must not be repeated\")\n elif len(els) == 0:\n if field_spec.required:\n report.add('ERROR', 0, \"Required metadata field '%s' missing\" %\n field_spec.name)\n elif field_spec.recommended:\n report.add('WARN', 0, \"Recommended metadata field '%s' missing\" %\n field_spec.name)\n return\n # Field-specific checks\n el = els[0]\n try:\n content = el.attrib['content']\n except KeyError as e:\n report.add('ERROR', el.sourceline, \"%s must have 'content' attribute\"\n % self.__elem_name(el))\n return\n if HocrSpecMetadataFields.ocr_system == field_spec:\n if not content in field_spec.known:\n report.add(\n 'DEBUG', el.sourceline,\n \"Unknown ocr-system: '%s'. \"\n \"Consider opening an issue to let others know about it.\"\n % content)\n # TODO check other metadata\n\n def check(self, report, root):\n \"\"\"\n Execute all enabled checks\n \"\"\"\n for check in self.checks:\n fn = getattr(HocrSpec, \"check_%s\"%(check))\n fn(self, report, root)\n", "id": "89689", "language": "Python", "matching_score": 2.633042573928833, "max_stars_count": 5, "path": "hocr_spec/spec.py" }, { "content": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nfrom future import standard_library\nstandard_library.install_aliases()\n\nimport sys\nfrom hocr_spec import HocrValidator, HocrSpec\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\n 'sources',\n nargs='+',\n help=\"hOCR file to check or '-' to read from STDIN\")\nparser.add_argument(\n '--format',\n '-f',\n choices=HocrValidator.formats,\n default=HocrValidator.formats[0],\n help=\"Report format\")\nparser.add_argument(\n '--filename',\n help=\"Filename to use in report\")\nparser.add_argument(\n '--profile',\n '-p',\n default='standard',\n choices=HocrSpec.list('profiles'),\n help=\"Validation profile\")\nparser.add_argument(\n '--implicit_capabilities',\n '-C',\n action='append',\n metavar='CAPABILITY',\n choices=HocrSpec.list('capabilities'),\n help=\"Enable this capability. Use '*' to enable all capabilities. 
\"\n \"In addition to the 'ocr*' classes, you can use %s\" %\n HocrSpec.list('capabilities')\n )\nparser.add_argument(\n '--skip-check',\n '-X',\n action='append',\n choices=HocrSpec.checks,\n help=\"Skip one check\")\nparser.add_argument(\n '--parse-strict',\n action='store_true',\n help=\"Parse HTML with less tolerance for errors\")\nparser.add_argument(\n '--silent',\n '-s',\n action='store_true',\n help=\"Don't produce any output but signal success with exit code.\")\n\ndef main():\n args = parser.parse_args()\n\n validator = HocrValidator(args.profile,\n skip_check=args.skip_check,\n implicit_capabilities=args.implicit_capabilities)\n failed = 0\n for source in args.sources:\n report = validator.validate(\n source, parse_strict=args.parse_strict, filename=args.filename)\n failed += not report.is_valid()\n if not args.silent:\n print(report.format(args.format))\n sys.exit(0 if not failed else 1)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "10770621", "language": "Python", "matching_score": 3.5101099014282227, "max_stars_count": 5, "path": "hocr_spec/cli.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom future import standard_library\nstandard_library.install_aliases()\n\nfrom builtins import object\n\nimport sys\nimport re\nfrom lxml import etree\nfrom functools import wraps\nfrom .spec import HocrSpec\n\nclass HocrValidator(object):\n\n class LevelAnsiColor(object):\n OK = '2'\n DEBUG = '4'\n WARN = '3'\n ERROR = '1'\n FATAL = '1;1'\n\n class ReportItem(object):\n \"\"\"\n A single report item\n \"\"\"\n def __init__(self, level, sourceline, message, **kwargs):\n assert getattr(HocrValidator.LevelAnsiColor, level) != None\n self.level = level\n self.sourceline = sourceline\n self.message = message\n def __str__(self):\n return \"[%s] +%s : %s\" % (self.level, self.sourceline, self.message)\n\n class Report(object):\n\n \"\"\"\n A validation Report\n \"\"\"\n def __init__(self, filename):\n self.filename = filename\n self.items = []\n self.abort = False\n\n def __escape_xml(self, s):\n translation = {\n \"'\": '&apos;',\n '\"': '&quot;',\n '<': '&lt;',\n '>': '&gt;',\n '&': '&amp;',\n }\n for k in translation:\n s = s.replace(k, translation[k])\n return s\n\n def add(self, level, *args, **kwargs):\n self.items.append(HocrValidator.ReportItem(level, *args, **kwargs))\n if level is 'FATAL':\n raise ValueError(\"Validation hit a FATAL issue: %s\" % self.items[-1])\n\n def is_valid(self):\n return 0 == len([x for x in self.items if x.level in ['ERROR', 'FATAL']])\n\n def format(self, *args):\n \"\"\"\n Format the report\n \"\"\"\n format = args[0] if args[0] else 'text'\n if format == 'bool':\n return self.is_valid()\n elif format in ['text', 'ansi']:\n if self.is_valid():\n self.add('OK', 0, \"Document is valid\")\n out = []\n for item in self.items:\n filename = self.filename\n if item.sourceline > 0:\n filename += ':%d' % (item.sourceline)\n level = item.level\n if format == 'ansi':\n level = \"\\033[3%sm%s\\033[0m\" % (\n getattr(HocrValidator.LevelAnsiColor, item.level),\n item.level)\n out.append(\"[%s] %s %s\" % (level, filename, item.message))\n return \"\\n\".join(out)\n elif format == 'xml':\n out = []\n for item in self.items:\n out.append(\n '\\t<item>\\n'\n '\\t\\t<level>%s</level>\\n'\n '\\t\\t<sourceline>%s</sourceline>\\n'\n '\\t\\t<message>%s</message>\\n'\n '\\t</item>' % (item.level, item.sourceline,\n self.__escape_xml(item.message)))\n return '<report filename=\"%s\" valid=\"%s\">\\n%s\\n</report>' % (\n self.filename,\n ('true' if self.is_valid() else 
'false'),\n \"\\n\".join(out))\n else:\n raise ValueError(\"Unknown format '%s'\")\n\n formats = ['text', 'bool', 'ansi', 'xml']\n\n def __init__(self, profile, **kwargs):\n self.spec = HocrSpec(profile, **kwargs)\n\n def validate(self, source, parse_strict=False, filename=None):\n \"\"\"\n Validate a hocr document\n\n Args:\n source (str): A filename or '-' to read from STDIN\n parse_strict (bool): Whether to be strict about broken HTML. Default: False\n filename (str): Filename to use in the reports. Set this if reading\n from STDIN for nicer output\n\n \"\"\"\n parser = etree.HTMLParser(recover=parse_strict)\n if not filename: filename = source\n if source == '-': source = sys.stdin\n doc = etree.parse(source, parser)\n root = doc.getroot()\n report = HocrValidator.Report(filename)\n try:\n self.spec.check(report, root)\n except ValueError as e:\n sys.stderr.write(\"Validation errored\\n\")\n return report\n", "id": "6373437", "language": "Python", "matching_score": 2.5227701663970947, "max_stars_count": 5, "path": "hocr_spec/validate.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nClasses for validating and parsing hOCR, close to the spec.\n\"\"\"\n\nfrom .spec import HocrSpec\nfrom .validate import HocrValidator\n", "id": "12848833", "language": "Python", "matching_score": 0.22367608547210693, "max_stars_count": 5, "path": "hocr_spec/__init__.py" }, { "content": "'''OCR post-correction with error/lexicon FSTs and char-LM LSTMs'''\n", "id": "3089114", "language": "Python", "matching_score": 1.4281296730041504, "max_stars_count": 11, "path": "ocrd_cor_asv_fst/__init__.py" }, { "content": "'''OCR post-correction with attention encoder-decoder LSTMs'''\n", "id": "7749304", "language": "Python", "matching_score": 0, "max_stars_count": 10, "path": "ocrd_cor_asv_ann/__init__.py" }, { "content": "import logging\nimport math\nfrom os import listdir\nimport os.path\nimport pynini\n\n\ndef escape_for_pynini(s):\n '''\n Escapes a string for the usage in pynini. The following characters\n are prepended with a backslash:\n - the opening and closing square bracket,\n - the backslash itself.\n '''\n return s.replace('\\\\', '\\\\\\\\').replace('[', '\\\\[').replace(']', '\\\\]')\n\n\ndef get_filenames(directory, suffix):\n '''\n Return all filenames following the scheme <file_id>.<suffix> in the\n given directory.\n '''\n\n return (f for f in listdir(directory) if f.endswith('.' + suffix))\n\n\ndef generate_content(directory, filenames):\n '''\n Generate tuples of file basename and file content string for given\n filenames and directory.\n '''\n\n for filename in filenames:\n with open(os.path.join(directory, filename)) as f:\n for line in f:\n line = line.strip()\n if line:\n yield (filename.split('.')[0], line)\n\n\ndef load_pairs_from_file(filename):\n '''\n Load pairs of (line_ID, line) from a file.\n '''\n results = []\n with open(filename) as fp:\n for i, line in enumerate(fp, 1):\n line_spl = line.rstrip().split('\\t')\n if len(line_spl) >= 2:\n results.append(tuple(line_spl[:2]))\n else:\n logging.warning(\\\n '{}:{} -- line is not in two-column format: {}'\\\n .format(filename, i, line.rstrip()))\n return results\n\n\ndef load_pairs_from_dir(directory, suffix):\n '''\n Load pairs of (line_ID, line) from a file. 
Each text file ending\n with `suffix` contains a line of text and the line ID is the file\n name without the suffix.\n '''\n filenames = get_filenames(directory, suffix)\n return list(generate_content(directory, filenames))\n\n\ndef load_lines_from_file(filename):\n '''\n Load text lines from file.\n '''\n lines = None\n with open(filename) as fp:\n lines = [line.rstrip() for line in fp]\n return lines\n\n\ndef load_wordlist_from_file(filename):\n '''\n Load wordlist from a CSV file (word <tab> frequency).\n '''\n result = {}\n with open(filename) as fp:\n for line in fp:\n try:\n word, freq = line.rstrip().split('\\t')[:2]\n result[word] = int(freq)\n # ignore lines in wrong format\n # (less than two columns, second column is not a number etc.)\n except Exception:\n pass\n return result\n\n\ndef save_pairs_to_file(pairs, filename):\n '''\n Save pairs of (line_ID, line) to a file.\n '''\n with open(filename, 'w+') as fp:\n for p in pairs:\n fp.write('\\t'.join(p) + '\\n')\n\n\ndef save_pairs_to_dir(pairs, directory, suffix):\n '''\n Save pairs of (line_ID, line) to a directory.\n\n See the docstring of `load_pairs_from_dir` for an explanation of the\n format.\n '''\n for basename, string in pairs:\n filename = basename + '.' + suffix\n with open(os.path.join(directory, filename), 'w+') as fp:\n fp.write(string)\n\n\ndef convert_to_log_relative_freq(lexicon_dict, freq_threshold=2e-6):\n '''\n Convert counts of dict: word -> count into dict with relative\n frequencies: word -> relative_frequency.\n\n The entries with relative frequency lower than `freq_threshold` are\n dropped.\n '''\n\n total_freq = sum(lexicon_dict.values())\n print('converting dictionary of %d tokens / %d types' % (total_freq, len(lexicon_dict)))\n for key in list(lexicon_dict.keys()):\n abs_freq = lexicon_dict[key]\n rel_freq = abs_freq / total_freq\n if abs_freq <= 3 and rel_freq < freq_threshold:\n print('pruning rare word form \"%s\" (%d/%f)' % (key, abs_freq, rel_freq))\n del lexicon_dict[key]\n else:\n lexicon_dict[key] = -math.log(rel_freq)\n return lexicon_dict\n\n\ndef transducer_from_dict(dictionary, unweighted=False):\n '''\n Given a dictionary of strings and weights, build a transducer\n accepting those strings with given weights.\n '''\n return pynini.string_map(\\\n (escape_for_pynini(k),\n escape_for_pynini(k),\n str(w) if not unweighted else '0.0') \\\n for k, w in dictionary.items())\n\n", "id": "1517533", "language": "Python", "matching_score": 2.958562135696411, "max_stars_count": 11, "path": "ocrd_cor_asv_fst/lib/helper.py" }, { "content": "from collections import defaultdict, namedtuple\nimport logging\nimport re\n\n# to install models, do: `python -m spacy download de` after installation\nimport spacy\nimport spacy.tokenizer\n\nfrom .helper import \\\n convert_to_log_relative_freq, transducer_from_dict\n\n\nMIN_LINE_LENGTH = 3\nOPENING_BRACKETS = ['\"', '»', '(', '„']\nCLOSING_BRACKETS = ['\"', '«', ')', '“', '‘', \"'\"]\nUMLAUTS = { 'ä': 'a\\u0364', 'ö': 'o\\u0364', 'ü': 'u\\u0364', 'Ä': 'A\\u0364',\n 'Ö': 'O\\u0364', 'Ü': 'U\\u0364'}\n\nLexicon = namedtuple(\n 'Lexicon',\n ['opening_brackets', 'closing_brackets', 'punctuation', 'words'])\n\n\ndef get_digit_tuples():\n '''\n Gives tuple of all pairs of identical numbers. 
This is used to\n replace the ('1', '1') transitions in the lexicon by all possible\n numbers.\n '''\n return tuple([(str(i), str(i)) for i in range(10)])\n\n\ndef setup_spacy(use_gpu=False):\n if use_gpu:\n spacy.require_gpu()\n spacy.util.use_gpu(0)\n # disable everything we don't have at runtime either\n nlp = spacy.load('de', disable=['parser', 'ner'])\n infix_re = spacy.util.compile_infix_regex(\n nlp.Defaults.infixes +\n ['—', # numeric dash: (?<=[0-9])—(?=[0-9])\n '/']) # maybe more restrictive?\n suffix_re = spacy.util.compile_suffix_regex(\n nlp.Defaults.suffixes +\n ('/',)) # maybe more restrictive?\n # '〟' as historic quotation mark (left and right)\n # '〃' as historic quotation mark (at the start of the line!)\n # '‟' as historic quotation mark (at the start of the line!)\n # '›' and '‹' as historic quotation marks (maybe goes away with NFC?)\n # '⟨' and '⟩' parentheses (maybe goes away with NFC?)\n # '⁽' and '⁾' parentheses (maybe goes away with NFC?)\n # '〈' and '〉' brackets (maybe goes away with NFC?)\n # '‹' and '›' as historic quotation mark\n # '’' as historic apostrophe\n # '—' as dash, even when written like a prefix\n # \\u+feff (byte order mark) as prefix\n\n nlp.tokenizer = spacy.tokenizer.Tokenizer(\n nlp.vocab,\n token_match=nlp.tokenizer.token_match,\n prefix_search=nlp.tokenizer.prefix_search,\n suffix_search=nlp.tokenizer.suffix_search,\n infix_finditer=infix_re.finditer)\n return nlp\n\n\ndef build_lexicon(lines, _dict = None):\n '''\n Create lexicon with frequencies from lines of plain text. Words and\n punctation marks are inserted into separate dicts.\n\n The additional parameter `_dict` is a dictionary: type -> frequency.\n If it is given, those types are additionally inserted into the\n lexicon as words (without any preprocessing).\n '''\n\n # TODO: Bindestriche behandeln. Momentan werden sie abgetrennt vor dem\n # Hinzufügen zum Lexikon. Man müsste halbe Worte weglassen und\n # zusammengesetzte Zeilen für die Erstellung des Lexikons nutzen.\n # TODO: Groß-/Kleinschreibung wie behandeln? 
Momentan wird jedes\n # Wort in der kleingeschriebenen und der großgeschriebene Variante\n # zum Lexikon hinzugefügt (mit gleicher Häufigkeit).\n # Später vermutlich eher durch sowas wie {CAP}?\n\n def _is_opening_bracket(token):\n return token.text in OPENING_BRACKETS\n\n def _is_closing_bracket(token):\n return token.text in CLOSING_BRACKETS\n\n def _is_punctuation(token):\n # punctuation marks must not contain letters or numbers\n # hyphens in the middle of the text are treated as words\n return token.pos_ == 'PUNCT' and token.text != '—' and \\\n not any(c.isalpha() or c.isnumeric() for c in token.text)\n\n def _handle_problematic_cases(token):\n if token.text.strip() != token.text:\n logging.warning('Token contains leading or trailing '\n 'whitespaces: \\'{}\\''.format(token.text))\n if len(token.text) > 1 and token.text.endswith('—'):\n logging.warning('Possible tokenization error: \\'{}\\''\n .format(token.text))\n\n def _add_token_to_lexicon(token, freq = 1):\n _handle_problematic_cases(token)\n if _is_opening_bracket(token):\n lexicon.opening_brackets[token.text] += 1\n elif _is_closing_bracket(token):\n lexicon.closing_brackets[token.text] += 1\n elif _is_punctuation(token):\n lexicon.punctuation[token.text] += 1\n else:\n text = token.text.translate(umlauttrans)\n if text.isdigit() or num_re.match(text):\n text = len(text) * '1'\n lexicon.words[text] += freq\n # include also the (un)capitalized variant\n recap = text.lower() \\\n if text[0].isupper() \\\n else text.capitalize()\n if recap != text:\n lexicon.words[recap] += freq\n\n lexicon = Lexicon(\n opening_brackets=defaultdict(lambda: 0),\n closing_brackets=defaultdict(lambda: 0),\n punctuation=defaultdict(lambda: 0),\n words=defaultdict(lambda: 0))\n umlauttrans = str.maketrans(UMLAUTS)\n # '−' as sign prefix\n # '√' as prefix?\n # ¹²³⁴⁵⁶⁷⁸⁹⁰ digits (maybe goes away with NFC?)\n num_re = re.compile('[0-9]{1,3}([,.]?[0-9]{3})*([.,][0-9]*)?')\n nlp = setup_spacy()\n\n # process the text lines\n for line in lines:\n if len(line) < MIN_LINE_LENGTH:\n continue\n for token in nlp(line):\n _add_token_to_lexicon(token)\n\n # process the dictionary of words with frequencies\n if _dict is not None:\n for word, freq in _dict.items():\n lexicon.words[word] += freq\n\n return lexicon\n\n\ndef lexicon_to_fst(lexicon, punctuation='bracket', added_word_cost=0,\n unweighted=False):\n words_dict = convert_to_log_relative_freq(lexicon.words)\n # add `added_word_cost` to the cost of every word\n # (FIXME this is a dirty workaround to reproduce the approximate behaviour\n # of the legacy implementation of the sliding window algorithm; it should\n # be replaced with something more theoretically sound)\n if added_word_cost != 0:\n logging.debug('adding {} to word costs'.format(added_word_cost))\n for w in words_dict:\n words_dict[w] += added_word_cost\n words_fst = transducer_from_dict(words_dict, unweighted=unweighted)\n punctuation_fst = transducer_from_dict(\n convert_to_log_relative_freq(lexicon.punctuation),\n unweighted=unweighted)\n open_bracket_fst = transducer_from_dict(\n convert_to_log_relative_freq(lexicon.opening_brackets),\n unweighted=unweighted)\n close_bracket_fst = transducer_from_dict(\n convert_to_log_relative_freq(lexicon.closing_brackets),\n unweighted=unweighted)\n\n # in the lexicon dict, numbers are counted as sequences of 1\n # thus, they are replaced by any possible number of the according length\n # FIXME restore converting digits to ones\n # words_fst.substitute(('1', '1'), get_digit_tuples())\n\n if punctuation == 
'bracket':\n # TODO compounds\n result = open_bracket_fst.ques\n result.concat(words_fst)\n result.concat(punctuation_fst.ques)\n result.concat(close_bracket_fst.ques)\n\n # standardize the umlaut characters\n # FIXME restore the umlaut standardization\n # precompose_transducer = hfst.regex(\n # '[a\\u0364:ä|o\\u0364:ö|u\\u0364:ü|A\\u0364:Ä|O\\u0364:Ö|U\\u0364:Ü|?]*')\n # result.compose(precompose_transducer)\n result.project(project_output=True)\n result.optimize(compute_props=True)\n result.push()\n return result\n else:\n # FIXME implement further punctuation methods\n raise NotImplementedError()\n\n", "id": "6906086", "language": "Python", "matching_score": 1.725738286972046, "max_stars_count": 11, "path": "ocrd_cor_asv_fst/lib/lexicon.py" }, { "content": "import argparse\nimport logging\nimport numpy as np\nfrom operator import itemgetter\n\nfrom ..lib.lexicon import build_lexicon, lexicon_to_fst\nfrom ..lib.error_simp import \\\n get_confusion_dicts, compile_single_error_transducer, \\\n combine_error_transducers\nfrom ..lib.helper import \\\n load_pairs_from_file, load_pairs_from_dir, load_lines_from_file, \\\n load_wordlist_from_file\nfrom ..lib.error_st import \\\n fit, compile_transducer, load_ngrams, matrix_to_mappings, save_ngrams, \\\n training_pairs_to_ngrams\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description='OCR post-correction model training')\n # GENERAL PARAMETERS\n parser.add_argument(\n '-l', '--lexicon-file', metavar='FILE', type=str, default=None,\n help='file to save the trained lexicon')\n parser.add_argument(\n '-e', '--error-model-file', metavar='FILE', type=str, default=None,\n help='file to save the trained error model')\n parser.add_argument(\n '-t', '--training-file', metavar='FILE', type=str, default=None,\n help='file containing training data in two-column format (OCR, GT)')\n parser.add_argument(\n '-L', '--log-level', metavar='LEVEL', type=str,\n choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],\n default='INFO',\n help='verbosity of logging output (standard log levels)')\n # alternative method of passing training data - two files:\n parser.add_argument(\n '-i', '--input-file', metavar='FILE', type=str, default=None,\n help='file containing the input data in two-column format')\n parser.add_argument(\n '-g', '--gt-file', metavar='FILE', type=str, default=None,\n help='file containing the ground truth data in two-column format')\n # yet alternative method of passing training data -- multiple files:\n parser.add_argument(\n '-I', '--input-suffix', metavar='SUF', type=str, default=None,\n help='input (OCR) filenames suffix')\n parser.add_argument(\n '-G', '--gt-suffix', metavar='SUF', type=str, default=None,\n help='ground truth filenames suffix')\n parser.add_argument(\n '-d', '--directory', metavar='PATH', default=None,\n help='directory for training files')\n # PARAMETERS FOR TRAINING THE LEXICON\n parser.add_argument(\n '-c', '--corpus-file', metavar='FILE', type=str, default=None,\n help='a file containing a plaintext corpus')\n parser.add_argument(\n '-w', '--wordlist-file', metavar='FILE', type=str, default=None,\n help='a file containing a wordlist in two-column format '\n '(word <tab> frequency)')\n parser.add_argument(\n '-P', '--punctuation', metavar='MODEL', type=str,\n choices=['bracket', 'lm', 'preserve'], default='bracket',\n help='how to model punctuation between words (bracketing rules, '\n 'inter-word language model, or keep unchanged)')\n parser.add_argument(\n '-D', '--composition-depth', metavar='NUM', 
type=int, default=2,\n help='max. number of lexicon words that can be concatenated')\n parser.add_argument(\n '--lexicon-added-word-cost', metavar='NUM', type=float, default=0,\n help='a constant to add to the weights of every word in lexicon')\n parser.add_argument(\n '--unweighted-lexicon', action='store_true', default=False,\n help='train an unweighted lexicon (use for combining with a language '\n 'model)')\n # PARAMETERS FOR TRAINING THE ERROR MODEL\n parser.add_argument(\n '-T', '--error-model-type', metavar='MODEL', type=str,\n choices=['simple', 'st'], default='simple',\n help='type of the error model')\n parser.add_argument(\n '-p', '--preserve-punctuation', action='store_true', default=False,\n help='ignore edits to/from non-alphanumeric or non-space characters '\n '(only the \\'simple\\' model)')\n parser.add_argument(\n '--min-context', metavar='NUM', type=int, default=1,\n help='minimum size of context count edits at')\n parser.add_argument(\n '-C', '--max-context', metavar='NUM', type=int, default=3,\n help='maximum size of context count edits at')\n parser.add_argument(\n '-E', '--max-errors', metavar='NUM', type=int, default=3,\n help='maximum number of errors the resulting FST can correct '\n '(applicable within one window, i.e. a certain number of words)')\n # only ST error model:\n parser.add_argument(\n '-N', '--max-ngrams', metavar='NUM', type=int, default=1000,\n help='max. number of n-grams used in ST error model training')\n parser.add_argument(\n '-W', '--weight-threshold', metavar='NUM', type=float, default=5.0,\n help='max. cost of transformations included in the error model')\n parser.add_argument(\n '--crossentr-threshold', metavar='NUM', type=float, default=0.001,\n help='threshold on cross-entropy for stopping ST error model training')\n parser.add_argument(\n '--ngrams-file', metavar='FILE', type=str,\n help='')\n parser.add_argument(\n '--weights-file', metavar='FILE', type=str,\n help='')\n parser.add_argument(\n '--load-weights-from', metavar='FILE', type=str,\n help='')\n return parser.parse_args()\n\n\ndef main():\n\n def _load_training_pairs(args):\n if args.training_file is not None:\n return load_pairs_from_file(args.training_file)\n elif args.input_suffix is not None \\\n and args.gt_suffix is not None \\\n and args.directory is not None:\n ocr_dict = dict(load_pairs_from_dir(\\\n args.directory, args.input_suffix))\n gt_dict = dict(load_pairs_from_dir(\\\n args.directory, args.gt_suffix))\n return [(ocr_dict[key], gt_dict[key]) \\\n for key in set(ocr_dict) & set(gt_dict)]\n else:\n return []\n\n def _load_lexicon_training_data(args):\n training_dict = None\n training_pairs = _load_training_pairs(args)\n training_lines = list(map(itemgetter(1), training_pairs))\n if args.corpus_file is not None:\n training_lines.extend(load_lines_from_file(args.corpus_file))\n if args.wordlist_file is not None:\n training_dict = load_wordlist_from_file(args.wordlist_file)\n if not training_lines and not training_dict:\n logging.error('No training data supplied!')\n return training_lines, training_dict\n\n def _train_lexicon(args):\n training_lines, training_dict = _load_lexicon_training_data(args)\n lexicon = build_lexicon(training_lines, training_dict)\n tr = lexicon_to_fst(\\\n lexicon, punctuation=args.punctuation,\n added_word_cost=args.lexicon_added_word_cost,\n unweighted=args.unweighted_lexicon)\n tr.write(args.lexicon_file)\n\n def _train_simple_error_model(args):\n training_pairs = _load_training_pairs(args)\n # FIXME this is silly, instead refactor the 
simple error model training\n # so that it accepts input in form of line pairs\n ocr_dict, gt_dict = {}, {}\n for i, (ocr_line, gt_line) in enumerate(training_pairs):\n ocr_dict[i] = ocr_line\n gt_dict[i] = gt_line\n\n confusion_dicts = get_confusion_dicts(\\\n gt_dict, ocr_dict, args.max_context)\n single_error_transducers = \\\n [compile_single_error_transducer(\n confusion_dicts[i],\n preserve_punct=args.preserve_punctuation) \\\n for i in range(1, args.max_context+1)]\n combined_tr_dicts = combine_error_transducers(\n single_error_transducers,\n args.max_context,\n args.max_errors)\n # FIXME combine_error_transducers() should return a single FST instead\n # of this complicated dict structure\n target_context = \\\n ''.join(map(str, range(args.min_context, args.max_context+1)))\n for tr_dict in combined_tr_dicts:\n if tr_dict['max_error'] == args.max_errors and \\\n tr_dict['context'] == target_context:\n # save_transducer(args.error_model_file, tr_dict['transducer'])\n tr_dict['transducer'].write(args.error_model_file)\n\n def _train_st_error_model(args):\n # FIXME implement\n # if weight file given -> load weights from there, otherwise train them\n ngrams, probs, ngr_probs = None, None, None\n if args.load_weights_from is not None:\n ngrams = load_ngrams(args.ngrams_file)\n with np.load(args.load_weights_from) as data:\n probs, ngr_probs = data['probs'], data['ngr_probs']\n else:\n training_pairs = _load_training_pairs(args)\n ngr_training_pairs, ngrams = training_pairs_to_ngrams(\n training_pairs,\n max_n=args.max_context, max_ngrams=args.max_ngrams)\n if args.ngrams_file is not None:\n save_ngrams(args.ngrams_file, ngrams)\n probs, ngr_probs = fit(\n ngr_training_pairs, ngrams,\n threshold=args.crossentr_threshold)\n if args.weights_file is not None:\n np.savez(args.weights_file, probs=probs, ngr_probs=ngr_probs)\n\n mappings = matrix_to_mappings(\n probs, ngrams, weight_threshold=args.weight_threshold)\n for input_str, output_str, weight in mappings:\n print('\\''+input_str+'\\'', '\\''+output_str+'\\'', weight, sep='\\t')\n tr = compile_transducer(\n mappings, ngr_probs, max_errors=args.max_errors,\n max_context=args.max_context, weight_threshold=args.weight_threshold)\n tr.write(args.error_model_file)\n\n args = parse_arguments()\n logging.basicConfig(level=logging.getLevelName(args.log_level))\n if args.lexicon_file is not None:\n _train_lexicon(args)\n else:\n logging.info('Skipping lexicon training.')\n if args.error_model_file is not None:\n if args.error_model_type == 'simple':\n _train_simple_error_model(args)\n elif args.error_model_type == 'st':\n _train_st_error_model(args)\n else:\n logging.info('Skipping error model training.')\n\n\nif __name__ == '__main__':\n main()\n\n", "id": "1430917", "language": "Python", "matching_score": 3.1587281227111816, "max_stars_count": 11, "path": "ocrd_cor_asv_fst/scripts/train.py" }, { "content": "'''Error model based on a stochastic transducer.'''\n\nimport argparse\nfrom collections import defaultdict\nimport numpy as np\nfrom operator import itemgetter\nimport pynini\nimport tqdm\n\nfrom ..lib.helper import escape_for_pynini\n\n\ndef dicts_to_value_pairs(dict_1, dict_2):\n '''Convert dictionaries `{ key_i: val_1_i }` and\n `{ key_i: val_2_i }` to pairs `(val_1_i, val_2_i)` for each\n `key_i`.'''\n for key in sorted(dict_1):\n if key in dict_2:\n yield (dict_1[key], dict_2[key])\n\n\ndef count_ngrams(strings, max_n):\n '''Count character n-grams up to max_n (including spaces) in strings.'''\n counts = defaultdict(lambda: 0)\n for 
string in strings:\n for i in range(len(string)):\n for j in range(min(max_n, len(string)-i)):\n counts[string[i:i+j+1]] += 1\n return dict(counts)\n\n\ndef merge_counters(a, b):\n result = a.copy()\n for key, val in b.items():\n if key in a:\n result[key] += val\n else:\n result[key] = val\n return result\n\n\ndef select_ngrams(counter, num):\n '''Select all unigrams ant most frequent n-grams of higher orders.'''\n # select the unigrams\n ngrams = [key for key in counter.keys() if len(key) <= 1]\n if len(ngrams) > num:\n raise Exception('Number of unigrams exceeds the number of allowed '\n 'n-grams.')\n # add the most frequent n-grams for n > 1\n ngrams.extend(map(\n itemgetter(0),\n sorted(((key, val) for key, val in counter.items() if len(key) > 1),\n reverse=True, key=itemgetter(1))[:num-len(ngrams)]))\n return ngrams\n\n\ndef string_to_ngram_ids(string, ngrams):\n '''Convert a string of length `m` to a matrix `A` of size `m*n`,\n where `n` is the maximum n-gram length. The entry `a[i,j]`\n contains the ID (index in the `ngrams` list) of the n-gram\n `string[i:i+j]` or `-1` if this ngram is not present in the\n list.'''\n max_n = max(len(ngr) for ngr in ngrams)\n ngrams_idx = { ngr : i for i, ngr in enumerate(ngrams) }\n result = -np.ones((len(string), max_n), dtype=np.int32)\n for i in range(len(string)):\n for j in range(min(max_n, len(string)-i)):\n ngr = string[i:i+j+1]\n if ngr in ngrams_idx:\n result[i,j] = ngrams_idx[ngr]\n return result\n\n\ndef preprocess_training_data(ocr_dict, gt_dict, max_n=3, max_ngrams=1000):\n string_pairs = dicts_to_value_pairs(ocr_dict, gt_dict)\n ocr_ngrams = count_ngrams(ocr_dict.values(), max_n)\n gt_ngrams = count_ngrams(gt_dict.values(), max_n)\n ngrams = select_ngrams(merge_counters(ocr_ngrams, gt_ngrams), max_ngrams)\n training_pairs = []\n for (ocr_str, gt_str) in string_pairs:\n training_pairs.append((\n string_to_ngram_ids(gt_str, ngrams),\n string_to_ngram_ids(ocr_str, ngrams)))\n return training_pairs, ngrams\n\n\ndef training_pairs_to_ngrams(training_pairs, max_n=3, max_ngrams=1000):\n ocr_ngrams = count_ngrams(map(itemgetter(0), training_pairs), max_n)\n gt_ngrams = count_ngrams(map(itemgetter(1), training_pairs), max_n)\n ngrams = select_ngrams(merge_counters(ocr_ngrams, gt_ngrams), max_ngrams)\n ngr_training_pairs = []\n for (ocr_str, gt_str) in training_pairs:\n ngr_training_pairs.append((\n string_to_ngram_ids(gt_str, ngrams),\n string_to_ngram_ids(ocr_str, ngrams)))\n return ngr_training_pairs, ngrams\n\n\ndef normalize_probs(probs):\n '''Normalize the probability matrix so that each row sums up to 1.'''\n row_sums = np.sum(probs, axis=1)\n weights = np.divide(np.ones(row_sums.shape), row_sums, where=row_sums > 0)\n return probs * weights[:,None] \n\n\ndef initialize_probs(size, identity_weight=1, misc_weight=0.01):\n return normalize_probs(np.ones((size, size)))\n# return normalize_probs(\n# np.eye(size) * identity_weight + \\\n# np.ones((size, size)) * misc_weight)\n\n\ndef forward(input_seq, output_seq, probs, ngr_probs):\n '''Compute the forward matrix (alpha) for the given pair\n of sequences.'''\n result = np.zeros((input_seq.shape[0]+1, output_seq.shape[0]+1))\n result[0, 0] = 1\n for i in range(1, input_seq.shape[0]+1):\n for j in range(1, output_seq.shape[0]+1):\n for k in range(min(i, input_seq.shape[1])):\n for m in range(min(j, output_seq.shape[1])):\n x, y = input_seq[i-k-1,k], output_seq[j-m-1,m]\n if x > -1 and y > -1:\n result[i,j] += ngr_probs[k] * result[i-k-1,j-m-1] * probs[x,y]\n return result\n\n\ndef 
backward(input_seq, output_seq, probs, ngr_probs):\n '''Compute the backward matrix (beta) for the given pair\n of sequences.'''\n result = np.zeros((input_seq.shape[0]+1, output_seq.shape[0]+1))\n result[input_seq.shape[0], output_seq.shape[0]] = 1\n for i in range(input_seq.shape[0]-1, -1, -1):\n for j in range(output_seq.shape[0]-1, -1, -1):\n for k in range(min(input_seq.shape[0]-i, input_seq.shape[1])):\n for m in range(min(output_seq.shape[0]-j, output_seq.shape[1])):\n x, y = input_seq[i,k], output_seq[j,m]\n if x > -1 and y > -1:\n result[i,j] += ngr_probs[k] * probs[x,y] * result[i+k+1,j+m+1]\n return result\n\n\ndef compute_expected_counts(seq_pairs, probs, ngr_probs):\n counts = np.zeros(probs.shape)\n ngr_counts = np.zeros(ngr_probs.shape)\n for input_seq, output_seq in tqdm.tqdm(seq_pairs):\n alpha = forward(input_seq, output_seq, probs, ngr_probs)\n beta = backward(input_seq, output_seq, probs, ngr_probs)\n Z = alpha[input_seq.shape[0],output_seq.shape[0]]\n for i in range(1, input_seq.shape[0]+1):\n for j in range(1, output_seq.shape[0]+1):\n if alpha[i,j]*beta[i,j] == 0:\n continue\n co = np.zeros((min(i, input_seq.shape[1]),\n min(j, output_seq.shape[1])))\n for k in range(min(i, input_seq.shape[1])):\n for m in range(min(j, output_seq.shape[1])):\n x, y = input_seq[i-k-1,k], output_seq[j-m-1,m]\n if x > -1 and y > -1:\n c = alpha[i-k-1,j-m-1] * ngr_probs[k] * \\\n probs[x,y] * beta[i,j] / Z\n co[k,m] += c\n ngr_counts[k] += c\n counts[x,y] += c\n return counts, ngr_counts\n\n\ndef mean_kl_divergence(old, new):\n log_old = np.log(old, where=old > 0)\n log_new = np.log(new, where=new > 0)\n return np.sum(new*log_new - new*log_old) / new.shape[0]\n\n\ndef compute_new_probs(counts, probs):\n result = np.copy(probs)\n row_sums = np.sum(counts, axis=1)\n for i in range(counts.shape[0]):\n if row_sums[i] > 0:\n result[i,:] = counts[i,:] / row_sums[i]\n return result\n\n\ndef fit(seq_pairs, ngrams, threshold=0.0001):\n probs = initialize_probs(len(ngrams))\n ngr_probs = np.ones(3) / 3\n kl_div = np.inf\n while kl_div > threshold:\n counts, ngr_counts = compute_expected_counts(seq_pairs, probs, ngr_probs)\n new_probs = compute_new_probs(counts, probs)\n ngr_probs = ngr_counts / np.sum(ngr_counts)\n kl_div = mean_kl_divergence(probs, new_probs)\n probs = new_probs\n if np.any(probs > 1):\n raise RuntimeError('!')\n print('KL-DIV={}'.format(kl_div))\n print(ngr_probs)\n print(ngr_probs)\n return probs, ngr_probs\n\n\ndef matrix_to_mappings(probs, ngrams, weight_threshold=5.0):\n weights = -np.log(probs, where=(probs > 0))\n results = []\n for i in range(probs.shape[0]):\n for j in range(probs.shape[1]):\n if probs[i,j] > 0 and weights[i,j] < weight_threshold:\n results.append((ngrams[i], ngrams[j], float(weights[i,j])))\n return results\n\n\ndef compile_transducer(mappings, ngr_probs, max_errors=3, max_context=3,\n weight_threshold=5.0):\n ngr_weights = -np.log(ngr_probs)\n identity_trs, error_trs = {}, {}\n identity_mappings, error_mappings = {}, {}\n for i in range(max_context):\n identity_trs[i], error_trs[i] = [], []\n identity_mappings[i], error_mappings[i] = [], []\n for x, y, weight in mappings:\n mapping = (escape_for_pynini(x), escape_for_pynini(y), str(weight))\n if x == y:\n identity_mappings[len(x)-1].append(mapping)\n else:\n error_mappings[len(x)-1].append(mapping)\n for i in range(max_context):\n identity_trs[i] = pynini.string_map(identity_mappings[i])\n error_trs[i] = pynini.string_map(error_mappings[i])\n # TODO refactor as a subfunction\n # - build the \"master 
transducer\" containing ID-n and ERR-n symbols\n # on transitions for n in 1..max_context and containing ngr_weights[n] in\n # arcs leading to those\n state_dict = {}\n root = pynini.Fst()\n\n # FIXME refactor the merging of symbol tables into a separate function\n symbol_table = pynini.SymbolTable()\n for i in range(max_context):\n symbol_table = pynini.merge_symbol_table(symbol_table, identity_trs[i].input_symbols())\n symbol_table = pynini.merge_symbol_table(symbol_table, error_trs[i].input_symbols())\n symbol_table = pynini.merge_symbol_table(symbol_table, identity_trs[i].output_symbols())\n symbol_table = pynini.merge_symbol_table(symbol_table, error_trs[i].output_symbols())\n sym = symbol_table.add_symbol('id-{}'.format(i+1))\n sym = symbol_table.add_symbol('err-{}'.format(i+1))\n\n root.set_input_symbols(symbol_table)\n root.set_output_symbols(symbol_table)\n\n for i in range(max_errors+1):\n for j in range(max_context+1):\n s = root.add_state()\n state_dict[(i, j)] = s\n if j > 0:\n # (i, 0) -> (i, j) with epsilon\n root.add_arc(\n state_dict[(i, 0)],\n pynini.Arc(0, 0, ngr_weights[j-1], s))\n # (i, j) -> (i, 0) with identity\n sym = root.output_symbols().find('id-{}'.format(j))\n root.add_arc(\n s,\n pynini.Arc(0, sym, 0, state_dict[(i, 0)]))\n if i > 0:\n # arc: (i-1, j) -> (i, 0) with error\n sym = root.output_symbols().find('err-{}'.format(j))\n root.add_arc(\n state_dict[(i-1, j)],\n pynini.Arc(0, sym, 0, state_dict[(i, 0)]))\n root.set_final(state_dict[(i, 0)], 0)\n\n root.set_start(state_dict[(0, 0)])\n replacements = []\n for i in range(max_context):\n replacements.append(('id-{}'.format(i+1), identity_trs[i]))\n replacements.append(('err-{}'.format(i+1), error_trs[i]))\n result = pynini.replace(root, replacements)\n result.invert()\n result.optimize()\n return result\n\n\ndef load_ngrams(filename):\n result = []\n with open(filename) as fp:\n for line in fp:\n result.append(line.replace('\\n', '')) #.replace(' ', '~'))\n return result\n\n\ndef save_ngrams(filename, ngrams):\n with open(filename, 'w+') as fp:\n for ngr in ngrams:\n fp.write(ngr + '\\n')\n\n", "id": "729628", "language": "Python", "matching_score": 1.193231463432312, "max_stars_count": 11, "path": "ocrd_cor_asv_fst/lib/error_st.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2015-12-19 02:09:53\n# @Author : <NAME> (<EMAIL>)\n# @Link : https://github.com/primetang/pylsd\n# @Version : 0.0.1\n\nimport ctypes\nimport os\nimport glob\n\ndef load_lsd_library():\n # may fail if CWD (via sys.path) contains pylsd/bindings/__init__.py,\n # but otherwise contains the auto-built library for this platform/installation:\n lib_dir = os.path.dirname(os.path.dirname(__file__))\n lib_path = None\n for lib_name in ['lib.*.so', 'lib.*.dll', 'lib.*.dylib', 'lib.*.lib']:\n libs = glob.glob(os.path.join(lib_dir, lib_name))\n if libs:\n lib_path = libs[0]\n break\n if not lib_path:\n return None\n return ctypes.cdll[lib_path]\n\nlsdlib = load_lsd_library()\nif lsdlib == None:\n raise ImportError('Cannot load dynamic library. 
Did you compile LSD?')\n", "id": "8781998", "language": "Python", "matching_score": 1.9611002206802368, "max_stars_count": 1, "path": "pylsd/bindings/lsd_ctypes.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2019-01-08 09:32:00\n# @Author : <NAME> (<EMAIL>)\n# @Link : https://github.com/primetang/pylsd\n# @Version : 0.0.3\n\nfrom setuptools import setup, Extension\n\nclib = Extension('pylsd.lib',\n sources=['source/src/lsd.cpp'],\n include_dirs=['source/include'],\n depends=['source/include/lsd.h'],\n language=\"c++\")\n\nsetup(\n name='ocrd-fork-pylsd',\n version='0.0.4',\n description='pylsd is the python bindings for LSD - Line Segment Detector',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author='<NAME>',\n author_email='<EMAIL>',\n maintainer='kba',\n license='BSD',\n keywords=[\"LSD\", 'line segmentation'],\n url='https://github.com/kba/pylsd',\n packages=['pylsd', 'pylsd.bindings'],\n install_requires=['numpy'],\n ext_modules=[clib],\n)\n", "id": "4414153", "language": "Python", "matching_score": 0.9896395802497864, "max_stars_count": 1, "path": "setup.py" }, { "content": "import click\nfrom ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor\n\nfrom ocrd_pc_segmentation.ocrd_segmentation import PixelClassifierSegmentation\n\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_pc_segmentation(*args, **kwargs):\n return ocrd_cli_wrap_processor(PixelClassifierSegmentation, *args, **kwargs)\n\n\nif __name__ == '__main__':\n ocrd_pc_segmentation()", "id": "1047492", "language": "Python", "matching_score": 2.687331438064575, "max_stars_count": 7, "path": "ocrd_pc_segmentation/cli.py" }, { "content": "import click\n\nfrom ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor\nfrom align.aligner import Aligner\n\[email protected]()\n@ocrd_cli_options\ndef cis_ocrd_align(*args, **kwargs):\n # kwargs['cache_enabled'] = False\n return ocrd_cli_wrap_processor(Aligner, *args, **kwargs)\n", "id": "12857743", "language": "Python", "matching_score": 2.2985050678253174, "max_stars_count": 0, "path": "align/cli.py" }, { "content": "import click\n\nfrom ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor\nfrom ocrd_tesserocr.recognize import TesserocrRecognize\nfrom ocrd_tesserocr.segment_region import TesserocrSegmentRegion\nfrom ocrd_tesserocr.segment_line import TesserocrSegmentLine\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_tesserocr_segment_region(*args, **kwargs):\n return ocrd_cli_wrap_processor(TesserocrSegmentRegion, *args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_tesserocr_segment_line(*args, **kwargs):\n return ocrd_cli_wrap_processor(TesserocrSegmentLine, *args, **kwargs)\n\[email protected]()\n@ocrd_cli_options\ndef ocrd_tesserocr_recognize(*args, **kwargs):\n return ocrd_cli_wrap_processor(TesserocrRecognize, *args, **kwargs)\n", "id": "10002373", "language": "Python", "matching_score": 3.549179792404175, "max_stars_count": 0, "path": "ocrd_tesserocr/cli.py" }, { "content": "from .recognize import TesserocrRecognize\nfrom .segment_line import TesserocrSegmentLine\nfrom .segment_region import TesserocrSegmentRegion\n", "id": "3531265", "language": "Python", "matching_score": 1.2361575365066528, "max_stars_count": 0, "path": "ocrd_tesserocr/__init__.py" }, { "content": "import os\nimport shutil\n\nfrom test.base import TestCase, main, assets, skip\n\nfrom ocrd.resolver import Resolver\nfrom ocrd_tesserocr.segment_line import 
TesserocrSegmentLine\nfrom ocrd_tesserocr.segment_region import TesserocrSegmentRegion\nfrom ocrd_tesserocr.recognize import TesserocrRecognize\n\nWORKSPACE_DIR = '/tmp/pyocrd-test-recognizer'\n\nclass TestTesserocrRecognize(TestCase):\n\n def setUp(self):\n if os.path.exists(WORKSPACE_DIR):\n shutil.rmtree(WORKSPACE_DIR)\n os.makedirs(WORKSPACE_DIR)\n\n skip(\"Takes too long\")\n def runTest(self):\n resolver = Resolver(cache_enabled=True)\n # workspace = resolver.workspace_from_url(assets.url_of('SBB0000F29300010000/mets_one_file.xml'), directory=WORKSPACE_DIR)\n workspace = resolver.workspace_from_url(assets.url_of('kant_aufklaerung_1784/mets.xml'), directory=WORKSPACE_DIR)\n TesserocrSegmentRegion(\n workspace,\n input_file_grp=\"OCR-D-IMG\",\n output_file_grp=\"OCR-D-SEG-BLOCK\"\n ).process()\n workspace.save_mets()\n TesserocrSegmentLine(\n workspace,\n input_file_grp=\"OCR-D-SEG-BLOCK\",\n output_file_grp=\"OCR-D-SEG-LINE\"\n ).process()\n workspace.save_mets()\n TesserocrRecognize(\n workspace,\n input_file_grp=\"OCR-D-SEG-LINE\",\n output_file_grp=\"OCR-D-OCR-TESS\",\n parameter={'textequiv_level': 'word'}\n ).process()\n workspace.save_mets()\n\nif __name__ == '__main__':\n main()\n", "id": "10195350", "language": "Python", "matching_score": 6.456447124481201, "max_stars_count": 0, "path": "test/test_recognize.py" }, { "content": "import os\nimport shutil\n\nfrom test.base import TestCase, main, assets\n\nfrom ocrd.resolver import Resolver\nfrom ocrd_tesserocr.segment_region import TesserocrSegmentRegion\nfrom ocrd_tesserocr.segment_line import TesserocrSegmentLine\nfrom ocrd_tesserocr.segment_word import TesserocrSegmentWord\n\nWORKSPACE_DIR = '/tmp/pyocrd-test-segment-word-tesserocr'\n\nclass TestProcessorSegmentWordTesseract3(TestCase):\n\n def setUp(self):\n if os.path.exists(WORKSPACE_DIR):\n shutil.rmtree(WORKSPACE_DIR)\n os.makedirs(WORKSPACE_DIR)\n\n def runTest(self):\n resolver = Resolver(cache_enabled=True)\n # workspace = resolver.workspace_from_url(assets.url_of('SBB0000F29300010000/mets_one_file.xml'), directory=WORKSPACE_DIR)\n workspace = resolver.workspace_from_url(assets.url_of('kant_aufklaerung_1784-binarized/mets.xml'), directory=WORKSPACE_DIR)\n TesserocrSegmentRegion(workspace, input_file_grp=\"OCR-D-IMG\", output_file_grp=\"OCR-D-SEG-BLOCK\").process()\n TesserocrSegmentLine(workspace, input_file_grp=\"OCR-D-SEG-BLOCK\", output_file_grp=\"OCR-D-SEG-LINE\").process()\n TesserocrSegmentWord(workspace, input_file_grp=\"OCR-D-SEG-LINE\", output_file_grp=\"OCR-D-SEG-WORD\").process()\n workspace.save_mets()\n\nif __name__ == '__main__':\n main()\n", "id": "5195713", "language": "Python", "matching_score": 6.564851760864258, "max_stars_count": 0, "path": "test/test_segment_word.py" }, { "content": "import os\nimport shutil\n\nfrom test.base import TestCase, main, assets\n\nfrom ocrd.resolver import Resolver\nfrom ocrd_tesserocr.segment_region import TesserocrSegmentRegion\nfrom ocrd_tesserocr.segment_line import TesserocrSegmentLine\n\nMETS_HEROLD_SMALL = assets.url_of('SBB0000F29300010000/mets_one_file.xml')\n\nWORKSPACE_DIR = '/tmp/pyocrd-test-segment-line-tesserocr'\n\nclass TestProcessorSegmentLineTesseract3(TestCase):\n\n def setUp(self):\n if os.path.exists(WORKSPACE_DIR):\n shutil.rmtree(WORKSPACE_DIR)\n os.makedirs(WORKSPACE_DIR)\n\n def runTest(self):\n resolver = Resolver(cache_enabled=True)\n workspace = resolver.workspace_from_url(METS_HEROLD_SMALL, directory=WORKSPACE_DIR)\n TesserocrSegmentRegion(workspace, input_file_grp=\"INPUT\", 
output_file_grp=\"OCR-D-SEG-BLOCK\").process()\n # workspace.save_mets()\n TesserocrSegmentLine(workspace, input_file_grp=\"OCR-D-SEG-BLOCK\", output_file_grp=\"OCR-D-SEG-LINE\").process()\n workspace.save_mets()\n\nif __name__ == '__main__':\n main()\n", "id": "4036493", "language": "Python", "matching_score": 4.988207817077637, "max_stars_count": 0, "path": "test/test_segment_line.py" }, { "content": "from test.base import TestCase, main, assets\n\nfrom ocrd.resolver import Resolver\nfrom ocrd_tesserocr.segment_region import TesserocrSegmentRegion\n\nclass TestTesserocrRecognize(TestCase):\n\n def runTest(self):\n resolver = Resolver(cache_enabled=True)\n workspace = resolver.workspace_from_url(assets.url_of('SBB0000F29300010000/mets_one_file.xml'))\n TesserocrSegmentRegion(workspace).process()\n workspace.save_mets()\n\nif __name__ == '__main__':\n main()\n", "id": "8827458", "language": "Python", "matching_score": 0.4346984922885895, "max_stars_count": 0, "path": "test/test_segment_region.py" }, { "content": "from .util import unzip\nfrom .. import align\n\n\ndef test_left_empty():\n result = list(align('', 'foo'))\n expected = [(None, 'f'), (None, 'o'), (None, 'o')]\n assert result == expected\n\n\ndef test_right_empty():\n result = list(align('foo', ''))\n expected = [('f', None), ('o', None), ('o', None)]\n assert result == expected\n\n\ndef test_left_longer():\n result = list(align('food', 'foo'))\n expected = [('f', 'f'), ('o', 'o'), ('o', 'o'), ('d', None)]\n assert result == expected\n\n\ndef test_right_longer():\n result = list(align('foo', 'food'))\n expected = [('f', 'f'), ('o', 'o'), ('o', 'o'), (None, 'd')]\n assert result == expected\n\n\ndef test_some_diff():\n result = list(align('abcde', 'aaadef'))\n left, right = unzip(result)\n assert list(left) == ['a', 'b', 'c', 'd', 'e', None]\n assert list(right) == ['a', 'a', 'a', 'd', 'e', 'f']\n\n\ndef test_longer():\n s1 = 'Dies ist eine Tst!'\n s2 = 'Dies ist ein Test.'\n\n result = list(align(s1, s2)) # ; diffprint(*unzip(result))\n expected = [('D', 'D'), ('i', 'i'), ('e', 'e'), ('s', 's'), (' ', ' '),\n ('i', 'i'), ('s', 's'), ('t', 't'), (' ', ' '),\n ('e', 'e'), ('i', 'i'), ('n', 'n'), ('e', None), (' ', ' '),\n ('T', 'T'), (None, 'e'), ('s', 's'), ('t', 't'), ('!', '.')]\n assert result == expected\n\n\ndef test_completely_different():\n assert len(list(align('abcde', 'fghij'))) == 5\n\n\ndef test_with_some_fake_ocr_errors():\n result = list(align('Über die vielen Sorgen wegen desselben vergaß',\n 'SomeJunk MoreJunk Übey die vielen Sorgen wegen AdditionalJunk deffelben vcrgab'))\n left, right = unzip(result)\n\n # Beginning\n assert list(left[:18]) == [None]*18\n assert list(right[:18]) == list('SomeJunk MoreJunk ')\n\n # End\n assert list(left[-1:]) == ['ß']\n assert list(right[-1:]) == ['b']\n", "id": "11123218", "language": "Python", "matching_score": 1.1728476285934448, "max_stars_count": 0, "path": "qurator/dinglehopper/tests/test_align.py" }, { "content": "from itertools import zip_longest\nfrom typing import Iterable\n\nimport colorama\n\n\ndef diffprint(x, y):\n \"\"\"Print elements or lists x and y, with differences in red\"\"\"\n\n def _diffprint(x, y):\n if x != y:\n print(colorama.Fore.RED, x, y, colorama.Fore.RESET)\n else:\n print(x, y)\n\n if isinstance(x, Iterable):\n for xe, ye in zip_longest(x, y):\n _diffprint(xe, ye)\n else:\n _diffprint(x, y)\n\n\ndef unzip(l):\n return zip(*l)\n", "id": "8066462", "language": "Python", "matching_score": 0.048841848969459534, "max_stars_count": 0, "path": 
"qurator/dinglehopper/tests/util.py" }, { "content": "from __future__ import division, print_function\n\nimport unicodedata\nfrom functools import partial\n\nimport numpy as np\nfrom uniseg.graphemecluster import grapheme_clusters\n\n\ndef levenshtein_matrix(seq1, seq2):\n \"\"\"Compute the matrix commonly computed to produce the Levenshtein distance.\n\n This is also known as the Wagner-Fischer algorithm. The matrix element at the bottom right contains the desired\n edit distance.\n\n This algorithm is implemented here because we need an implementation that can work with sequences other than\n strings, e.g. lists of grapheme clusters or lists of word strings.\n \"\"\"\n m = len(seq1)\n n = len(seq2)\n\n def from_to(start, stop):\n return range(start, stop + 1, 1)\n\n D = np.zeros((m + 1, n + 1), np.int)\n D[0, 0] = 0\n for i in from_to(1, m):\n D[i, 0] = i\n for j in from_to(1, n):\n D[0, j] = j\n for i in from_to(1, m):\n for j in from_to(1, n):\n D[i, j] = min(\n D[i - 1, j - 1] + 1 * (seq1[i - 1] != seq2[j - 1]), # Same or Substitution\n D[i, j - 1] + 1, # Insertion\n D[i - 1, j] + 1 # Deletion\n )\n\n return D\n\n\ndef levenshtein(seq1, seq2):\n \"\"\"Compute the Levenshtein edit distance between two sequences\"\"\"\n m = len(seq1)\n n = len(seq2)\n\n D = levenshtein_matrix(seq1, seq2)\n return D[m, n]\n\n\ndef distance(s1, s2):\n \"\"\"Compute the Levenshtein edit distance between two Unicode strings\n\n Note that this is different from levenshtein() as this function knows about Unicode normalization and grapheme\n clusters. This should be the correct way to compare two Unicode strings.\n \"\"\"\n s1 = list(grapheme_clusters(unicodedata.normalize('NFC', s1)))\n s2 = list(grapheme_clusters(unicodedata.normalize('NFC', s2)))\n return levenshtein(s1, s2)\n\n\ndef seq_editops(seq1, seq2):\n seq1 = list(seq1)\n seq2 = list(seq2)\n m = len(seq1)\n n = len(seq2)\n D = levenshtein_matrix(seq1, seq2)\n\n def _tail_backtrace(i, j, accumulator):\n if i > 0 and D[i - 1, j] + 1 == D[i, j]:\n return partial(_tail_backtrace, i - 1, j, [('delete', i-1, j)] + accumulator)\n if j > 0 and D[i, j - 1] + 1 == D[i, j]:\n return partial(_tail_backtrace, i, j - 1, [('insert', i, j-1)] + accumulator)\n if i > 0 and j > 0 and D[i - 1, j - 1] + 1 == D[i, j]:\n return partial(_tail_backtrace, i - 1, j - 1, [('replace', i-1, j-1)] + accumulator)\n if i > 0 and j > 0 and D[i - 1, j - 1] == D[i, j]:\n return partial(_tail_backtrace, i - 1, j - 1, accumulator) # NOP\n return accumulator\n\n def backtrace(i, j):\n result = partial(_tail_backtrace, i, j, [])\n while isinstance(result, partial):\n result = result()\n\n return result\n\n b = backtrace(m, n)\n return b\n\n\ndef editops(word1, word2):\n # XXX Note that this returns indices to the _grapheme clusters_, not characters!\n word1 = list(grapheme_clusters(unicodedata.normalize('NFC', word1)))\n word2 = list(grapheme_clusters(unicodedata.normalize('NFC', word2)))\n return seq_editops(word1, word2)\n", "id": "1490949", "language": "Python", "matching_score": 2.7291946411132812, "max_stars_count": 0, "path": "qurator/dinglehopper/edit_distance.py" }, { "content": "from __future__ import division\n\nimport unicodedata\n\nfrom uniseg.graphemecluster import grapheme_clusters\n\nfrom qurator.dinglehopper.edit_distance import distance\n\n\ndef character_error_rate(reference, compared):\n d = distance(reference, compared)\n if d == 0:\n return 0\n\n n = len(list(grapheme_clusters(unicodedata.normalize('NFC', reference))))\n if n == 0:\n return float('inf')\n\n return 
d/n\n\n # XXX Should we really count newlines here?\n", "id": "2346619", "language": "Python", "matching_score": 1.9185941219329834, "max_stars_count": 0, "path": "qurator/dinglehopper/character_error_rate.py" } ]
1.9611
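The qurator/dinglehopper files in the row above implement the Wagner-Fischer dynamic programme for Levenshtein distance and derive a character error rate (CER) from it. The following is a minimal, self-contained sketch of that same recurrence, assuming only numpy and using plain Python characters in place of the NFC-normalised grapheme clusters that the dumped code compares; it is an illustration of the technique, not the packaged implementation.

import numpy as np

def levenshtein(seq1, seq2):
    # Wagner-Fischer matrix: D[i, j] is the edit distance between
    # seq1[:i] and seq2[:j]; the bottom-right cell is the full distance.
    m, n = len(seq1), len(seq2)
    D = np.zeros((m + 1, n + 1), dtype=int)
    D[:, 0] = np.arange(m + 1)  # i deletions
    D[0, :] = np.arange(n + 1)  # j insertions
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            D[i, j] = min(
                D[i - 1, j - 1] + (seq1[i - 1] != seq2[j - 1]),  # keep or substitute
                D[i, j - 1] + 1,                                 # insertion
                D[i - 1, j] + 1,                                 # deletion
            )
    return int(D[m, n])

def character_error_rate(reference, compared):
    # CER = edit distance normalised by the reference length,
    # mirroring the d == 0 / empty-reference handling shown above.
    d = levenshtein(reference, compared)
    if d == 0:
        return 0.0
    if len(reference) == 0:
        return float('inf')
    return d / len(reference)

if __name__ == '__main__':
    print(levenshtein('Dies ist eine Tst!', 'Dies ist ein Test.'))  # 3, as in the dumped test_longer case
    print(round(character_error_rate('foo', 'food'), 3))            # 0.333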
dsa110
[ { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 26 01:30:39 2019\r\n\r\n@author: hidir\r\n\"\"\"\r\n\r\n###############################################################################\r\n# LAbjack Analog Input Readout ( Single Ended Analog Inputs)\r\n###############################################################################\r\n\r\nimport u3 , signal , datetime\r\nimport numpy as np\r\nimport time\r\nfrom openpyxl import Workbook\r\nimport visa as v\r\nimport math\r\n#import matplotlib.pyplot as plt\r\n#from openpyxl.chart import (\r\n# ScatterChart,\r\n# Reference,\r\n# Serie,\r\n#)\r\n###############################################################################\r\n# User Settings\r\n###############################################################################\r\n#LabJack\r\nn_avg = 10;\r\nVoffset = -0.009248\r\n# Time Delay in [S] between measurements\r\ndelay = 0.1\r\n# Number of samples saved at a time:\r\nz = 1\r\n#file\r\nftsize = 12\r\n\r\n\r\n# Read Date and Time from PC clock\r\n# x = datetime.datetime.now()\r\ndate_time_string= time.strftime('%m%d%Y %H:%M:%S')\r\n# Format Time \r\nt = datetime.datetime.strptime(date_time_string,\"%m%d%Y %H:%M:%S\")\r\n\r\n\r\ndatafilename = str('FEB_Test_Score_Sheet')\r\nfolder = 'C:\\\\Users\\\\amplab\\\\Desktop\\\\FEB_BEB_test\\\\'\r\n###############################################################################\r\n# Configure Instrument\r\n###############################################################################\r\n#LabJack\r\nprint (' Configuring LabJack ')\r\nlj = u3.U3()\r\n\r\nlj.configAnalog(0)\r\nlj.configAnalog(1)\r\nlj.configAnalog(2)\r\nlj.configAnalog(3)\r\nlj.configAnalog(4)\r\nlj.configAnalog(5)\r\nlj.configAnalog(6)\r\nlj.configAnalog(7)\r\n\r\nlj.configAnalog(8)\r\nlj.configAnalog(9)\r\nlj.configAnalog(10)\r\nlj.configAnalog(11)\r\nlj.configAnalog(12)\r\nlj.configAnalog(13)\r\nlj.configAnalog(14)\r\nlj.configAnalog(15)\r\n\r\n#Siglent\r\nprint (' Connecting to Instrument ... 
')\r\nrm = v.ResourceManager()\r\nrm.list_resources()\r\nspectrum_analyzer = rm.open_resource('USB0::0xF4EC::0x1300::SSA3XLBC1R0061::INSTR') #For Sandy's spectrum\r\n#spectrum_analyzer = rm.open_resource('USB0::0xF4EC::0x1300::SSA3XMCQ3R1188::INSTR') #For SJ's Spectrum\r\nprint(spectrum_analyzer.query(\"*IDN?\"))\r\n\r\nspectrum_analyzer.read_termination = '\\n'\r\nspectrum_analyzer.query('*OPC?')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.query(':SYSTem:TIME?')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.query(':SYSTem:DATE?')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':DISPlay:WINDow:TRACe:Y:RLEVel -40 DBM')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':POWer:ATTenuation 10')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':POWer:GAIN OFF') #preamp\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':UNIT:POWer DBM')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':DISPlay:WINDow:TRACe:Y:SPACing LOGarithmic')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':DISPlay:WINDow:TRACe:Y:PDIVision 1 dB')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':SENSe:CORRection:OFF')\r\ntime.sleep(0.1)\r\n#spectrum_analyzer.write(':BWID:AUTO On') # resolution BW\r\n#time.sleep(0.1)\r\n\r\nspectrum_analyzer.write(':BWID:AUTO Off') # resolution BW\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':BWID: 1 MHz') # resolution BW\r\ntime.sleep(0.1)\r\n\r\n#spectrum_analyzer.write(':BWIDth:VIDeo 1 MHz')\r\nspectrum_analyzer.write(':BWIDth:VIDeo 10 KHz')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':TRAC1:MODE WRITE')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':CALCulate:MARK1:STATe ON')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':CALCulate:MARK2:STATe ON')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':CALCulate:MARK3:STATe ON')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':CALCulate:MARKer1:X 0.36 GHz')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':CALCulate:MARKer2:X 0.375 GHz')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':CALCulate:MARKer3:X 0.39 GHz')\r\ntime.sleep(0.1)\r\n#spectrum_analyzer.write(':TRAC1:MODE AVERAGE')\r\n#time.sleep(0.1)\r\nspectrum_analyzer.write(':DETector:TRAC1 AVERage')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':AVERage:TRACe1:COUNt 16')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':CALCulate:MARKer:TABLe ON')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':SWEep:MODE AUTO')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':SWEep:TIME:AUTO ON')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':SWEep:SPEed ACCUracy')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':AVERage:TYPE POWer')\r\ntime.sleep(0.1)\r\n\r\n# Save data in XLSX file: \r\n#Create a new workbook\r\nwb = Workbook()\r\n#create new worksheets \r\nws1 = wb.worksheets[0]\r\n\r\n# TEMPLATE for Sandy\r\nws1['A1'] = 'Test Data for FEB Score Sheet'\r\nws1.merge_cells('A1:D1')\r\n#ws1['E1'] = 'Date & Time:'\r\n\r\n# Read Date and Time from PC clock\r\n# x = datetime.datetime.now()\r\ndate_time_string= time.strftime('%m%d%Y %H:%M:%S')\r\n# Format Time \r\nt = datetime.datetime.strptime(date_time_string,\"%m%d%Y %H:%M:%S\")\r\n\r\n#ws1['G1'] = str(t)\r\n#ws1.merge_cells('G1:I1')\r\n\r\nws1['A2'] = 'FEB_SN'\r\nws1['B2'] = 'BEB_SN'\r\nws1['C2'] = 'BEB Out, 375 MHz, NG OFF'\r\nws1['D2'] = 'BEB Out, 375 MHz, NG ON'\r\nws1['E2'] = 'FEB Y Factor dB'\r\nws1['F2'] = 'FEB Y Factor Ratio'\r\nws1['G2'] = 'Thot, K'\r\nws1['H2'] = 'FEB/BEB Tn, K'\r\nws1['I2'] = 'FEB/BEB NF dB'\r\nws1['J2'] = 'Tsys Contrib K'\r\nws1['K2'] = 'BEB Out, w. LNA, 300K in, dBm/MHz'\r\nws1['L2'] = 'BEB Out, w. 
LNA, 300K in, Total dBm'\r\nws1['M2'] = 'BEB Out, w. LNA, Tsys=26K, Total dBm'\r\nws1['N2'] = 'Test Date'\r\nws1['O2'] = 'By'\r\nws1['P2'] = 'FEB Temp C'\r\nws1['Q2'] = 'BEB PD mA'\r\nws1['R2'] = 'FEB mA'\r\nws1['S2'] = 'FEB dBm mV OFF'\r\nws1['T2'] = 'FEB dBm mV ON'\r\nws1['U2'] = 'BEB dBm mV OFF'\r\nws1['V2'] = 'BEB dBm mV ON'\r\nws1['W2'] = 'FEB LD V'\r\n\r\n\r\n#Siglent\r\n## Acquire Data\r\nfstart=125\r\nfstop=625\r\nsweep_count=1\r\nspectrum_analyzer.write('*WAI')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write('SENSE:FREQuency:STARt '+str(fstart)+' MHz')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write('SENSE:FREQuency:STOP '+str(fstop)+' MHz')\r\ntime.sleep(0.1)\r\nspectrum_analyzer.write(':SWEep:COUNt '+str(sweep_count))\r\n#time.sleep(0.1)\r\nfreqpoint=16 # total number of freq points to be averaged when calculating SA data\r\n\r\n###############################################################################\r\n# Interrupt handler \r\n###############################################################################\r\nclass GracefulInterruptHandler(object):\r\n\r\n def __init__(self, sig=signal.SIGINT):\r\n self.sig = sig\r\n\r\n def __enter__(self):\r\n\r\n self.interrupted = False\r\n self.released = False\r\n\r\n self.original_handler = signal.getsignal(self.sig)\r\n\r\n def handler(signum, frame):\r\n self.release()\r\n self.interrupted = True\r\n\r\n signal.signal(self.sig, handler)\r\n\r\n return self\r\n\r\n def __exit__(self, type, value, tb):\r\n self.release()\r\n\r\n def release(self):\r\n\r\n if self.released:\r\n return False\r\n\r\n signal.signal(self.sig, self.original_handler)\r\n \r\n self.released = True\r\n\r\nrow=3\r\nprint (' Please make sure that the switches are turned towards LabJack on the test box')\r\nprint (' Please keep the same BEB during FEB tests')\r\nTester= input(\"MOVE CURSOR; Testor Inititals? \")\r\nfilename=input(\"filename:\")\r\nBEB_SN = input(\"Please enter BEB SN :\")\r\nwhile(True):\r\n if ((BEB_SN[len(BEB_SN)-1])==('A')) or ((BEB_SN[len(BEB_SN)-1])==('B')):\r\n break\r\n else:\r\n BEB_SN = input(\"Please enter BEB SN with correct Channel (ie. 
27A) :\")\r\n\r\nwhile(True):\r\n FEB_SN = input(\"Please enter FEB SN :\")\r\n ###############################################################################\r\n # Initialization Save Data \r\n ###############################################################################\r\n #LabJack\r\n a0_averages_NGENoff = []\r\n a1_averages_NGENoff = []\r\n a2_averages_NGENoff = []\r\n a3_averages_NGENoff = []\r\n a4_averages_NGENoff = []\r\n a5_averages_NGENoff = []\r\n a6_averages_NGENoff = []\r\n a7_averages_NGENoff = []\r\n a8_averages_NGENoff = []\r\n a9_averages_NGENoff = []\r\n a10_averages_NGENoff = []\r\n a11_averages_NGENoff = []\r\n a12_averages_NGENoff = []\r\n a13_averages_NGENoff = []\r\n a14_averages_NGENoff = []\r\n a15_averages_NGENoff = []\r\n \r\n a0_averages_NGENon = []\r\n a1_averages_NGENon = []\r\n a2_averages_NGENon = []\r\n a3_averages_NGENon = []\r\n a4_averages_NGENon = []\r\n a5_averages_NGENon = []\r\n a6_averages_NGENon = []\r\n a7_averages_NGENon = []\r\n a8_averages_NGENon = []\r\n a9_averages_NGENon = []\r\n a10_averages_NGENon = []\r\n a11_averages_NGENon = []\r\n a12_averages_NGENon = []\r\n a13_averages_NGENon = []\r\n a14_averages_NGENon = []\r\n a15_averages_NGENon = []\r\n save_values = []\r\n \r\n times = [] \r\n ################################################################################\r\n ## Measurement Loop\r\n ################################################################################\r\n #LabJack and SA\r\n dig_offset=0.024\r\n DAC1_VALUE = lj.voltageToDACBits(3+dig_offset, dacNumber = 1, is16Bits = False)#CAL voltage value is set to 3V\r\n lj.getFeedback(u3.DAC1_8(DAC1_VALUE))\r\n \r\n # Get data when NGEN is OFF\r\n \r\n DAC0_VALUE = lj.voltageToDACBits(0-dig_offset+0.010, dacNumber = 1, is16Bits = False)#NGEN is OFF\r\n lj.getFeedback(u3.DAC0_8(DAC0_VALUE)) \r\n k = 0\r\n with GracefulInterruptHandler() as h:\r\n while True:\r\n for i in range(n_avg):\r\n a0 = lj.getAIN(0)\r\n a1 = lj.getAIN(1)\r\n a2 = lj.getAIN(2)\r\n a3 = lj.getAIN(3)\r\n a4 = lj.getAIN(4)\r\n a5 = lj.getAIN(5)\r\n a6 = lj.getAIN(6)\r\n a7 = lj.getAIN(7)\r\n a8 = lj.getAIN(8)\r\n a9 = lj.getAIN(9)\r\n a10 = lj.getAIN(10)\r\n a11 = lj.getAIN(11)\r\n a12 = lj.getAIN(12)\r\n a13 = lj.getAIN(13)\r\n a14 = lj.getAIN(14)\r\n a15 = lj.getAIN(15)\r\n a0_averages_NGENoff.append(a0)\r\n a1_averages_NGENoff.append(a1)\r\n a2_averages_NGENoff.append(a2)\r\n a3_averages_NGENoff.append(a3)\r\n a4_averages_NGENoff.append(a4)\r\n a5_averages_NGENoff.append(a5)\r\n a6_averages_NGENoff.append(a6)\r\n a7_averages_NGENoff.append(a7)\r\n a8_averages_NGENoff.append(a8)\r\n a9_averages_NGENoff.append(a9)\r\n a10_averages_NGENoff.append(a10)\r\n a11_averages_NGENoff.append(a11)\r\n a12_averages_NGENoff.append(a12)\r\n a13_averages_NGENoff.append(a13)\r\n a14_averages_NGENoff.append(a14)\r\n a15_averages_NGENoff.append(a15)\r\n \r\n a0_avg_value_NGENoff = np.average(a0_averages_NGENoff) - Voffset\r\n a1_avg_value_NGENoff = np.average(a1_averages_NGENoff) - Voffset\r\n a2_avg_value_NGENoff = np.average(a2_averages_NGENoff) - Voffset\r\n a3_avg_value_NGENoff = np.average(a3_averages_NGENoff) - Voffset\r\n a4_avg_value_NGENoff = np.average(a4_averages_NGENoff) - Voffset\r\n a5_avg_value_NGENoff = np.average(a5_averages_NGENoff) - Voffset\r\n a6_avg_value_NGENoff = np.average(a6_averages_NGENoff) - Voffset\r\n a7_avg_value_NGENoff = np.average(a7_averages_NGENoff) - Voffset\r\n a8_avg_value_NGENoff = np.average(a8_averages_NGENoff) - Voffset\r\n a9_avg_value_NGENoff = np.average(a9_averages_NGENoff) 
- Voffset\r\n a10_avg_value_NGENoff = np.average(a10_averages_NGENoff) - Voffset\r\n a11_avg_value_NGENoff = np.average(a11_averages_NGENoff) - Voffset\r\n a12_avg_value_NGENoff = np.average(a12_averages_NGENoff) - Voffset\r\n a13_avg_value_NGENoff = np.average(a13_averages_NGENoff) - Voffset\r\n a14_avg_value_NGENoff = np.average(a14_averages_NGENoff) - Voffset\r\n a15_avg_value_NGENoff = np.average(a15_averages_NGENoff) - Voffset\r\n print (\" NGENoff LJ data is saved \")\r\n break\r\n if np.remainder(k,z) == 0 and k != 0:\r\n #if np.remainder(k,z-1) == 0 and k <> 0:\r\n print (\" I saved \" + str(z) + \" values for You! \")\r\n break\r\n k = -1\r\n avg_value = []\r\n averages = []\r\n time.sleep(delay)\r\n k= k + 1\r\n if h.interrupted:\r\n print (\" Exiting Gracefully ...........\") \r\n #save_data(times,save_values)\r\n #datafilename.close()\r\n lj.close()\r\n break\r\n #Siglent\r\n time.sleep(10) #comment this line if using 100kHz video BW\r\n # Download LgPwr Data \r\n LgPwr_off = []\r\n lgpwr_off = []\r\n spectrum_analyzer.write('*WAI')\r\n time.sleep(0.5)\r\n lgpwr_off = spectrum_analyzer.query(':TRACe:DATA? 1') #This query command returns the current displayed data\r\n time.sleep(0.5)\r\n spectrum_analyzer.write('*WAI')\r\n time.sleep(0.5)\r\n print (\" NGENoff SA data is saved \")\r\n lgpwr_off = lgpwr_off.rsplit(',')\r\n #lgpwr = lgpwr.replace(\"\\x00\\n\", \"\")\r\n nfreq=len(lgpwr_off)-1\r\n freq = np.linspace(fstart,fstop,nfreq)\r\n \r\n for i in range(int(nfreq)):\r\n LgPwr_off.append(np.float(lgpwr_off[i]))\r\n \r\n #SA_data_NGENoff = np.float(lgpwr_off[int((len(lgpwr_off)/2)-1)])\r\n \r\n SA_data_NGENoff_mat=[]\r\n favstart=int(len(lgpwr_off)/2)-1-round(freqpoint/2) # average start frequency\r\n for i in range(freqpoint):\r\n SA_data_NGENoff_mat.append(np.float(lgpwr_off[favstart+i])) \r\n SA_data_NGENoff=np.average(SA_data_NGENoff_mat) # averaged NGEN off SA power data\r\n \r\n # Get data when NGEN is ON\r\n \r\n DAC0_VALUE = lj.voltageToDACBits(1-dig_offset+0.010, dacNumber = 1, is16Bits = False)#NGEN is ON\r\n lj.getFeedback(u3.DAC0_8(DAC0_VALUE))\r\n \r\n k = 0\r\n with GracefulInterruptHandler() as h:\r\n while True:\r\n for i in range(n_avg):\r\n a0 = lj.getAIN(0)\r\n a1 = lj.getAIN(1)\r\n a2 = lj.getAIN(2)\r\n a3 = lj.getAIN(3)\r\n a4 = lj.getAIN(4)\r\n a5 = lj.getAIN(5)\r\n a6 = lj.getAIN(6)\r\n a7 = lj.getAIN(7)\r\n a8 = lj.getAIN(8)\r\n a9 = lj.getAIN(9)\r\n a10 = lj.getAIN(10)\r\n a11 = lj.getAIN(11)\r\n a12 = lj.getAIN(12)\r\n a13 = lj.getAIN(13)\r\n a14 = lj.getAIN(14)\r\n a15 = lj.getAIN(15)\r\n a0_averages_NGENon.append(a0)\r\n a1_averages_NGENon.append(a1)\r\n a2_averages_NGENon.append(a2)\r\n a3_averages_NGENon.append(a3)\r\n a4_averages_NGENon.append(a4)\r\n a5_averages_NGENon.append(a5)\r\n a6_averages_NGENon.append(a6)\r\n a7_averages_NGENon.append(a7)\r\n a8_averages_NGENon.append(a8)\r\n a9_averages_NGENon.append(a9)\r\n a10_averages_NGENon.append(a10)\r\n a11_averages_NGENon.append(a11)\r\n a12_averages_NGENon.append(a12)\r\n a13_averages_NGENon.append(a13)\r\n a14_averages_NGENon.append(a14)\r\n a15_averages_NGENon.append(a15)\r\n \r\n a0_avg_value_NGENon = np.average(a0_averages_NGENon) - Voffset\r\n a1_avg_value_NGENon = np.average(a1_averages_NGENon) - Voffset\r\n a2_avg_value_NGENon = np.average(a2_averages_NGENon) - Voffset\r\n a3_avg_value_NGENon = np.average(a3_averages_NGENon) - Voffset\r\n a4_avg_value_NGENon = np.average(a4_averages_NGENon) - Voffset\r\n a5_avg_value_NGENon = np.average(a5_averages_NGENon) - Voffset\r\n 
a6_avg_value_NGENon = np.average(a6_averages_NGENon) - Voffset\r\n a7_avg_value_NGENon = np.average(a7_averages_NGENon) - Voffset\r\n a8_avg_value_NGENon = np.average(a8_averages_NGENon) - Voffset\r\n a9_avg_value_NGENon = np.average(a9_averages_NGENon) - Voffset\r\n a10_avg_value_NGENon = np.average(a10_averages_NGENon) - Voffset\r\n a11_avg_value_NGENon = np.average(a11_averages_NGENon) - Voffset\r\n a12_avg_value_NGENon = np.average(a12_averages_NGENon) - Voffset\r\n a13_avg_value_NGENon = np.average(a13_averages_NGENon) - Voffset\r\n a14_avg_value_NGENon = np.average(a14_averages_NGENon) - Voffset\r\n a15_avg_value_NGENon = np.average(a15_averages_NGENon) - Voffset\r\n print (\" NGENon LJ data is saved \")\r\n break\r\n if np.remainder(k,z) == 0 and k != 0:\r\n #if np.remainder(k,z-1) == 0 and k <> 0:\r\n print (\" I saved \" + str(z) + \" values for You! \")\r\n break\r\n k = -1\r\n avg_value = []\r\n averages = []\r\n time.sleep(delay)\r\n k= k + 1\r\n if h.interrupted:\r\n print (\" Exiting Gracefully ...........\") \r\n #save_data(times,save_values)\r\n #datafilename.close()\r\n lj.close()\r\n break\r\n #Siglent\r\n #spectrum_analyzer.write(':TRAC1:MODE WRITE')\r\n #time.sleep(0.1)\r\n time.sleep(10) #comment this line if using 100kHz video BW\r\n # Download LgPwr Data \r\n LgPwr_on = []\r\n lgpwr_on = []\r\n spectrum_analyzer.write('*WAI')\r\n time.sleep(0.5)\r\n lgpwr_on = spectrum_analyzer.query(':TRACe:DATA? 1') #This query command returns the current displayed data\r\n time.sleep(0.5)\r\n spectrum_analyzer.write('*WAI')\r\n time.sleep(0.5)\r\n print (\" NGENon SA data is saved \")\r\n lgpwr_on = lgpwr_on.rsplit(',')\r\n #lgpwr = lgpwr.replace(\"\\x00\\n\", \"\")\r\n nfreq=len(lgpwr_on)-1\r\n freq = np.linspace(fstart,fstop,nfreq)\r\n for i in range(int(nfreq)):\r\n LgPwr_on.append(np.float(lgpwr_on[i]))\r\n \r\n #SA_data_NGENon = np.float(lgpwr_on[int((len(lgpwr_on)/2)-1)])\r\n SA_data_NGENon_mat=[]\r\n favstart=int(len(lgpwr_on)/2)-1-round(freqpoint/2) # average start frequency\r\n for i in range(freqpoint):\r\n SA_data_NGENon_mat.append(np.float(lgpwr_on[favstart+i])) \r\n SA_data_NGENon=np.average(SA_data_NGENon_mat) # averaged NGEN on SA power data\r\n \r\n ### Write to Excel\r\n try:\r\n Y_dB=SA_data_NGENon-SA_data_NGENoff\r\n Y_ratio=10**(((SA_data_NGENon-SA_data_NGENoff)/10))\r\n #Nextfor NC3206+15dB\r\n NoiseSource_ENR=9.48\r\n Tcold=300\r\n Thot=Tcold+290*10**(NoiseSource_ENR/10)\r\n Tn=(Thot-Tcold*Y_ratio)/(Y_ratio-1) \r\n \r\n # print (\" Tn = \",Tn)\r\n FEB_BEB_NF_dB=10*math.log10(1+Tn/290)\r\n BEB_out_dBm_per_MHz=SA_data_NGENoff+35-10*math.log10(1+Tn/290)\r\n BEB_out_300k_totdBm=BEB_out_dBm_per_MHz+26\r\n BEB_out_26k_totdBm=BEB_out_300k_totdBm-10.6\r\n \r\n if ((BEB_SN[len(BEB_SN)-1])==('A')):\r\n BEB_PD_mA=2000*a15_avg_value_NGENoff/1000 \r\n BEB_IF_MON_NGENoff=2000*a12_avg_value_NGENoff\r\n BEB_IF_MON_NGENon=2000*a12_avg_value_NGENon\r\n \r\n if ((BEB_SN[len(BEB_SN)-1])==('B')):\r\n BEB_PD_mA=2000*a11_avg_value_NGENoff/1000 \r\n BEB_IF_MON_NGENoff=2000*a8_avg_value_NGENoff\r\n BEB_IF_MON_NGENon=2000*a8_avg_value_NGENon\r\n \r\n FEB_temp=(2000*a3_avg_value_NGENoff-500)/10 \r\n FEB_mA=2000*a2_avg_value_NGENoff\r\n FEB_IF_MON_NGENoff=2000*a1_avg_value_NGENoff/10\r\n FEB_IF_MON_NGENon=2000*a1_avg_value_NGENon/10 \r\n FEB_LD_MON_NGENoff=2000*a0_avg_value_NGENoff/1000\r\n \r\n except:\r\n \r\n print (\"Cannot turn noise gen on or off. 
Please check 28V or noise gen connectivity\")\r\n ws1['E1'] = str(t) # put date and time\r\n print (\"FEB Score Sheet data is saved\")\r\n wb.save(str(folder+filename+'.xlsx'))\r\n \r\n print (\" NF is : \",FEB_BEB_NF_dB,\"dB\")\r\n \r\n ws1['A'+str(row)] = FEB_SN\r\n ws1['B'+str(row)] = BEB_SN\r\n ws1['C'+str(row)] = float(SA_data_NGENoff)\r\n ws1['D'+str(row)] = float(SA_data_NGENon)\r\n ws1['E'+str(row)] = float(Y_dB)\r\n ws1['F'+str(row)] = float(Y_ratio)\r\n ws1['G'+str(row)] = float(Thot)\r\n ws1['H'+str(row)] = float(Tn)\r\n ws1['I'+str(row)] = float(FEB_BEB_NF_dB) \r\n ws1['J'+str(row)] = float(Tn/3162)\r\n ws1['K'+str(row)] = float(BEB_out_dBm_per_MHz)\r\n ws1['L'+str(row)] = float(BEB_out_300k_totdBm)\r\n ws1['M'+str(row)] = float(BEB_out_26k_totdBm)\r\n ws1['N'+str(row)] = str(t)\r\n ws1['O'+str(row)] = Tester\r\n ws1['P'+str(row)] = float(FEB_temp)\r\n ws1['Q'+str(row)] = float(BEB_PD_mA) # BEB channel A\r\n ws1['R'+str(row)] = float(FEB_mA) \r\n ws1['S'+str(row)] = float(FEB_IF_MON_NGENoff)\r\n ws1['T'+str(row)] = float(FEB_IF_MON_NGENon)\r\n ws1['U'+str(row)] = float(BEB_IF_MON_NGENoff) # BEB channel A\r\n ws1['V'+str(row)] = float(BEB_IF_MON_NGENon) # BEB channel A\r\n ws1['W'+str(row)] = float(FEB_LD_MON_NGENoff)\r\n \r\n wb.save(str(folder+filename+'.xlsx'))\r\n \r\n print (\"Continue file (y) or output file (n)?\")\r\n while(True):\r\n ans=input(\"Please enter y or n:\")\r\n if ((ans=='n') or (ans=='y')):\r\n break\r\n \r\n if ((ans=='n') or (ans!='y')):\r\n break\r\n row=row+1\r\n \r\nws1['E1'] = str(t) # put date and time\r\n\r\nprint (\"FEB Score Sheet data is saved\")\r\nwb.save(str(folder+filename+'.xlsx'))", "id": "6181460", "language": "Python", "matching_score": 1.3751921653747559, "max_stars_count": 0, "path": "FEB_test_v3.py" }, { "content": "\"\"\"Dedisperse a measurement set.\n\"\"\"\nimport numpy as np\nfrom numba import jit\nfrom casacore.tables import table\nimport astropy.units as u\nfrom dsacalib.ms_io import extract_vis_from_ms\n\n# Same as tempo2\n# TODO: Verify same DM constant used in T1\nDISPERSION_DELAY_CONSTANT = u.s/2.41e-4*u.MHz**2*u.cm**3/u.pc\n\ndef time_delay(dispersion_measure, freq, ref_freq):\n r\"\"\"Time delay due to dispersion.\n\n Parameters\n ----------\n dispersion_measure : astropy quantity\n Dispersion measure in units of pc / cm3.\n freq : astropy quantity\n Frequency at which to evaluate the dispersion delay.\n ref_freq : astropy quantity\n Reference frequency relative to which the dispersion delay is\n evaluated.\n\n Returns\n -------\n quantity\n The dispersion delay of `freq` relative to `ref_freq`.\n \"\"\"\n ref_freq_inv2 = 1/ref_freq**2\n return (dispersion_measure*DISPERSION_DELAY_CONSTANT*(\n 1/freq**2-ref_freq_inv2)).to(u.ms)\n\ndef disperse(msname, dispersion_measure, ref_freq=1.405*u.GHz):\n \"\"\"Disperses measurement set incoherently.\n\n Parameters\n ----------\n msname : str\n The full path to the measurement set to dedisperse, without the `.ms`\n extension.\n dispersion_measure : astropy quantity\n Dispersion measure to remove in pc / cm3.\n ref_freq : astropy quantity\n Reference frequency relative to which the dispersion delay is\n evaluated.\n \"\"\"\n # TODO: Pad to reduce corruption of signal at the edges.\n # Get data from ms\n data, time, freq, _, _, _, _, spw, orig_shape = \\\n extract_vis_from_ms(msname, swapaxes=False)\n spwaxis = orig_shape.index('spw')\n timeaxis = orig_shape.index('time')\n # To speed up dedispersion, we are hardcoding for the order of the axes\n # Check that the order is consistent 
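# --- Illustrative sketch: magnitude of the dispersive delay given by the time_delay
# helper above, for hypothetical values (DM = 100 pc/cm3, band edges 1.28 and 1.53 GHz,
# relative to the 1.405 GHz default reference frequency).
import astropy.units as u
print(time_delay(100 * u.pc / u.cm**3, 1.28 * u.GHz, 1.405 * u.GHz))  # ~ +43 ms
print(time_delay(100 * u.pc / u.cm**3, 1.53 * u.GHz, 1.405 * u.GHz))  # ~ -33 ms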
with our assumptions\n assert spwaxis == 2\n assert timeaxis == 0\n freq = freq.reshape(-1, data.shape[3])*u.GHz\n freq = freq[spw, :]\n time = ((time - time[0])*u.d).to(u.ms)\n dtime = np.diff(time)\n assert np.all(np.abs((dtime - dtime[0])/dtime[0]) < 1e-2)\n dtime = np.median(dtime)\n\n # Calculate dispersion delay and roll axis\n dispersion_delay = time_delay(dispersion_measure, freq, ref_freq)\n dispersion_bins = np.rint(dispersion_delay/dtime).astype(np.int)\n # Jit provides a very moderate speed up of ~6 percent\n disperse_worker(data, dispersion_bins)\n\n # Write out the data to the ms\n data = data.reshape(-1, data.shape[3], data.shape[4])\n with table('{0}.ms'.format(msname), readonly=False) as tb:\n tb.putcol('DATA', data)\n\ndef dedisperse(msname, dispersion_measure, ref_freq=1.405*u.GHz):\n \"\"\"Dedisperses measurement set incoherently.\n\n Parameters\n ----------\n msname : str\n The full path to the measurement set to dedisperse, without the `.ms`\n extension.\n dispersion_measure : astropy quantity\n Dispersion measure to remove in pc / cm3.\n ref_freq : astropy quantity\n Reference frequency relative to which the dispersion delay is\n evaluated.\n \"\"\"\n disperse(msname, -1*dispersion_measure, ref_freq)\n\n@jit(nopython=True)\ndef disperse_worker(data, dispersion_bins):\n \"\"\"Roll with jit.\n\n Parameters\n ----------\n data : ndarray\n Dimensions (time, baseline, spw, freq, pol).\n dispersion_bins : ndarray\n The number of bins to shift by. Dimensions (spw, freq).\n \"\"\"\n for bidx in range(data.shape[1]):\n for i in range(data.shape[2]):\n for j in range(data.shape[3]):\n for pidx in range(data.shape[4]):\n data[:, bidx, i, j, pidx] = np.roll(\n data[:, bidx, i, j, pidx],\n dispersion_bins[i, j],\n )\n\ndef disperse_python(data, dispersion_bins):\n \"\"\"Numpy roll.\n\n Parameters\n ----------\n data : ndarray\n Dimensions (time, baseline, spw, freq, pol).\n dispersion_bins : ndarray\n The number of bins to shift by. Dimensions (spw, freq).\n \"\"\"\n for i in range(data.shape[2]):\n for j in range(data.shape[3]):\n data[..., i, j, :] = np.roll(\n data[..., i, j, :],\n dispersion_bins[i, j],\n axis=0\n )\n", "id": "11707164", "language": "Python", "matching_score": 1.8711798191070557, "max_stars_count": 1, "path": "dsacalib/dispersion.py" }, { "content": "\"\"\"Functions for calibration of DSA-110 visibilities.\n\nThese functions use the CASA package casatools to calibrate\nvisibilities stored as measurement sets.\n\nAuthor: <NAME>, <EMAIL>, 10/2019\n\n\"\"\"\nimport os\n# Always import scipy before casatools\nfrom scipy.fftpack import fft, fftshift, fftfreq\nfrom scipy.signal import medfilt\nimport numpy as np\nimport casatools as cc\nfrom casacore.tables import table\nfrom dsacalib.ms_io import read_caltable\n\ndef delay_calibration_worker(msname, sourcename, refant, t, combine_spw, name):\n r\"\"\"Calibrates delays using CASA.\n\n Uses CASA to calibrate delays and write the calibrated visibilities to the\n corrected_data column of the measurement set.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The measurement set `msname`.ms will\n be opened.\n sourcename : str\n The name of the calibrator source. The calibration table will be\n written to `msname`\\_`sourcename`\\_kcal.\n refant : str\n The reference antenna to use in calibration. If type *str*, this is\n the name of the antenna. If type *int*, it is the index of the antenna\n in the measurement set.\n t : str\n The integration time to use before calibrating, e.g. 
``'inf'`` or\n ``'60s'``. See the CASA documentation for more examples. Defaults to\n ``'inf'`` (averaging over the entire observation time).\n combine_spw : boolean\n If True, distinct spws in the same ms will be combined before delay\n calibration.\n name : str\n The suffix for the calibration table.\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n if combine_spw:\n combine = 'field,scan,obs,spw'\n else:\n combine = 'field,scan,obs'\n error = 0\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n error += not cb.setsolve(\n type='K',\n t=t,\n refant=refant,\n combine=combine,\n table='{0}_{1}_{2}'.format(msname, sourcename, name)\n )\n error += not cb.solve()\n error += not cb.close()\n return error\n\ndef delay_calibration(msname, sourcename, refants, t1='inf', t2='60s',\n combine_spw=False):\n r\"\"\"Calibrates delays using CASA.\n\n Uses CASA to calibrate delays and write the calibrated visibilities to the\n corrected_data column of the measurement set.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The measurement set `msname`.ms will\n be opened.\n sourcename : str\n The name of the calibrator source. The calibration table will be\n written to `msname`\\_`sourcename`\\_kcal.\n refants : list(str)\n The reference antennas to use in calibration. If list items are type\n *str*, this is the name of the antenna. If type *int*, it is the index\n of the antenna in the measurement set. An average is done over all\n reference antennas to get the final delay table.\n t1 : str\n The integration time to use before calibrating to generate the final\n delay table, e.g. ``'inf'`` or ``'60s'``. See the CASA documentation\n for more examples. Defaults to ``'inf'`` (averaging over the entire\n observation time).\n t2 : str\n The integration time to use before fast delay calibrations used to flag\n antennas with poor delay solutions.\n combine_spw : boolean\n If True, distinct spws in the same ms will be combined before delay\n calibration.\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n assert isinstance(refants, list)\n error = 0\n refant = None\n for t in [t1, t2]:\n kcorr = None\n for refant in refants:\n if isinstance(refant, str):\n refantidx = int(refant)-1\n else:\n refantidx = refant\n error += delay_calibration_worker(\n msname,\n sourcename,\n refant,\n t,\n combine_spw,\n 'ref{0}_{1}kcal'.format(refant, '' if t==t1 else '2')\n )\n if kcorr is None:\n kcorr, _, flags, _, ant2 = read_caltable(\n '{0}_{1}_ref{2}_{3}kcal'.format(\n msname,\n sourcename,\n refant,\n '' if t==t1 else '2'\n ),\n cparam=False,\n reshape=False\n )\n else:\n kcorrtmp, _, flagstmp, _, ant2tmp = read_caltable(\n '{0}_{1}_ref{2}_{3}kcal'.format(\n msname,\n sourcename,\n refant,\n '' if t==t1 else '2'\n ),\n cparam=False,\n reshape=False\n )\n antflags = np.abs(\n flags.reshape(flags.shape[0], -1).mean(axis=1)-1) < 1e-5\n assert antflags[refantidx] == 0, \\\n 'Refant {0} is flagged in kcorr!'.format(refant) + \\\n 'Choose refants that are separated in uv-space.'\n kcorr[antflags, ...] = kcorrtmp[antflags, ...]-\\\n kcorr[refantidx, ...]\n ant2[antflags, ...] = ant2tmp[antflags, ...]\n flags[antflags, ...] 
= flagstmp[antflags, ...]\n # write out to a table\n with table(\n '{0}_{1}_ref{2}_{3}kcal'.format(\n msname,\n sourcename,\n refant,\n '' if t==t1 else '2'\n ),\n readonly=False\n ) as tb:\n tb.putcol('FPARAM', kcorr)\n tb.putcol('FLAG', flags)\n tb.putcol('ANTENNA2', ant2)\n os.rename(\n '{0}_{1}_ref{2}_{3}kcal'.format(\n msname,\n sourcename,\n refant,\n '' if t==t1 else '2'\n ),\n '{0}_{1}_{2}kcal'.format(\n msname,\n sourcename,\n '' if t==t1 else '2'\n )\n )\n return error\n\ndef gain_calibration(\n msname, sourcename, refant, blbased=False, forsystemhealth=False,\n keepdelays=False, tbeam='30s', interp_thresh=1.5, interp_polyorder=7\n):\n r\"\"\"Use CASA to calculate bandpass and complex gain solutions.\n\n Saves solutions to calibration tables and calibrates the measurement set by\n applying delay, bandpass, and complex gain solutions. Uses baseline-based\n calibration routines within CASA.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The MS `msname`.ms will be opened.\n sourcename : str\n The name of the calibrator source. The calibration table will be\n written to `msname`\\_`sourcename`\\_kcal.\n refant : str\n The reference antenna to use in calibration. If type *str*, this is\n the name of the antenna. If type *int*, it is the index of the antenna\n in the measurement set.\n blbased : boolean\n Set to True if baseline-based calibration desired.\n forsystemhealth : boolean\n Set to True if gain calibration is for system health monitoring. Delays\n will be kept at full resolution. If set to False, then at least some of\n the delay will be incorporated into the bandpass gain table.\n keepdelays : boolean\n Set to True if you want to update the delays currently set in the\n system. In this case, delay changes of integer 2 ns will be kept in the\n delay calibration table, and residual delays will be incorporated into\n the bandpass gain table. If set to False, all of the delay will be\n incorporated into the bandpass gain table.\n tbeam : str\n The integration time to use when measuring gain variations over time,\n e.g. ``'inf'`` or ``'60s'``. See the CASA documentation for more\n examples.\n interp_thresh : float\n Sets flagging of bandpass solutions before interpolating in order to\n smooth the solutions. 
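# --- Illustrative sketch: the caltable-chaining pattern used by gain_calibration below.
# Each solve appends a dict with 'table', 'type' and 'spwmap' keys, and the accumulated
# list is applied via apply_calibration_tables before the next solve. The table names
# and reference antenna here are placeholders only.
cb = cc.calibrater()
cb.open('example.ms')
apply_calibration_tables(cb, [
    {'table': 'example_cal_kcal', 'type': 'K', 'spwmap': [-1]},
    {'table': 'example_cal_bcal', 'type': 'B', 'spwmap': [-1]},
])
cb.setsolve(type='G', combine='field,scan,obs', table='example_cal_gacal',
            refant='24', apmode='a', t='inf')
cb.solve()
cb.close()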
After median baselining, any points that deviate\n by more than interp_thresh*std are flagged.\n interp_polyorder : int\n The order of the polynomial used to smooth bandpass solutions.\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n combine = 'field,scan,obs'\n spwmap = [-1]\n error = 0\n fref_snaps = 0.03 # SNAPs correct to freq of 30 MHz\n\n # Convert delay calibration into a bandpass representation\n caltables = [{\n 'table': '{0}_{1}_kcal'.format(msname, sourcename),\n 'type': 'K',\n 'spwmap': spwmap\n }]\n\n if not forsystemhealth:\n with table('{0}.ms/SPECTRAL_WINDOW'.format(msname)) as tb:\n fobs = np.array(tb.CHAN_FREQ[:]).squeeze(0)/1e9\n fref = np.array(tb.REF_FREQUENCY[:])/1e9\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += not cb.setsolve(\n type='MF' if blbased else 'B',\n combine=combine,\n table='{0}_{1}_bkcal'.format(msname, sourcename),\n refant=refant,\n apmode='a',\n solnorm=True\n )\n error += not cb.solve()\n error += not cb.close()\n\n with table(\n '{0}_{1}_kcal'.format(msname, sourcename), readonly=False\n ) as tb:\n kcorr = np.array(tb.FPARAM[:])\n tb.putcol('FPARAM', np.zeros(kcorr.shape, kcorr.dtype))\n\n with table(\n '{0}_{1}_bkcal'.format(msname, sourcename),\n readonly=False\n ) as tb:\n bpass = np.array(tb.CPARAM[:])\n bpass = np.ones(bpass.shape, bpass.dtype)\n kcorr = kcorr.squeeze()\n bpass *= np.exp(\n 2j*np.pi*(fobs[:, np.newaxis]-fref)*(\n kcorr[:, np.newaxis, :]#-kcorr[int(refant)-1, :]\n )\n )\n tb.putcol('CPARAM', bpass)\n caltables += [\n {\n 'table': '{0}_{1}_bkcal'.format(msname, sourcename),\n 'type': 'B',\n 'spwmap': spwmap\n }\n ]\n\n # Rough bandpass calibration\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += cb.setsolve(\n type='B',\n combine=combine,\n table='{0}_{1}_bcal'.format(msname, sourcename),\n refant=refant,\n apmode='ap',\n t='inf',\n solnorm=True\n )\n error += cb.solve()\n error += cb.close()\n\n caltables += [\n {\n 'table': '{0}_{1}_bcal'.format(msname, sourcename),\n 'type': 'B',\n 'spwmap': spwmap\n }\n ]\n\n # Gain calibration\n cb = cc.calibrater()\n error += cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += cb.setsolve(\n type='G',\n combine=combine,\n table='{0}_{1}_gacal'.format(msname, sourcename),\n refant=refant,\n apmode='a',\n t='inf'\n )\n error += cb.solve()\n error += cb.close()\n\n caltables += [\n {\n 'table': '{0}_{1}_gacal'.format(msname, sourcename),\n 'type': 'G',\n 'spwmap': spwmap\n }\n ]\n\n cb = cc.calibrater()\n error += cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += cb.setsolve(\n type='G',\n combine=combine,\n table='{0}_{1}_gpcal'.format(msname, sourcename),\n refant=refant,\n apmode='p',\n t='inf'\n )\n error += cb.solve()\n error += cb.close()\n\n # Final bandpass calibration\n caltables = [\n {\n 'table': '{0}_{1}_gacal'.format(msname, sourcename),\n 'type': 'G',\n 'spwmap': spwmap\n },\n {\n 'table': '{0}_{1}_gpcal'.format(msname, sourcename),\n 'type': 'G',\n 'spwmap': spwmap\n }\n ]\n\n if not forsystemhealth:\n caltables += [\n {\n 'table': '{0}_{1}_bkcal'.format(msname, sourcename),\n 'type': 'B',\n 'spwmap': spwmap\n }\n ]\n caltables += [\n {\n 'table': '{0}_{1}_kcal'.format(msname, sourcename),\n 'type': 'K',\n 'spwmap': spwmap\n }\n ]\n\n cb = cc.calibrater()\n error += 
cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += cb.setsolve(\n type='B',\n combine=combine,\n table='{0}_{1}_bacal'.format(msname, sourcename),\n refant=refant,\n apmode='a',\n t='inf',\n solnorm=True\n )\n error += cb.solve()\n error += cb.close()\n\n if not forsystemhealth:\n interpolate_bandpass_solutions(\n msname,\n sourcename,\n thresh=interp_thresh,\n polyorder=interp_polyorder,\n mode='a'\n )\n\n caltables += [\n {\n 'table': '{0}_{1}_bacal'.format(msname, sourcename),\n 'type': 'B',\n 'spwmap': spwmap\n }\n ]\n\n cb = cc.calibrater()\n error += cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += cb.setsolve(\n type='B',\n combine=combine,\n table='{0}_{1}_bpcal'.format(msname, sourcename),\n refant=refant,\n apmode='p',\n t='inf',\n solnorm=True\n )\n error += cb.solve()\n error += cb.close()\n\n if not forsystemhealth: # and not keepdelays:\n interpolate_bandpass_solutions(\n msname,\n sourcename,\n thresh=interp_thresh,\n polyorder=interp_polyorder,\n mode='p'\n )\n\n if not forsystemhealth and keepdelays:\n with table(\n '{0}_{1}_kcal'.format(msname, sourcename),\n readonly=False\n ) as tb:\n fparam = np.array(tb.FPARAM[:])\n newparam = np.round(kcorr[:, np.newaxis, :]/2)*2\n print('kcal', fparam.shape, newparam.shape)\n tb.putcol('FPARAM', newparam)\n with table(\n '{0}_{1}_bkcal'.format(msname, sourcename),\n readonly=False\n ) as tb:\n bpass = np.array(tb.CPARAM[:])\n print(newparam.shape, bpass.shape, fobs.shape)\n bpass *= np.exp(\n -2j*np.pi*(fobs[:, np.newaxis]-fref_snaps)*\n newparam\n )\n print(bpass.shape)\n tb.putcol('CPARAM', bpass)\n\n if forsystemhealth:\n caltables += [\n {\n 'table': '{0}_{1}_bpcal'.format(msname, sourcename),\n 'type': 'B',\n 'spwmap': spwmap\n }\n ]\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += not cb.setsolve(\n type='M' if blbased else 'G',\n combine=combine,\n table='{0}_{1}_2gcal'.format(msname, sourcename),\n refant=refant,\n apmode='ap',\n t=tbeam\n )\n error += not cb.solve()\n error += not cb.close()\n\n return error\n\n\ndef flag_antenna(msname, antenna, datacolumn='data', pol=None):\n \"\"\"Flags an antenna in a measurement set using CASA.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The MS `msname`.ms will be opened.\n antenna : str\n The antenna to flag. If type *str*, this is the name of the antenna. If\n type *int*, the index of the antenna in the measurement set.\n datacolumn : str\n The column of the measurement set to flag. Options are ``'data'``,\n ``'model'``, ``'corrected'`` for the uncalibrated visibilities, the\n visibility model (used by CASA to calculate calibration solutions), the\n calibrated visibilities. Defaults to ``'data'``.\n pol : str\n The polarization to flag. Must be `'A'` (which is mapped to\n polarization 'XX' of the CASA measurement set) or `'B'` (mapped to\n polarization 'YY'). Can also be `None`, for which both polarizations\n are flagged. 
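# --- Illustrative sketch: the agentflagger sequence that flag_antenna (below) and the
# other flagging helpers in this module wrap. The ms name and antenna are placeholders.
ag = cc.agentflagger()
ag.open('example.ms')
ag.selectdata()
ag.parseagentparameters({'mode': 'manual', 'datacolumn': 'data',
                         'antenna': '24', 'correlation': 'XX,YY'})
ag.init()
ag.run()
ag.done()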
Defaults to `None`.\n\n Returns\n -------\n int\n The number of errors that occured during flagging.\n \"\"\"\n if isinstance(antenna, int):\n antenna = str(antenna)\n error = 0\n ag = cc.agentflagger()\n error += not ag.open('{0}.ms'.format(msname))\n error += not ag.selectdata()\n rec = {}\n rec['mode'] = 'manual'\n #rec['clipoutside'] = False\n rec['datacolumn'] = datacolumn\n rec['antenna'] = antenna\n if pol is not None:\n rec['correlation'] = 'XX' if pol == 'A' else 'YY'\n else:\n rec['correlation'] = 'XX,YY'\n error += not ag.parseagentparameters(rec)\n error += not ag.init()\n error += not ag.run()\n error += not ag.done()\n\n return error\n\ndef flag_manual(msname, key, value, datacolumn='data', pol=None):\n \"\"\"Flags a measurement set in CASA using a flagging string.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The MS `msname`.ms will be opened.\n key : str\n The CASA-interpreted flagging specifier.\n datacolumn : str\n The column of the measurement set to flag. Options are ``'data'``,\n ``'model'``, ``'corrected'`` for the uncalibrated visibilities, the\n visibility model (used by CASA to calculate calibration solutions), the\n calibrated visibilities. Defaults to ``'data'``.\n pol : str\n The polarization to flag. Must be `'A'` (which is mapped to\n polarization 'XX' of the CASA measurement set) or `'B'` (mapped to\n polarization 'YY'). Can also be `None`, for which both polarizations\n are flagged. Defaults to `None`.\n\n Returns\n -------\n int\n The number of errors that occured during flagging.\n \"\"\"\n error = 0\n ag = cc.agentflagger()\n error += not ag.open('{0}.ms'.format(msname))\n error += not ag.selectdata()\n rec = {}\n rec['mode'] = 'manual'\n rec['datacolumn'] = datacolumn\n rec[key] = value\n if pol is not None:\n rec['correlation'] = 'XX' if pol == 'A' else 'YY'\n else:\n rec['correlation'] = 'XX,YY'\n error += not ag.parseagentparameters(rec)\n error += not ag.init()\n error += not ag.run()\n error += not ag.done()\n\n return error\n\ndef flag_baselines(msname, datacolumn='data', uvrange='2~15m'):\n \"\"\"Flags an antenna in a measurement set using CASA.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The MS `msname`.ms will be opened.\n datacolumn : str\n The column of the measurement set to flag. Options are ``'data'``,\n ``'model'``, ``'corrected'`` for the uncalibrated visibilities, the\n visibility model (used by CASA to calculate calibration solutions), the\n calibrated visibilities. Defaults to ``'data'``.\n uvrange : str\n The uvrange to flag. Should be CASA-interpretable.\n\n Returns\n -------\n int\n The number of errors that occured during flagging.\n \"\"\"\n error = 0\n ag = cc.agentflagger()\n error += not ag.open('{0}.ms'.format(msname))\n error += not ag.selectdata()\n rec = {}\n rec['mode'] = 'manual'\n rec['datacolumn'] = datacolumn\n rec['uvrange'] = uvrange\n error += not ag.parseagentparameters(rec)\n error += not ag.init()\n error += not ag.run()\n error += not ag.done()\n\n return error\n\ndef reset_flags(msname, datacolumn=None):\n \"\"\"Resets all flags in a measurement set, so that all data is unflagged.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The MS `msname`.ms will be opened.\n datacolumn : str\n The column of the measurement set to flag. Options are ``'data'``,\n ``'model'``, ``'corrected'`` for the uncalibrated visibilities, the\n visibility model (used by CASA to calculate calibration solutions), the\n calibrated visibilities. 
Defaults to ``'data'``.\n\n Returns\n -------\n int\n The number of errors that occured during flagging.\n \"\"\"\n error = 0\n ag = cc.agentflagger()\n error += not ag.open('{0}.ms'.format(msname))\n error += not ag.selectdata()\n rec = {}\n rec['mode'] = 'unflag'\n if datacolumn is not None:\n rec['datacolumn'] = datacolumn\n rec['antenna'] = ''\n error += not ag.parseagentparameters(rec)\n error += not ag.init()\n error += not ag.run()\n error += not ag.done()\n\n return error\n\ndef flag_zeros(msname, datacolumn='data'):\n \"\"\"Flags all zeros in a measurement set.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The MS `msname`.ms will be opened.\n datacolumn : str\n The column of the measurement set to flag. Options are ``'data'``,\n ``'model'``, ``'corrected'`` for the uncalibrated visibilities, the\n visibility model (used by CASA to calculate calibration solutions), the\n calibrated visibilities. Defaults to ``'data'``.\n\n Returns\n -------\n int\n The number of errors that occured during flagging.\n \"\"\"\n error = 0\n ag = cc.agentflagger()\n error += not ag.open('{0}.ms'.format(msname))\n error += not ag.selectdata()\n rec = {}\n rec['mode'] = 'clip'\n rec['clipzeros'] = True\n rec['datacolumn'] = datacolumn\n error += not ag.parseagentparameters(rec)\n error += not ag.init()\n error += not ag.run()\n error += not ag.done()\n\n return error\n\n# TODO: Change times to not use mjds, but mjd instead\ndef flag_badtimes(msname, times, bad, nant, datacolumn='data', verbose=False):\n \"\"\"Flags bad time bins for each antenna in a measurement set using CASA.\n\n Could use some work to select antennas to flag in a smarter way.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. The MS `msname`.ms will be opened.\n times : ndarray\n A 1-D array of times, type float, seconds since MJD=0. Times should be\n equally spaced and cover the entire time range of the measurement set,\n but can be coarser than the resolution of the measurement set.\n bad : ndarray\n A 1-D boolean array with dimensions (len(`times`), `nant`). Should have\n a value of ``True`` if the corresponding timebins should be flagged.\n nant : int\n The number of antennas in the measurement set (includes ones not in the\n visibilities).\n datacolumn : str\n The column of the measurement set to flag. Options are ``'data'``,\n ``'model'``, ``'corrected'`` for the uncalibrated visibilities, the\n visibility model (used by CASA to calculate calibration solutions), the\n calibrated visibilities. Defaults to ``'data'``.\n verbose : boolean\n If ``True``, will print information about the antenna/time pairs being\n flagged. 
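# --- Illustrative sketch: the 'start~end; start~end' time-selection string that
# flag_badtimes (below) builds from the bad time bins, with made-up inputs.
import numpy as np
times = np.array([100.0, 101.0, 102.0, 103.0])   # bin centres
bad = np.array([False, True, False, True])
tdiff = np.median(np.diff(times))
tstr = '; '.join('{0}~{1}'.format(t - tdiff / 2, t + tdiff / 2)
                 for t, b in zip(times, bad) if b)
# tstr == '100.5~101.5; 102.5~103.5'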
Defaults to ``False``.\n\n Returns\n -------\n int\n The number of errors that occured during flagging.\n \"\"\"\n error = 0\n tdiff = np.median(np.diff(times))\n ag = cc.agentflagger()\n error += not ag.open('{0}.ms'.format(msname))\n error += not ag.selectdata()\n for i in range(nant):\n rec = {}\n rec['mode'] = 'clip'\n rec['clipoutside'] = False\n rec['datacolumn'] = datacolumn\n rec['antenna'] = str(i)\n rec['polarization_type'] = 'XX'\n tstr = ''\n for j, timesj in enumerate(times):\n if bad[j]:\n if len(tstr) > 0:\n tstr += '; '\n tstr += '{0}~{1}'.format(timesj-tdiff/2, timesj+tdiff/2)\n if verbose:\n print('For antenna {0}, flagged: {1}'.format(i, tstr))\n error += not ag.parseagentparameters(rec)\n error += not ag.init()\n error += not ag.run()\n\n rec['polarization_type'] = 'YY'\n tstr = ''\n for j, timesj in enumerate(times):\n if bad[j]:\n if len(tstr) > 0:\n tstr += '; '\n tstr += '{0}~{1}'.format(timesj-tdiff/2, timesj+tdiff/2)\n if verbose:\n print('For antenna {0}, flagged: {1}'.format(i, tstr))\n error += not ag.parseagentparameters(rec)\n error += not ag.init()\n error += not ag.run()\n error += not ag.done()\n\n return error\n\ndef calc_delays(vis, df, nfavg=5, tavg=True):\n \"\"\"Calculates power as a function of delay from the visibilities.\n\n This uses scipy fftpack to fourier transform the visibilities along the\n frequency axis. The power as a function of delay can then be used in\n fringe-fitting.\n\n Parameters\n ----------\n vis : ndarray\n The complex visibilities. 4 dimensions, (baseline, time, frequency,\n polarization).\n df : float\n The width of the frequency channels in GHz.\n nfavg : int\n The number of frequency channels to average by after the Fourier\n transform. Defaults to 5.\n tavg : boolean\n If ``True``, the visibilities are averaged in time before the Fourier\n transform. Defaults to ``True``.\n\n Returns\n -------\n vis_ft : ndarray\n The complex visibilities, Fourier-transformed along the time axis. 3\n (or 4, if `tavg` is set to False) dimensions, (baseline, delay,\n polarization) (or (baseline, time, delay, polarization) if `tavg` is\n set to False).\n delay_arr : ndarray\n Float, the values of the delay pixels in nanoseconds.\n \"\"\"\n nfbins = vis.shape[-2]//nfavg*nfavg\n npol = vis.shape[-1]\n if tavg:\n vis_ft = fftshift(fft(np.pad(vis[..., :nfbins, :].mean(1),\n ((0, 0), (0, nfbins), (0, 0))), axis=-2),\n axes=-2)\n vis_ft = vis_ft.reshape(vis_ft.shape[0], -1, 2*nfavg, npol).mean(-2)\n else:\n vis_ft = fftshift(fft(np.pad(vis[..., :nfbins, :],\n ((0, 0), (0, 0), (0, nfbins), (0, 0))),\n axis=-2), axes=-2)\n vis_ft = vis_ft.reshape(vis_ft.shape[0], vis_ft.shape[1], -1, 2*nfavg,\n npol).mean(-2)\n delay_arr = fftshift(fftfreq(nfbins))/df\n delay_arr = delay_arr.reshape(-1, nfavg).mean(-1)\n\n return vis_ft, delay_arr\n\n# def get_bad_times(msname, sourcename, refant, tint='59s', combine_spw=False,\n# nspw=1):\n# r\"\"\"Flags bad times in the calibrator data.\n\n# Calculates delays on short time periods and compares them to the delay\n# calibration solution. Can only be run after delay calibration.\n\n# Parameters\n# ----------\n# msname : str\n# The name of the measurement set. The MS `msname`.ms will be opened.\n# sourcename : str\n# The name of the calibrator source. The calibration table will be\n# written to `msname`\\_`sourcename`\\_kcal.\n# refant : str\n# The reference antenna to use in calibration. If type *str*, the name of\n# the reference antenna, if type *int*, the index of the antenna in the\n# CASA measurement set. 
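# --- Illustrative sketch: recovering a fringe delay the way calc_delays (above) does,
# using a synthetic single-baseline visibility with a known 20 ns delay.
import numpy as np
from scipy.fftpack import fft, fftshift, fftfreq
df = 0.25e-3                                   # channel width in GHz
fobs = 1.28 + np.arange(1024) * df             # channel centres in GHz
vis = np.exp(2j * np.pi * fobs * 20.0)         # GHz x ns is dimensionless
delay_arr = fftshift(fftfreq(fobs.size)) / df  # delay axis in ns
peak = delay_arr[np.argmax(np.abs(fftshift(fft(vis))))]
# peak is ~20 ns, to within the ~3.9 ns delay resolution of 1/(1024*df)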
This must be the same as the reference antenna\n# used in the delay calibration, or unexpected errors may occur.\n# tint : str\n# The timescale on which to calculate the delay solutions (and evaluate\n# the data quality). Must be a CASA-interpreted string, e.g. ``'inf'``\n# (average all of the data) or ``'60s'`` (average data to 60-second bins\n# before delay calibration). Defaults to ``'59s'``.\n# combine_spw : bool\n# Set to True if the spws were combined before delay calibration.\n# nspw : int\n# The number of spws in the measurement set.\n\n# Returns\n# -------\n# bad_times : array\n# Booleans, ``True`` if the data quality is poor and the time-bin should\n# be flagged, ``False`` otherwise. Same dimensions as times.\n# times : array\n# Floats, the time (mjd) for each delay solution.\n# error : int\n# The number of errors that occured during calibration.\n# \"\"\"\n# if combine_spw:\n# combine = 'field,scan,obs,spw'\n# else:\n# combine = 'field,scan,obs'\n# error = 0\n# # Solve the calibrator data on minute timescales\n# cb = cc.calibrater()\n# error += not cb.open('{0}.ms'.format(msname))\n# error += not cb.setsolve(type='K', t=tint, refant=refant, combine=combine,\n# table='{0}_{1}_2kcal'.format(msname, sourcename))\n# error += not cb.solve()\n# error += not cb.close()\n# # Pull the solutions for the entire timerange and the 60-s data from the\n# # measurement set tables\n# antenna_delays, times, _flags, _ant1, _ant2 = read_caltable(\n# '{0}_{1}_2kcal'.format(msname, sourcename), cparam=False)\n# npol = antenna_delays.shape[-1]\n# kcorr, _tkcorr, _flags, _ant1, _ant2 = read_caltable(\n# '{0}_{1}_kcal'.format(msname, sourcename), cparam=False)\n# # Shapes (baseline, time, spw, frequency, pol)\n# # Squeeze on the freqeuncy axis\n# antenna_delays = antenna_delays.squeeze(axis=3)\n# kcorr = kcorr.squeeze(axis=3)\n# nant = antenna_delays.shape[0]\n# nspw = antenna_delays.shape[2]\n\n# threshold = nant*nspw*npol//2\n# bad_pixels = (np.abs(antenna_delays-kcorr) > 1.5)\n# bad_times = (bad_pixels.reshape(bad_pixels.shape[0],\n# bad_pixels.shape[1], -1)\n# .sum(axis=-1).sum(axis=0) > threshold)\n# # bad_times[:, np.sum(np.sum(bad_times, axis=0), axis=1) > threshold, :] \\\n# # = np.ones((nant, 1, npol))\n\n# return bad_times, times, error\n\ndef apply_calibration(msname, calname, msnamecal=None, combine_spw=False,\n nspw=1, blbased=False):\n r\"\"\"Applies the calibration solution.\n\n Applies delay, bandpass and complex gain tables to a measurement set.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set to apply calibration solutions to.\n Opens `msname`.ms\n calname : str\n The name of the calibrator. Tables that start with\n `msnamecal`\\_`calname` will be applied to the measurement set.\n msnamecal : str\n The name of the measurement set used to model the calibration solutions\n Calibration tables prefixed with `msnamecal`\\_`calname` will be opened\n and applied. If ``None``, `msnamecal` is set to `msname`. 
Defaults to\n ``None``.\n combine_spw : bool\n Set to True if multi-spw ms and spws in the ms were combined before\n calibration.\n nspw : int\n The number of spws in the ms.\n blbased : bool\n Set to True if the calibration was baseline-based.\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n if combine_spw:\n spwmap = [0]*nspw\n else:\n spwmap = [-1]\n if msnamecal is None:\n msnamecal = msname\n caltables = [{'table': '{0}_{1}_kcal'.format(msnamecal, calname),\n 'type': 'K',\n 'spwmap': spwmap},\n {'table': '{0}_{1}_bcal'.format(msnamecal, calname),\n 'type': 'MF' if blbased else 'B',\n 'spwmap': spwmap},\n {'table': '{0}_{1}_gacal'.format(msnamecal, calname),\n 'type': 'M' if blbased else 'G',\n 'spwmap': spwmap},\n {'table': '{0}_{1}_gpcal'.format(msnamecal, calname),\n 'type': 'M' if blbased else 'G',\n 'spwmap': spwmap}]\n error = apply_calibration_tables(msname, caltables)\n return error\n\ndef apply_delay_bp_cal(\n msname, calname, blbased=False, msnamecal=None, combine_spw=False, nspw=1\n):\n r\"\"\"Applies delay and bandpass calibration.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set containing the visibilities. The\n measurement set `msname`.ms will be opened.\n calname : str\n The name of the calibrator source used in calibration of the\n measurement set. The tables `msname`\\_`calname`_kcal and\n `msnamecal`\\_`calname`_bcal will be applied to the measurement set.\n blbased : boolean\n Set to True if baseline-based calibration routines were done. Defaults\n False.\n msnamecal : str\n The prefix of the measurement set used to derive the calibration\n solutions. If None, set to `msname`.\n combine_spw : boolean\n Set to True if the spws were combined when deriving the solutions.\n Defaults False.\n nspw : int\n The number of spws in the dataset. Only used if `combine_spw` is set\n to True. Defaults 1.\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n if combine_spw:\n spwmap = [0]*nspw\n else:\n spwmap = [-1]\n if msnamecal is None:\n msnamecal = msname\n error = 0\n caltables = [{'table': '{0}_{1}_kcal'.format(msnamecal, calname),\n 'type': 'K',\n 'spwmap': spwmap},\n {'table': '{0}_{1}_bcal'.format(msnamecal, calname),\n 'type': 'MF' if blbased else 'B',\n 'spwmap': spwmap}]\n error += apply_and_correct_calibrations(msname, caltables)\n return error\n\n\ndef fill_antenna_gains(gains, flags=None):\n \"\"\"Fills in the antenna gains for triple-antenna calibration.\n\n Takes the gains from baseline-based calibration for a trio of antennas and\n calculates the corresponding antenna gains using products of the baseline\n gains. Also propagates flag information for the input baseline gains to\n the antenna gains.\n\n Parameters\n ----------\n gains : narray\n The complex gains matrix, first dimension is baseline. Indices 1, 2 and\n 4 contain the gains for the cross-correlations. Information in indices\n 0, 3 and 5 is ignored and overwritten.\n flags : ndarray\n A boolean array, containing flag information for the `gains` array. 1\n if the data is flagged, 0 otherwise. If ``None``, assumes no flag\n information available. The first dimension is baseline. Indices 1, 2\n and 4 contain the flags for the cross-correlations. Information in\n indices 0, 3 and 5 is ignored and overwritten.\n\n Returns\n -------\n gains : ndarray\n The complex gains matrix, first dimension is baseline. Indices 1, 2 and\n 4 contain the gains for the cross-correlations. 
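# --- Illustrative sketch: why the products used by fill_antenna_gains (below) isolate
# per-antenna terms. Assumes a baseline-gain convention G_xy = g_x * conj(g_y) for
# antennas (a, b, c) stored at cross indices 1=(a,b), 2=(a,c), 4=(b,c); each
# combination then reduces to a real |g|^2 for one antenna.
import numpy as np
rng = np.random.default_rng(0)
ga, gb, gc = rng.normal(size=3) + 1j * rng.normal(size=3)
G_ab, G_ac, G_bc = ga * np.conj(gb), ga * np.conj(gc), gb * np.conj(gc)
assert np.isclose(np.conj(G_ab) * G_ac / G_bc, np.abs(ga) ** 2)   # index 0
assert np.isclose(G_ab * G_bc / G_ac, np.abs(gb) ** 2)            # index 3
assert np.isclose(G_ac * np.conj(G_bc) / G_ab, np.abs(gc) ** 2)   # index 5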
Indices 0, 3 and 5\n contain the calculated values for the antennas.\n flags : ndarray\n A boolean array, containing flag information for the `gains` array. 1\n if the data is flagged, 0 otherwise. If None, assumes no flag\n information available. The first dimension is baseline. Indices 1, 2\n and 4 contain the flags for the cross-correlations. Indices 0,3 and 5\n contain the calculated values for the antennas.\n \"\"\"\n assert gains.shape[0] == 6, 'Will only calculate antenna gains for trio'\n# use ant1 and ant2 to do this?\n# for i in range(6):\n# if ant1[i] != ant2[i]:\n# idxp = np.where((ant1 == ant1[i]) & (ant2 == ant1[i]))[0][0]\n# idxn = np.where((ant1 == ant2[i]) & (ant2 == ant2[i]))[0][0]\n# idxd = np.where((ant1 == ant2) & (ant1 != ant1[i]) &\n# (ant1 != ant2[i]))[0][0]\n# gains[i] = np.conjugate(gains[idxn])*gains[idxp]/gains[idxd]\n gains[0] = np.conjugate(gains[1])*gains[2]/gains[4]\n gains[3] = gains[1]*gains[4]/gains[2]\n gains[5] = gains[2]*np.conjugate(gains[4])/gains[1]\n\n if flags is not None:\n flags[[0, 3, 5], ...] = np.min(np.array([flags[1]+flags[2]+flags[4],\n np.ones(flags[0].shape,\n dtype=int)]), axis=0)\n return gains, flags\n return gains\n\ndef calibrate_gain(msname, calname, caltable_prefix, refant, tga, tgp,\n blbased=False, combined=False):\n \"\"\"Calculates gain calibration only.\n\n Uses existing solutions for the delay and bandpass.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set for gain calibration.\n calname : str\n The name of the calibrator used in calibration.\n caltable_prefix : str\n The prefix of the delay and bandpass tables to be applied.\n refant : str\n The name of the reference antenna.\n tga : str\n A casa-understood integration time for gain amplitude calibration.\n tgp : str\n A casa-understood integration time for gain phase calibration.\n blbased : boolean\n Set to True if using baseline-based calibration for gains. 
Defaults\n False.\n combined : boolean\n Set to True if spectral windows are combined for calibration.\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n if combined:\n spwmap = [0]\n else:\n spwmap = [-1]\n if blbased:\n gtype = 'M'\n bptype = 'MF'\n else:\n gtype = 'G'\n bptype = 'B'\n combine='scan,field,obs'\n caltables = [{'table': '{0}_kcal'.format(caltable_prefix),\n 'type': 'K',\n 'spwmap': spwmap}\n ]\n error = 0\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += not cb.setsolve(type=bptype, combine=combine,\n table='{0}_{1}_bcal'.format(msname, calname),\n minblperant=1, refant=refant)\n error += not cb.solve()\n error += not cb.close()\n caltables += [\n {'table': '{0}_bcal'.format(caltable_prefix),\n 'type': bptype,\n 'spwmap': spwmap}\n ]\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, caltables)\n error += not cb.setsolve(type=gtype, combine=combine,\n table='{0}_{1}_gpcal'.format(msname, calname),\n t=tgp, minblperant=1, refant=refant, apmode='p')\n error += not cb.solve()\n error += not cb.close()\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n caltables += [{'table': '{0}_{1}_gpcal'.format(msname, calname),\n 'type': gtype,\n 'spwmap': spwmap}]\n error += apply_calibration_tables(cb, caltables)\n error += not cb.setsolve(type=gtype, combine=combine,\n table='{0}_{1}_gacal'.format(msname, calname),\n t=tga, minblperant=1, refant=refant, apmode='a')\n error += not cb.solve()\n error += not cb.close()\n return error\n\ndef apply_and_correct_calibrations(msname, calibration_tables):\n \"\"\"Applies and corrects calibration tables in an ms.\n\n Parameters\n ----------\n msname : str\n The measurement set filepath. Will open `msname`.ms.\n calibration_tables : list\n Calibration tables to apply. Each entry is a dictionary containing the\n keywords 'type' (calibration type, e.g. 'K'), 'spwmap' (spwmap for the\n calibration), and 'table' (full path to the calibration table).\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n error = 0\n cb = cc.calibrater()\n error += not cb.open('{0}.ms'.format(msname))\n error += apply_calibration_tables(cb, calibration_tables)\n error += not cb.correct()\n error += not cb.close()\n return error\n\ndef apply_calibration_tables(cb, calibration_tables):\n \"\"\"Applies calibration tables to an open calibrater object.\n\n Parameters\n ----------\n cb : cc.calibrater() instance\n Measurement set should be opened already.\n calibration_tables : list\n Calibration tables to apply. Each entry is a dictionary containing the\n keywords 'type' (calibration type, e.g. 'K'), 'spwmap' (spwmap for the\n calibration), and 'table' (full path to the calibration table).\n\n Returns\n -------\n int\n The number of errors that occured during calibration.\n \"\"\"\n error = 0\n for caltable in calibration_tables:\n error += not cb.setapply(type=caltable['type'],\n spwmap=caltable['spwmap'],\n table=caltable['table'])\n return error\n\ndef interpolate_bandpass_solutions(\n msname, calname, thresh=1.5, polyorder=7, mode='ap'\n):\n r\"\"\"Interpolates bandpass solutions.\n\n Parameters\n ----------\n msname : str\n The measurement set filepath (with the `.ms` extension omitted).\n calname : str\n The name of the calibrator source. 
Calibration tables starting with\n `msname`\\_`calname` will be opened.\n thresh : float\n Sets flagging of bandpass solutions before interpolating in order to\n smooth the solutions. After median baselining, any points that deviate\n by more than interp_thresh*std are flagged.\n polyorder : int\n The order of the polynomial used to smooth bandpass solutions.\n mode : str\n The bandpass calibration mode. Must be one of \"a\", \"p\" or \"ap\".\n \"\"\"\n if mode=='a':\n tbname = 'bacal'\n elif mode=='p':\n tbname = 'bpcal'\n elif mode=='ap':\n tbname = 'bcal'\n else:\n raise RuntimeError('mode must be one of \"a\", \"p\" or \"ap\"')\n\n with table('{0}_{1}_{2}'.format(msname, calname, tbname)) as tb:\n bpass = np.array(tb.CPARAM[:])\n flags = np.array(tb.FLAG[:])\n\n with table('{0}.ms'.format(msname)) as tb:\n antennas = np.unique(np.array(tb.ANTENNA1[:]))\n\n with table('{0}.ms/SPECTRAL_WINDOW'.format(msname)) as tb:\n fobs = np.array(tb.CHAN_FREQ[:]).squeeze(0)/1e9\n\n bpass_amp = np.abs(bpass)\n bpass_ang = np.angle(bpass)\n bpass_amp_out = np.ones(bpass.shape, dtype=bpass.dtype)\n bpass_ang_out = np.zeros(bpass.shape, dtype=bpass.dtype)\n\n # Interpolate amplitudes\n if mode in ('a', 'ap'):\n std = bpass_amp.std(axis=1, keepdims=True)\n for ant in antennas:\n for j in range(bpass.shape[-1]):\n offset = np.abs(\n bpass_amp[ant-1, :, j]-medfilt(\n bpass_amp[ant-1, :, j], 9\n )\n )/std[ant-1, :, j]\n idx = offset < thresh\n idx[flags[ant-1, :, j]] = 1\n if sum(idx) > 0:\n z_fit = np.polyfit(\n fobs[idx],\n bpass_amp[ant-1, idx, j],\n polyorder\n )\n p_fit = np.poly1d(z_fit)\n bpass_amp_out[ant-1, :, j] = p_fit(fobs)\n\n # Interpolate phase\n if mode in ('p', 'ap'):\n std = bpass_ang.std(axis=1, keepdims=True)\n for ant in antennas:\n for j in range(bpass.shape[-1]):\n offset = np.abs(\n bpass_ang[ant-1, :, j]-medfilt(\n bpass_ang[ant-1, :, j], 9\n )\n )/std[ant-1, :, j]\n idx = offset < thresh\n idx[flags[ant-1, :, j]] = 1\n if sum(idx) > 0:\n z_fit = np.polyfit(\n fobs[idx],\n bpass_ang[ant-1, idx, j],\n 7\n )\n p_fit = np.poly1d(z_fit)\n bpass_ang_out[ant-1, :, j] = p_fit(fobs)\n\n with table(\n '{0}_{1}_{2}'.format(msname, calname, tbname), readonly=False\n ) as tb:\n tb.putcol('CPARAM', bpass_amp_out*np.exp(1j*bpass_ang_out))\n # Reset flags for the interpolated solutions\n tbflag = np.array(tb.FLAG[:])\n tb.putcol('FLAG', np.zeros(tbflag.shape, tbflag.dtype))\n\ndef calibrate_phases(filenames, refant, msdir='/mnt/data/dsa110/calibration/'):\n \"\"\"Calibrate phases only for a group of calibrator passes.\n\n Parameters\n ----------\n filenames : dict\n A dictionary containing information on the calibrator passes to be\n calibrated. Same format as dictionary returned by\n dsacalib.utils.get_filenames()\n refant : str\n The reference antenna name to use. 
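# --- Illustrative sketch: the outlier-rejected polynomial smoothing that
# interpolate_bandpass_solutions (above) applies per antenna and polarization, shown
# here on a synthetic 1-D bandpass with the default thresh=1.5 and polyorder=7.
import numpy as np
from scipy.signal import medfilt
fobs = np.linspace(1.28, 1.53, 384)                        # GHz
bpass = 1.0 + 0.05 * np.sin(40 * fobs) + 0.005 * np.random.randn(fobs.size)
bpass[100] = 5.0                                           # one corrupted channel
offset = np.abs(bpass - medfilt(bpass, 9)) / bpass.std()
keep = offset < 1.5                                        # rejects the outlier
smooth = np.poly1d(np.polyfit(fobs[keep], bpass[keep], 7))(fobs)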
If int, will be interpreted as the\n reference antenna index instead.\n msdir : str\n The full path to the measurement set, with the `.ms` extension omitted.\n \"\"\"\n for date in filenames.keys():\n for cal in filenames[date].keys():\n msname = '{0}/{1}_{2}'.format(msdir, date, cal)\n if os.path.exists('{0}.ms'.format(msname)):\n reset_flags(msname)\n flag_baselines(msname, '2~27m')\n cb = cc.calibrater()\n cb.open('{0}.ms'.format(msname))\n cb.setsolve(\n type='B',\n combine='field,scan,obs',\n table='{0}_{1}_bpcal'.format(msname, cal),\n refant=refant,\n apmode='p',\n t='inf'\n )\n cb.solve()\n cb.close()\n\ndef calibrate_phase_single_ms(msname, refant, calname):\n cb = cc.calibrater()\n cb.open('{0}.ms'.format(msname))\n cb.setsolve(\n type='B',\n combine='field,scan,obs',\n table='{0}_{1}_bpcal'.format(msname, calname),\n refant=refant,\n apmode='p',\n t='inf'\n )\n cb.solve()\n cb.close()\n", "id": "11221193", "language": "Python", "matching_score": 4.040725231170654, "max_stars_count": 1, "path": "dsacalib/calib.py" }, { "content": "\"\"\"Visibility and calibration solution plotting routines.\n\nPlotting routines to visualize the visibilities and\nthe calibration solutions for DSA-110.\n\nAuthor: <NAME>, <EMAIL>, 10/2019\n\"\"\"\nimport os\nimport glob\nimport h5py\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport astropy.units as u\nimport scipy # pylint: disable=unused-import\nfrom casacore.tables import table\nfrom dsacalib.ms_io import read_caltable, extract_vis_from_ms\nimport dsacalib.constants as ct\nfrom dsautils import cnf\nCONF = cnf.Conf()\n\ndef plot_dyn_spec(vis, fobs, mjd, bname, normalize=False, outname=None,\n show=True, nx=None):\n \"\"\"Plots the dynamic spectrum of the real part of the visibility.\n\n Parameters\n ----------\n vis : ndarray\n The visibilities to plot. Dimensions (baseline, time, freq,\n polarization).\n fobs : ndarray\n The center frequency of each channel in GHz.\n mjd : ndarray\n The center of each subintegration in MJD.\n bname : list\n The name of each baseline in the visibilities.\n normalize : boolean\n If set to ``True``, the visibilities are normalized before plotting.\n Defaults to ``False``.\n outname : str\n If provided and not set to ``None``, the plot will be saved to the file\n `outname`_dynspec.png Defaults to ``None``.\n show : boolean\n If set to ``False`` the plot will be closed after rendering. If set to\n ``True`` the plot is left open. Defaults to ``True``.\n nx : int\n The number of subplots in the horizontal direction of the figure. If\n not provided, or set to ``None``, `nx` is set to the minimum of 5 and\n the number of baselines in the visibilities. 
Defaults to ``None``.\n \"\"\"\n (nbl, nt, nf, npol) = vis.shape\n if nx is None:\n nx = min(nbl, 5)\n ny = (nbl*2)//nx\n if (nbl*2)%nx != 0:\n ny += 1\n\n _, ax = plt.subplots(ny, nx, figsize=(8*nx, 8*ny))\n ax = ax.flatten()\n\n if len(mjd) > 125:\n dplot = np.nanmean(vis[:, :nt//125*125, ...].reshape(nbl, 125, -1, nf,\n npol), 2)\n else:\n dplot = vis.copy()\n if len(fobs) > 125:\n dplot = np.nanmean(dplot[:, :, :nf//125*125, :].reshape(nbl,\n dplot.shape[1],\n 125, -1, npol),\n 3)\n dplot = dplot.real\n dplot = dplot/(dplot.reshape(nbl, -1, npol).mean(axis=1)\n [:, np.newaxis, np.newaxis, :])\n\n if normalize:\n dplot = dplot/np.abs(dplot)\n vmin = -1\n vmax = 1\n else:\n vmin = -100\n vmax = 100\n dplot = dplot - 1\n\n if len(mjd) > 125:\n x = mjd[:mjd.shape[0]//125*125].reshape(125, -1).mean(-1)\n else:\n x = mjd\n x = ((x-x[0])*u.d).to_value(u.min)\n if len(fobs) > 125:\n y = fobs[:nf//125*125].reshape(125, -1).mean(-1)\n else:\n y = fobs\n for i in range(nbl):\n for j in range(npol):\n ax[j*nbl+i].imshow(dplot[i, :, :, j].T, origin='lower',\n interpolation='none', aspect='auto',\n vmin=vmin, vmax=vmax,\n extent=[x[0], x[-1], y[0], y[-1]])\n ax[j*nbl+i].text(0.1, 0.9,\n '{0}, pol {1}'.format(bname[i], 'A' if j == 0 else\n 'B'),\n transform=ax[j*nbl+i].transAxes,\n size=22, color='white')\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n for i in range((ny-1)*nx, ny*nx):\n ax[i].set_xlabel('time (min)')\n for i in np.arange(ny)*nx:\n ax[i].set_ylabel('freq (GHz)')\n if outname is not None:\n plt.savefig('{0}_dynspec.png'.format(outname))\n if not show:\n plt.close()\n\ndef plot_vis_freq(vis, fobs, bname, outname=None, show=True, nx=None):\n r\"\"\"Plots visibilities against frequency.\n\n Creates plots of the amplitude and phase of the visibilities `vis` as a\n function of frequency `fobs`. Two separate figures are opened, one for the\n amplitude and one for the phases. If `outname` is passed, these are saved\n as `outname`\\_amp_freq.png and `outname`\\_phase_freq.png\n\n Parameters\n ----------\n vis : ndarray\n The visibilities to plot. Dimensions (baseline, time, freq,\n polarization).\n fobs : ndarray\n The center frequency of each channel in GHz.\n bname : list\n The name of each baseline in the visibilities.\n outname : str\n If provided and not set to ``None``, the plots will be saved to the\n files `outname`\\_amp_freq.png and `outname`\\_phase_freq.png Defaults to\n ``None``.\n show : boolean\n If set to ``False`` the plot will be closed after rendering. If set to\n ``True`` the plot is left open. Defaults to ``True``.\n nx : int\n The number of subplots in the horizontal direction of the figure. If\n not provided, or set to ``None``, `nx` is set to the minimum of 5 and\n the number of baselines in the visibilities. 
Defaults to ``None``.\n \"\"\"\n nbl = vis.shape[0]\n if nx is None:\n nx = min(nbl, 5)\n ny = nbl//nx\n if nbl%nx != 0:\n ny += 1\n\n dplot = vis.mean(1)\n x = fobs\n\n _, ax = plt.subplots(ny, nx, figsize=(8*nx, 8*ny))\n ax = ax.flatten()\n for i in range(nbl):\n ax[i].plot(x, np.abs(dplot[i, :, 0]), label='A')\n ax[i].plot(x, np.abs(dplot[i, :, 1]), label='B')\n ax[i].text(0.1, 0.9, '{0}: amp'.format(bname[i]),\n transform=ax[i].transAxes,\n size=22)\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n ax[0].legend()\n for i in range((ny-1)*nx, ny*nx):\n ax[i].set_xlabel('freq (GHz)')\n if outname is not None:\n plt.savefig('{0}_amp_freq.png'.format(outname))\n if not show:\n plt.close()\n\n _, ax = plt.subplots(ny, nx, figsize=(8*nx, 8*ny))\n ax = ax.flatten()\n for i in range(nbl):\n ax[i].plot(x, np.angle(dplot[i, :, 0]), label='A')\n ax[i].plot(x, np.angle(dplot[i, :, 1]), label='B')\n ax[i].text(0.1, 0.9, '{0}: phase'.format(bname[i]),\n transform=ax[i].transAxes, size=22)\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n ax[0].legend()\n for i in range((ny-1)*nx, ny*nx):\n ax[i].set_xlabel('freq (GHz)')\n if outname is not None:\n plt.savefig('{0}_phase_freq.png'.format(outname))\n if not show:\n plt.close()\n\ndef plot_vis_time(vis, mjd, bname, outname=None, show=True, nx=None):\n r\"\"\"Plots visibilities against time of observation.\n\n Creates plots of the amplitude and phase of the visibilities `vis` as the\n time of observation `mjd`. Two separate figures are opened, one for the\n amplitude and one for the phases. If `outname` is passed, these are saved\n as `outname`\\_amp_time.png and `outname`\\_phase_time.png\n\n Parameters\n ----------\n vis : ndarray\n The visibilities to plot. Dimensions (baseline, time, freq,\n polarization).\n mjd : ndarray\n The center of each subintegration in MJD.\n bname : list\n The name of each baseline in the visibilities.\n outname : str\n If provided and not set to ``None``, the plot will be saved to the file\n `outname`\\_dynspec.png Defaults to ``None``.\n show : boolean\n If set to ``False`` the plot will be closed after rendering. If set to\n ``True`` the plot is left open. Defaults to ``True``.\n nx : int\n The number of subplots in the horizontal direction of the figure. If\n not provided, or set to ``None``, `nx` is set to the minimum of 5 and\n the number of baselines in the visibilities. 
Defaults to ``None``.\n \"\"\"\n nbl = vis.shape[0]\n if nx is None:\n nx = min(nbl, 5)\n ny = nbl//nx\n\n if nbl%nx != 0:\n ny += 1\n dplot = vis.mean(-2)\n x = ((mjd-mjd[0])*u.d).to_value(u.min)\n\n _, ax = plt.subplots(ny, nx, figsize=(8*nx, 8*ny))\n ax = ax.flatten()\n for i in range(nbl):\n ax[i].plot(x, np.abs(dplot[i, :, 0]), label='A')\n ax[i].plot(x, np.abs(dplot[i, :, 1]), label='B')\n ax[i].text(0.1, 0.9, '{0}: amp'.format(bname[i]),\n transform=ax[i].transAxes, size=22)\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n ax[0].legend()\n for i in range((ny-1)*nx, ny):\n ax[i].set_xlabel('time (min)')\n if outname is not None:\n plt.savefig('{0}_amp_time.png'.format(outname))\n if not show:\n plt.close()\n\n _, ax = plt.subplots(ny, nx, figsize=(8*nx, 8*ny))\n ax = ax.flatten()\n for i in range(nbl):\n ax[i].plot(x, np.angle(dplot[i, :, 0]), label='A')\n ax[i].plot(x, np.angle(dplot[i, :, 1]), label='B')\n ax[i].text(0.1, 0.9, '{0}: phase'.format(bname[i]),\n transform=ax[i].transAxes, size=22)\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n ax[0].legend()\n for i in range((ny-1)*nx, ny):\n ax[i].set_xlabel('time (min)')\n if outname is not None:\n plt.savefig('{0}_phase_time.png'.format(outname))\n if not show:\n plt.close()\n\ndef plot_uv_track(bu, bv, outname=None, show=True):\n \"\"\"Plots the uv track provided.\n\n Parameters\n ----------\n bu : ndarray\n The u-coordinates of the baselines in meters. Dimensions (baselines,\n time).\n bv : ndarray\n The v-coordinates of the baselines in meters. Dimensions (baselines,\n time).\n outname : str\n If provided and not set to ``None``, the plot will be saved to the file\n `outname`_dynspec.png Defaults to ``None``.\n show : boolean\n If set to ``False`` the plot will be closed after rendering. If set to\n ``True`` the plot is left open. Defaults to ``True``.\n \"\"\"\n _, ax = plt.subplots(1, 1, figsize=(8, 8))\n for i in range(bu.shape[0]):\n ax.plot(bu[i, :], bv[i, :])\n ax.set_xlim(-1500, 1500)\n ax.set_ylim(-1500, 1500)\n ax.text(-1200, 1200, 'UV Coverage')\n ax.set_xlabel('$u$ (m)')\n ax.set_ylabel('$v$ (m)')\n if outname is not None:\n plt.savefig('{0}_uv.png'.format(outname))\n if not show:\n plt.close()\n\ndef rebin_vis(arr, nb1, nb2):\n \"\"\"Rebins a 2-D array for plotting.\n\n Excess bins along either axis are discarded.\n\n Parameters\n ----------\n arr : ndarray\n The two-dimensional array to rebin.\n nb1 : int\n The number of bins to rebin by along axis 0.\n nb2 : int\n The number of bins to rebin by along the axis 1.\n\n Returns\n -------\n arr: ndarray\n The rebinned array.\n \"\"\"\n arr = arr[:arr.shape[0]//nb1*nb1, :arr.shape[1]//nb2*nb2].reshape(\n -1, nb1, arr.shape[1]).mean(1)\n arr = arr.reshape(arr.shape[0], -1, nb2).mean(-1)\n return arr\n\ndef plot_calibrated_vis(vis, vis_cal, mjd, fobs, bidx, pol,\n outname=None, show=True):\n \"\"\"Plots the calibrated and uncalibrated visibilities for comparison.\n\n Parameters\n ----------\n vis : ndarray\n The complex, uncalibrated visibilities, with dimensions\n (baseline, time, freq, pol).\n vis_cal : ndarray\n The complex calibrated visibilities, with dimensions\n (baseline, time, freq, pol).\n mjd : ndarray\n The midpoint time of each subintegration in MJD.\n fobs : ndarray\n The center frequency of each channel in GHz.\n bidx : int\n The index along the baseline dimension to plot.\n pol : int\n The index along the polarization dimension to plot.\n outname : str\n The base to use for the name of the png file the plot is saved to. 
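# --- Illustrative sketch: the block-averaging performed by rebin_vis (above). A
# (1000, 385) array rebinned by nb1=128 and nb2=5 becomes (7, 77); leftover rows on
# axis 0 are discarded. Note that the first reshape reuses the original arr.shape[1],
# so axis 1 is expected to already be divisible by nb2.
import numpy as np
arr = np.arange(1000 * 385, dtype=float).reshape(1000, 385)
print(rebin_vis(arr, 128, 5).shape)   # -> (7, 77)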
The\n plot will be saved to `outname`_cal_vis.png if `outname` is provided,\n otherwise no plot will be saved. Defaults ``None``.\n show : boolean\n If `show` is passed ``False``, the plot will be closed after being\n generated. Otherwise, it is left open. Defaults ``True``.\n \"\"\"\n x = mjd[:mjd.shape[0]//128*128].reshape(-1, 128).mean(-1)\n x = ((x-x[0])*u.d).to_value(u.min)\n y = fobs[:fobs.shape[0]//5*5].reshape(-1, 5).mean(-1)\n\n _, ax = plt.subplots(2, 2, figsize=(16, 16), sharex=True, sharey=True)\n\n vplot = rebin_vis(vis[bidx, ..., pol], 128, 5).T\n ax[0, 0].imshow(vplot.real, interpolation='none', origin='lower',\n aspect='auto', vmin=-1, vmax=1,\n extent=[x[0], x[-1], y[0], y[-1]])\n ax[0, 0].text(0.1, 0.9, 'Before cal, real', transform=ax[0, 0].transAxes,\n size=22, color='white')\n ax[1, 0].imshow(vplot.imag, interpolation='none', origin='lower',\n aspect='auto', vmin=-1, vmax=1,\n extent=[x[0], x[-1], y[0], y[-1]])\n ax[1, 0].text(0.1, 0.9, 'Before cal, imag', transform=ax[1, 0].transAxes,\n size=22, color='white')\n\n vplot = rebin_vis(vis_cal[bidx, ..., pol], 128, 5).T\n ax[0, 1].imshow(vplot.real, interpolation='none', origin='lower',\n aspect='auto', vmin=-1, vmax=1,\n extent=[x[0], x[-1], y[0], y[-1]])\n ax[0, 1].text(0.1, 0.9, 'After cal, real', transform=ax[0, 1].transAxes,\n size=22, color='white')\n ax[1, 1].imshow(vplot.imag, interpolation='none', origin='lower',\n aspect='auto', vmin=-1, vmax=1,\n extent=[x[0], x[-1], y[0], y[-1]])\n ax[1, 1].text(0.1, 0.9, 'After cal, imag', transform=ax[1, 1].transAxes,\n size=22, color='white')\n for i in range(2):\n ax[1, i].set_xlabel('time (min)')\n ax[i, 0].set_ylabel('freq (GHz)')\n plt.subplots_adjust(hspace=0, wspace=0)\n if outname is not None:\n plt.savefig('{0}_{1}_cal_vis.png'.format(outname, 'A' if pol == 0 else\n 'B'))\n if not show:\n plt.close()\n\ndef plot_delays(vis_ft, labels, delay_arr, bname, nx=None, outname=None,\n show=True):\n \"\"\"Plots the visibility amplitude against delay.\n\n For a given visibility Fourier transformed along the frequency axis,\n plots the amplitude against delay and calculates the location of the\n fringe peak in delay. Returns the peak delay for each visibility,\n which can be used to calculate the antenna delays.\n\n Parameters\n ----------\n vis_ft : ndarray\n The complex visibilities, dimensions (visibility type, baselines,\n delay). Note that the visibilities must have been Fourier transformed\n along the frequency axis and scrunched along the time axis before being\n passed to `plot_delays`.\n labels : list\n The labels of the types of visibilities passed. For example,\n ``['uncalibrated', 'calibrated']``.\n delay_arr : ndarray\n The center of the delay bins in nanoseconds.\n bname : list\n The baseline labels.\n nx : int\n The number of plots to tile along the horizontal axis. If `nx` is\n given a value of ``None``, this is set to the number of baselines or 5,\n if there are more than 5 baselines.\n outname : str\n The base to use for the name of the png file the plot is saved to. The\n plot will be saved to `outname`_delays.png if an outname is provided.\n If `outname` is given a value of ``None``, no plot will be saved.\n Defaults ``None``.\n show : boolean\n If `show` is given a value of ``False`` the plot will be closed.\n Otherwise, the plot will be left open. 
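The returned peak delays are simply the delay-axis argmax of the fringe amplitude, i.e. ``delay_arr[np.argmax(np.abs(vis_ft), axis=2)]`` as in the body below. A minimal sketch with synthetic shapes (all values here are illustrative only):

    >>> import numpy as np
    >>> rng = np.random.default_rng(0)
    >>> vis_ft = rng.standard_normal((2, 3, 16, 2)) + 1j*rng.standard_normal((2, 3, 16, 2))
    >>> delay_arr = np.linspace(-100., 100., 16)
    >>> delay_arr[np.argmax(np.abs(vis_ft), axis=2)].shape
    (2, 3, 2)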
Defaults ``True``.\n\n Returns\n -------\n delays : ndarray\n The peak delay for each visibility, in nanoseconds.\n \"\"\"\n nvis = vis_ft.shape[0]\n nbl = vis_ft.shape[1]\n npol = vis_ft.shape[-1]\n if nx is None:\n nx = min(nbl, 5)\n ny = nbl//nx\n if nbl%nx != 0:\n ny += 1\n\n alpha = 0.5 if nvis > 2 else 1\n delays = delay_arr[np.argmax(np.abs(vis_ft), axis=2)]\n # could use scipy.signal.find_peaks instead\n for pidx in range(npol):\n _, ax = plt.subplots(ny, nx, figsize=(8*nx, 8*ny), sharex=True)\n ax = ax.flatten()\n for i in range(nbl):\n ax[i].axvline(0, color='black')\n for j in range(nvis):\n ax[i].plot(delay_arr, np.log10(np.abs(vis_ft[j, i, :, pidx])),\n label=labels[j], alpha=alpha)\n ax[i].axvline(delays[j, i, pidx], color='red')\n ax[i].text(0.1, 0.9, '{0}: {1}'.format(bname[i], 'A' if pidx == 0\n else 'B'),\n transform=ax[i].transAxes, size=22)\n plt.subplots_adjust(wspace=0.1, hspace=0.1)\n ax[0].legend()\n for i in range((ny-1)*nx, ny*nx):\n ax[i].set_xlabel('delay (ns)')\n if outname is not None:\n plt.savefig(\n '{0}_{1}_delays.png'.format(\n outname, 'A' if pidx == 0 else 'B'\n ),\n bbox_inches='tight'\n )\n if not show:\n plt.close()\n\n return delays\n\ndef plot_antenna_delays(msname, calname, plabels=None, outname=None, show=True):\n r\"\"\"Plots antenna delay variations between two delay calibrations.\n\n Compares the antenna delays used in the calibration solution on the\n timescale of the entire calibrator pass (assumed to be in a CASA table\n ending in 'kcal') to those calculated on a shorter (e.g. 60s) timescale\n (assumed to be in a CASA table ending in '2kcal').\n\n Parameters\n ----------\n msname : str\n The prefix of the measurement set (`msname`.ms), used to identify the\n correct delay calibration tables.\n calname : str\n The calibrator source name, used to identify the correct delay\n calibration tables. The tables `msname`\\_`calname`\\_kcal (for a single\n delay calculated using the entire calibrator pass) and\n `msname`\\_`calname`\\_2kcal (for delays calculated on a shorter\n timescale) will be opened.\n antenna_order : ndarray\n The antenna names in order.\n outname : str\n The base to use for the name of the png file the plot is saved to. The\n plot will be saved to `outname`\\_antdelays.png. If `outname` is set to\n ``None``, the plot is not saved. Defaults ``None``.\n plabels : list\n The labels along the polarization axis. Defaults to ['B', 'A'].\n outname : string\n The base to use for the name of the png file the plot is saved to. The\n plot will be saved to `outname`_antdelays.png if an outname is provided.\n If `outname` is ``None``, the image is not saved. Defaults ``None``.\n show : boolean\n If set to ``False`` the plot will be closed after it is generated.\n Defaults ``True``.\n\n Returns\n -------\n times : ndarray\n The times at which the antenna delays (on short timescales) were\n calculated, in MJD.\n antenna_delays : ndarray\n The delays of the antennas on short timescales, in nanoseconds.\n Dimensions (polarization, time, antenna).\n kcorr : ndarray\n The delay correction calculated using the entire observation time, in\n nanoseconds. 
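The plotted quantity is the difference between the short-timescale delays and this full-pass correction, averaged over the spectral-window axis, mirroring ``(antenna_delays - kcorr).squeeze(axis=3).mean(axis=2)`` in the body. A minimal sketch with synthetic (antenna, time, spw, freq, pol) shapes:

    >>> import numpy as np
    >>> antenna_delays = np.zeros((3, 10, 1, 1, 2))   # synthetic shapes only
    >>> kcorr = np.zeros((3, 1, 1, 1, 2))
    >>> (antenna_delays - kcorr).squeeze(axis=3).mean(axis=2).shape
    (3, 10, 2)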
Dimensions (polarization, 1, antenna).\n antenna_order : list\n The antenna indices.\n \"\"\"\n if plabels is None:\n plabels = ['B', 'A']\n\n ccyc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\n # Pull the solutions for the entire timerange and the\n # 60-s data from the measurement set tables\n antenna_delays, times, _flags, _ant1, _ant2 = \\\n read_caltable('{0}_{1}_2kcal'.format(msname, calname), cparam=False)\n npol = antenna_delays.shape[-1]\n kcorr, _tkcorr, _flags, antenna_order, _ant2 = \\\n read_caltable('{0}_{1}_kcal'.format(msname, calname), cparam=False)\n nant = len(antenna_order)\n\n val_to_plot = (antenna_delays - kcorr).squeeze(axis=3).mean(axis=2)\n mean_along_time = np.abs(val_to_plot.reshape(nant, -1, npol).mean(1))\n idx_to_plot = np.where(\n (mean_along_time[..., 0] > 1e-10) |\n (mean_along_time[..., 1] > 1e-10))[0]\n tplot = (times-times[0])*ct.SECONDS_PER_DAY/60.\n ny = max(len(idx_to_plot)//10+1, 2)\n _, ax = plt.subplots(ny, 1, figsize=(10, 8*ny))\n lcyc = ['.', 'x']\n for i, bidx in enumerate(idx_to_plot):\n for j in range(npol):\n ax[i//10].plot(\n tplot,\n val_to_plot[bidx, :, j],\n marker=lcyc[j%len(lcyc)],\n label='{0} {1}'.format(antenna_order[bidx]+1, plabels[j]),\n alpha=0.5,\n color=ccyc[i%len(ccyc)]\n )\n for i in range(ny):\n ax[i].set_ylim(-5, 5)\n ax[i].set_ylabel('delay (ns)')\n ax[i].legend(\n ncol=len(idx_to_plot)//15+1,\n fontsize='small',\n bbox_to_anchor=(1.05, 1),\n loc='upper left'\n )\n ax[i].set_xlabel('time (min)')\n ax[i].axhline(1.5)\n ax[i].axhline(-1.5)\n if outname is not None:\n plt.savefig('{0}_antdelays.png'.format(outname), bbox_inches='tight')\n if not show:\n plt.close()\n\n return times, antenna_delays, kcorr, antenna_order\n\ndef plot_gain_calibration(msname, calname, plabels=None, outname=None,\n show=True):\n r\"\"\"Plots the gain calibration solutions from the gacal and gpcal tables.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. Used to identify the gain calibration\n tables.\n calname : str\n The calibrator used in gain calibration. Used to identify the gain\n calibration tables. The tables `msname`\\_`calname`\\_gacal (assumed to\n contain the gain amplitude soutions) and `msname`\\_`calname`\\_gpcal\n (assumed to contain the phase amplitude solutions) will be opened.\n plabels : list\n The names of the polarizations in the calibration solutions. Defaults\n to ``['A', 'B']``.\n outname : str\n The base to use for the name of the png file the plot is saved to. The\n plot is saved to `outname`\\_gaincal.png if `outname` is not ``None``.\n If `outname` is set to ``None, the plot is not saved. Defaults\n ``None``.\n show : boolean\n If set to ``False`` the plot is closed after being generated.\n\n Returns\n -------\n time_days : ndarray\n The times of gain calibration, in days.\n gain_amp : ndarray\n The gain amplitudes. Dimensions (antenna or baseline, time, pol).\n gain_phase : ndarray\n The gain phases. 
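Calibration times are converted from MJD (days) to minutes since the first solution with astropy units, as in the body below. A minimal sketch with synthetic MJDs:

    >>> import numpy as np
    >>> import astropy.units as u
    >>> time = np.array([59000.00, 59000.01])            # synthetic MJDs
    >>> tplot = ((time - time[0])*u.d).to_value(u.min)   # ~[0.0, 14.4] minutes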
Dimensions (antenna or baseline, time, pol).\n labels : ndarray\n The labels of the antennas or baselines along the 0th axis of the gain\n solutions.\n \"\"\"\n if plabels is None:\n plabels = ['A', 'B']\n\n gain_phase, time_phase, _flags, ant1, ant2 = \\\n read_caltable('{0}_{1}_gpcal'.format(msname, calname), cparam=True)\n gain_phase = gain_phase.squeeze(axis=3)\n gain_phase = gain_phase.mean(axis=2)\n npol = gain_phase.shape[-1]\n if np.all(ant2 == ant2[0]):\n labels = ant1\n else:\n labels = np.array([ant1, ant2]).T\n nlab = labels.shape[0]\n\n gain_amp, time, _flags, _ant1, _ant2 = \\\n read_caltable('{0}_{1}_gacal'.format(msname, calname), cparam=True)\n gain_amp = gain_amp.squeeze(axis=3)\n gain_amp = gain_amp.mean(axis=2)\n time_days = time.copy()\n t0 = time[0]\n time = ((time-t0)*u.d).to_value(u.min)\n time_phase = ((time_phase-t0)*u.d).to_value(u.min)\n\n idx_to_plot = np.where(np.abs(gain_amp.reshape(nlab, -1).mean(1)-1)\n > 1e-10)[0]\n\n ccyc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n lcyc = ['-', ':']\n _, ax = plt.subplots(1, 2, figsize=(16, 6), sharex=True)\n\n if gain_amp.shape[1] > 1:\n tplot = time\n gplot = gain_amp\n else:\n tplot = [0, 1]\n gplot = np.tile(gain_amp, [1, 2, 1])\n\n for i, bidx in enumerate(idx_to_plot):\n for pidx in range(npol):\n ax[0].plot(tplot, np.abs(gplot[bidx, :, pidx]),\n label='{0} {1}'.format(labels[bidx]+1, plabels[pidx]),\n color=ccyc[i%len(ccyc)], ls=lcyc[pidx])\n\n if gain_phase.shape[1] > 1:\n tplot = time_phase\n gplot = gain_phase\n else:\n tplot = [tplot[0], tplot[-1]] # use the time from the gains\n gplot = np.tile(gain_phase, [1, 2, 1])\n\n for i, bidx in enumerate(idx_to_plot):\n for pidx in range(npol):\n ax[1].plot(tplot, np.angle(gplot[bidx, :, pidx]),\n label='{0} {1}'.format(labels[bidx]+1, plabels[pidx]),\n color=ccyc[i%len(ccyc)], ls=lcyc[pidx])\n\n ax[0].set_xlim(tplot[0], tplot[-1])\n ax[1].set_ylim(-np.pi, np.pi)\n ax[0].legend(ncol=10, fontsize='x-small', bbox_to_anchor=(0.05, -0.1),\n loc='upper left')\n ax[0].set_xlabel('time (min)')\n ax[1].set_xlabel('time (min)')\n ax[0].set_ylabel('Abs of gain')\n ax[1].set_ylabel('Phase of gain')\n if outname is not None:\n plt.savefig('{0}_gaincal.png'.format(outname), bbox_inches='tight')\n if not show:\n plt.close()\n return time_days, gain_amp, gain_phase, labels\n\ndef plot_bandpass(msname, calname,\n plabels=None, outname=None, show=True):\n r\"\"\"Plots the bandpass calibration solutions in the bcal table.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. Used to identify the calibration table\n to plot.\n calname : str\n The name of the calibrator used in calibration. The calibration table\n `msname`\\_`calname`\\_bcal is opened.\n plabels : list\n The labels for the polarizations. Defaults ``['A', 'B']``.\n outname : str\n The base to use for the name of the png file the plot is saved to. The\n plot is saved to `outname`\\_bandpass.png if an `outname` is not set to\n ``None``. 
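Channel frequencies are read from the measurement set's SPECTRAL_WINDOW subtable and converted to GHz, exactly as in the body below. A minimal sketch, where ``mycal.ms`` is a placeholder for an existing measurement set:

    >>> import numpy as np
    >>> from casacore.tables import table
    >>> with table('mycal.ms/SPECTRAL_WINDOW') as tb:    # 'mycal' is a hypothetical name
    ...     fobs = (np.array(tb.col('CHAN_FREQ')[:])/1e9).reshape(-1)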
If `outname` is set to ``None``, the plot is not saved.\n Defaults ``None``.\n show : boolean\n If set to ``False``, the plot is closed after it is generated.\n\n Returns\n -------\n bpass : ndarray\n The bandpass solutions, dimensions (antenna or baseline, frequency).\n fobs : array\n The frequencies at which the bandpass solutions are calculated, in GHz.\n labels : ndarray\n The antenna or baseline labels along the 0th axis of the bandpass\n solutions.\n \"\"\"\n if plabels is None:\n plabels = ['A', 'B']\n\n bpass, _tbpass, _flags, ant1, ant2 = read_caltable('{0}_{1}_bcal'.format(\n msname, calname), cparam=True)\n # squeeze along the time axis\n # baseline, time, spw, frequency, pol\n bpass = bpass.squeeze(axis=1)\n bpass = bpass.reshape(bpass.shape[0], -1, bpass.shape[-1])\n npol = bpass.shape[-1]\n\n with table('{0}.ms/SPECTRAL_WINDOW'.format(msname)) as tb:\n fobs = (np.array(tb.col('CHAN_FREQ')[:])/1e9).reshape(-1)\n\n if bpass.shape[1] != fobs.shape[0]:\n nint = fobs.shape[0]//bpass.shape[1]\n fobs_plot = np.mean(fobs[:nint]) + \\\n np.arange(bpass.shape[1])*np.median(np.diff(fobs))*nint\n else:\n fobs_plot = fobs.copy()\n\n if np.all(ant2 == ant2[0]):\n labels = ant1+1\n else:\n labels = np.array([ant1+1, ant2+1]).T\n nant = len(ant1)\n\n idxs_to_plot = np.where(np.abs(np.abs(bpass).reshape(nant, -1).mean(1)-1) >\n 1e-5)[0]\n\n ccyc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n lcyc = ['-', ':']\n _, ax = plt.subplots(1, 2, figsize=(16, 6), sharex=True)\n for i, bidx in enumerate(idxs_to_plot):\n for pidx in range(npol):\n ax[0].plot(fobs_plot, np.abs(bpass[bidx, :, pidx]), '.',\n label='{0} {1}'.format(labels[bidx], plabels[pidx]),\n alpha=0.5, ls=lcyc[pidx], color=ccyc[i%len(ccyc)])\n ax[1].plot(fobs_plot, np.angle(bpass[bidx, :, pidx]), '.',\n label='{0} {1}'.format(labels[bidx], plabels[pidx]),\n alpha=0.5, ls=lcyc[pidx], color=ccyc[i%len(ccyc)])\n ax[0].set_xlabel('freq (GHz)')\n ax[1].set_xlabel('freq (GHz)')\n ax[0].set_ylabel('B cal amp')\n ax[1].set_ylabel('B cal phase (rad)')\n ax[0].legend(ncol=3, fontsize='small')\n if outname is not None:\n plt.savefig('{0}_bandpass.png'.format(outname), bbox_inches='tight')\n if not show:\n plt.close()\n\n return bpass, fobs, labels\n\ndef plot_autocorr(UV):\n \"\"\"Plots autocorrelations from UVData object.\n\n Parameters\n ----------\n UV : UVData object\n The UVData object for which to plot autocorrelations.\n \"\"\"\n freq = UV.freq_array.squeeze()\n ant1 = UV.ant_1_array.reshape(UV.Ntimes, -1)[0, :]\n ant2 = UV.ant_2_array.reshape(UV.Ntimes, -1)[0, :]\n time = UV.time_array.reshape(UV.Ntimes, -1)[:, 0]\n vis = UV.data_array.reshape(UV.Ntimes, -1, UV.Nfreqs, UV.Npols)\n autocorrs = np.where(ant1 == ant2)[0]\n ccyc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n _, ax = plt.subplots(\n len(autocorrs)//len(ccyc)+1,\n 1,\n figsize=(8, 4*len(autocorrs)//len(ccyc)+1),\n sharex=True,\n sharey=True\n )\n for j in range(len(autocorrs)//len(ccyc)+1):\n for i, ac in enumerate(autocorrs[len(ccyc)*j:len(ccyc)*(j+1)]):\n ax[j].plot(\n freq.squeeze()/1e9,\n np.abs(np.nanmean(vis[:, ac, ..., 0], axis=0)),\n alpha=0.5,\n color=ccyc[i%len(ccyc)],\n ls='-',\n label=ant1[ac]+1\n )\n ax[j].plot(\n freq.squeeze()/1e9,\n np.abs(np.nanmean(vis[:, ac, ..., 1], axis=0)),\n alpha=0.5,\n color=ccyc[i%len(ccyc)],\n ls=':'\n )\n ax[j].legend()\n ax[-1].set_xlabel('freq (GHz)')\n ax[0].set_yscale('log')\n plt.subplots_adjust(hspace=0)\n\n _, ax = plt.subplots(\n len(autocorrs)//len(ccyc)+1,\n 1,\n figsize=(8, 4*len(autocorrs)//len(ccyc)+1),\n 
sharex=True,\n sharey=True\n )\n for j in range(len(autocorrs)//len(ccyc)+1):\n for i, ac in enumerate(autocorrs[len(ccyc)*j:len(ccyc)*(j+1)]):\n ax[j].plot(\n (time-time[0])*24*60,\n np.abs(vis[:, ac, ..., 0].mean(axis=1)),\n alpha=0.5,\n color=ccyc[i%len(ccyc)],\n ls='-',\n label=ant1[ac]+1\n )\n ax[j].plot(\n (time-time[0])*24*60,\n np.abs(vis[:, ac, ..., 1].mean(axis=1)),\n alpha=0.5,\n color=ccyc[i%len(ccyc)],\n ls=':'\n )\n ax[j].legend()\n ax[-1].set_xlabel('time (min)')\n ax[0].set_yscale('log')\n plt.subplots_adjust(hspace=0)\n\ndef summary_plot(msname, calname, npol, plabels, antennas):\n r\"\"\"Generates a summary plot showing the calibration solutions.\n\n Parameters\n ----------\n msname : str\n The path to the measurement set is ``msname``.ms.\n calname : str\n The name of the calibrator. Calibration tables starting with\n ``msname``\\_``calname`` will be opened.\n npol : int\n The number of polarization indices. Currently not in use.\n plabels : list\n The labels of the polarizations.\n antennas: list\n The antennas to plot.\n\n Returns\n -------\n matplotlib.pyplot.figure\n The handle for the generated figure.\n \"\"\"\n # TODO: remove npol, and assert that plabels and npol have the same shapes\n ny = len(antennas)//10\n if len(antennas)%10 != 0:\n ny += 1\n ccyc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n mcyc = ['.', 'x']\n lcyc = ['-', '--']\n\n fig, ax = plt.subplots(4, ny*2, figsize=(12, 12))\n ax = ax.reshape(4, ny, 2).swapaxes(0, 1)\n ax[0, 0, 0].axis('off')\n\n # Plot kcal\n if os.path.exists('{0}_{1}_2kcal'.format(msname, calname)):\n antenna_delays, times, _flags, _ant1, _ant2 = read_caltable(\n '{0}_{1}_2kcal'.format(msname, calname),\n cparam=False\n )\n npol = antenna_delays.shape[-1]\n kcorr, _tkcorr, _flags, _antenna_order, _ant2 = read_caltable(\n '{0}_{1}_kcal'.format(msname, calname),\n cparam=False\n )\n val_to_plot = (antenna_delays - kcorr).squeeze(axis=3).mean(axis=2)\n tplot = (times-times[0])*ct.SECONDS_PER_DAY/60.\n\n for i, ant in enumerate(antennas):\n for j in range(npol):\n ax[i//10, 0, 1].plot(\n tplot,\n val_to_plot[ant-1, :, j],\n marker=mcyc[j%len(mcyc)],\n linestyle=lcyc[j%len(lcyc)],\n label='{0} {1}'.format(ant, plabels[j]),\n alpha=0.5,\n color=ccyc[i%len(ccyc)]\n )\n for i in range(ny):\n ax[i, 0, 1].set_ylim(-5, 5)\n ax[i, 0, 1].axhline(1.5)\n ax[i, 0, 1].axhline(-1.5)\n ax[0, 0, 1].legend(ncol=3, loc='upper left', bbox_to_anchor=(-1, 1))\n\n if os.path.exists('{0}_{1}_bcal'.format(\n msname,\n calname\n )):\n bpass, _tbpass, _flags, ant1, ant2 = read_caltable(\n '{0}_{1}_bcal'.format(\n msname, calname\n ),\n cparam=True\n )\n bpass = bpass.squeeze(axis=1)\n bpass = bpass.reshape(bpass.shape[0], -1, bpass.shape[-1])\n npol = bpass.shape[-1]\n\n with table('{0}.ms/SPECTRAL_WINDOW'.format(msname)) as tb:\n fobs = (np.array(tb.col('CHAN_FREQ')[:])/1e9).reshape(-1)\n\n if bpass.shape[1] != fobs.shape[0]:\n nint = fobs.shape[0]//bpass.shape[1]\n fobs_plot = np.mean(fobs[:nint]) + \\\n np.arange(bpass.shape[1])*np.median(np.diff(fobs))*nint\n else:\n fobs_plot = fobs.copy()\n\n for i, ant in enumerate(antennas):\n for pidx in range(npol):\n ax[i//10, 1, 0].plot(\n fobs_plot,\n np.abs(bpass[ant-1, :, pidx]),\n label='{0} {1}'.format(ant, plabels[pidx]),\n alpha=0.5,\n ls=lcyc[pidx%len(lcyc)],\n color=ccyc[i%len(ccyc)]\n )\n ax[i//10, 2, 0].plot(\n fobs_plot,\n np.angle(bpass[ant-1, :, pidx]),\n label='{0} {1}'.format(ant, plabels[pidx]),\n alpha=0.5,\n ls=lcyc[pidx],\n color=ccyc[i%len(ccyc)]\n )\n ax[i//10, 1, 
0].set_yscale('log')\n\n if os.path.exists('{0}_{1}_2gcal'.format(msname, calname)):\n gain, time, _flags, ant1, ant2 = \\\n read_caltable('{0}_{1}_2gcal'.format(msname, calname), cparam=True)\n gain = gain.squeeze(axis=3)\n gain = gain.mean(axis=2)\n t0 = time[0]\n time = ((time-t0)*u.d).to_value(u.min)\n\n for i, ant in enumerate(antennas):\n for pidx in range(npol):\n ax[i//10, 1, 1].plot(\n time,\n np.abs(gain[ant-1, :, pidx]),\n label='{0} {1}'.format(ant, plabels[pidx]),\n color=ccyc[i%len(ccyc)],\n ls=lcyc[pidx%len(lcyc)],\n marker=mcyc[pidx%len(mcyc)]\n )\n\n for i, ant in enumerate(antennas):\n for pidx in range(npol):\n ax[i//10, 2, 1].plot(\n time,\n np.angle(gain[ant-1, :, pidx]),\n label='{0} {1}'.format(ant, plabels[pidx]),\n color=ccyc[i%len(ccyc)],\n ls=lcyc[pidx]\n )\n for i in range(ny):\n ax[i, 1, 1].set_xlim(tplot[0], tplot[-1])\n ax[i, 2, 1].set_xlim(tplot[0], tplot[-1])\n ax[i, 2, 1].set_ylim(-np.pi/10, np.pi/10)\n else:\n t0 = None\n\n vis, time, fobs, _, ant1, ant2, _, _, _ = extract_vis_from_ms(msname)\n autocorr_idx = np.where(ant1 == ant2)[0]\n vis_autocorr = vis[autocorr_idx, ...]\n vis_time = np.median(\n vis_autocorr.reshape(\n vis_autocorr.shape[0], vis_autocorr.shape[1], -1, 2\n ), axis=-2\n )\n vis_freq = np.median(\n vis_autocorr.reshape(\n vis_autocorr.shape[0], vis_autocorr.shape[1], -1, 2\n ), axis=1\n )\n if t0 is None:\n t0 = time[0]\n time = ((time-t0)*u.d).to_value(u.min)\n vis_ant_order = ant1[autocorr_idx]\n for i, ant in enumerate(antennas):\n vis_idx = np.where(vis_ant_order == ant-1)[0]\n if len(vis_idx) > 0:\n vis_idx = vis_idx[0]\n for pidx in range(npol):\n ax[i//10, 3, 1].plot(\n time-time[0],\n np.abs(vis_time[vis_idx, :, pidx]),\n label='{0} {1}'.format(ant, plabels[pidx]),\n color=ccyc[i%len(ccyc)],\n ls=lcyc[pidx%len(lcyc)],\n alpha=0.5\n )\n ax[i//10, 3, 0].plot(\n fobs,\n np.abs(vis_freq[vis_idx, :, pidx]),\n label='{0} {1}'.format(ant, plabels[pidx]),\n color=ccyc[i%len(ccyc)],\n ls=lcyc[pidx%len(lcyc)],\n alpha=0.5\n )\n\n for i in range(ny):\n ax[i, 3, 1].set_xlabel('time (min)')\n ax[i, 3, 0].set_xlabel('freq (GHz)')\n ax[i, 3, 1].set_ylabel('autocorr power')\n ax[i, 3, 0].set_ylabel('autocorr power')\n ax[i, 3, 0].set_yscale('log')\n ax[i, 3, 1].set_yscale('log')\n ax[i, 1, 1].set_ylabel('Abs of gain')\n ax[i, 2, 1].set_ylabel('Phase of gain')\n ax[i, 1, 0].set_ylabel('B cal amp')\n ax[i, 2, 0].set_ylabel('B cal phase (rad)')\n ax[i, 0, 1].set_ylabel('delay (ns)')\n fig.suptitle('{0}'.format(msname))\n return fig\n\ndef plot_current_beamformer_solutions(\n filenames, calname, date, beamformer_name, corrlist=np.arange(1, 16+1),\n antennas_to_plot=None, antennas=None, outname=None, show=True,\n gaindir='/home/user/beamformer_weights/',\n hdf5dir='/mnt/data/dsa110/correlator/'\n):\n r\"\"\"Plots the phase difference between the two polarizations.\n\n Applies the beamformer weights to the given hdf5 files, and then plots the\n remaining phase difference between the two polarizations for each antenna.\n\n Parameters\n ----------\n filenames : list\n A list of the hdf5 filenames for which to plot. Each filename should\n omit the directory and the .hdf5 extension. E.g.\n `['2020-10-06T15:32:01', '2020-10-06T15:47:01']` would a valid\n argument to plot data for 30 minutes starting at 2020-10-06T16:32:01.\n calname : str\n The name of the source or event that you are plotting. Used in the\n title of the plot.\n date : str\n The date of the source or event that you are plotting. Used in the\n title of the plot. e.g. 
'2020-10-06'\n beamformer_name : str\n The title of the beamformer weights.\n e.g. 'J141120+521209_2021-02-19T12:05:51'\n corrlist : list(int)\n A list of the correlator indices to plot. Defaults to correlators 01\n through 16.\n antennas_to_plot : array(int)\n The names of the antennas to plot beamformer solutions for. Defaults to\n `antennas`.\n antennas : array(int)\n The names of the antennas for which beamformer solutions were\n generated. Defaults to values in dsautils.cnf\n outname : str\n The base to use for the name of the png file the plot is saved to. The\n plot is saved to `outname`\\_beamformerweights.png if `outname` is not\n ``None``. f `outname` is set to ``None, the plot is not saved.\n Defaults `None``.\n show : boolean\n If set to ``False`` the plot is closed after being generated.\n gaindir : str\n The full path to the directory in which the beamformer weights are\n stored.\n hdf5dir : str\n The full path to the directory in which the correlated hdf5 files are\n stored. Files were be searched for in `hdf5dir`/corr??/\n \"\"\"\n if antennas is None:\n antennas = np.array(list(CONF.get('corr')['antenna_order'].values))\n assert len(antennas) == 64\n if antennas_to_plot is None:\n antennas_to_plot = antennas\n # Should be generalized to different times, baselines\n visdata_corr = np.zeros(\n (len(filenames)*280, 325, 16, 48, 2),\n dtype=np.complex\n )\n for corridx, corr in enumerate(corrlist):\n visdata = np.zeros(\n (len(filenames), 91000, 1, 48, 2),\n dtype=np.complex\n )\n for i, filename in enumerate(filenames):\n files = sorted(glob.glob(\n '{0}/corr{1:02d}/{2}??.hdf5'.format(\n hdf5dir,\n corr,\n filename[:-2]\n )\n ))\n if len(files) > 0:\n with h5py.File(files[0], 'r') as f:\n visdata[i, ...] = np.array(f['Data']['visdata'][:])\n ant1 = np.array(f['Header']['ant_1_array'][:])\n ant2 = np.array(f['Header']['ant_2_array'][:])\n visdata = visdata.reshape((-1, 325, 48, 2))\n ant1 = ant1.reshape(-1, 325)[0, :]\n ant2 = ant2.reshape(-1, 325)[0, :]\n with open(\n '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(\n gaindir,\n corr,\n beamformer_name\n ),\n 'rb'\n ) as f:\n data = np.fromfile(f, '<f4')\n gains = data[64:].reshape(64, 48, 2, 2)\n gains = gains[..., 0]+1.0j*gains[..., 1]\n my_gains = np.zeros((325, 48, 2), dtype=np.complex)\n for i in range(325):\n idx1 = np.where(antennas == ant1[i]+1)[0][0]\n idx2 = np.where(antennas == ant2[i]+1)[0][0]\n my_gains[i, ...] = np.conjugate(gains[idx1, ...])*gains[idx2, ...]\n visdata_corr[:, :, corridx, ...] 
= (\n visdata*my_gains[np.newaxis, :, :, :]\n )\n visdata_corr = visdata_corr.reshape((-1, 325, 16*48, 2))\n\n fig, ax = plt.subplots(5, 4, figsize=(25, 12), sharex=True, sharey=True)\n for axi in ax[-1, :]:\n axi.set_xlabel('freq channel')\n for axi in ax[:, 0]:\n axi.set_ylabel('time bin')\n ax = ax.flatten()\n for i, ant in enumerate(antennas_to_plot-1):\n idx = np.where((ant1 == 23) & (ant2 == ant))[0][0]\n ax[i].imshow(\n np.angle(\n visdata_corr[:, idx, :, 0]/visdata_corr[:, idx, :, 1]\n ),\n aspect='auto',\n origin='lower',\n interpolation='none',\n cmap=plt.get_cmap('RdBu'),\n vmin=-np.pi,\n vmax=np.pi\n )\n ax[i].set_title(\n '24-{0}, {1:.2f}'.format(\n ant+1,\n np.angle(\n np.mean(\n visdata_corr[:, idx, :, 0]/visdata_corr[:, idx, :, 1]\n )\n )\n )\n )\n fig.suptitle('{0} {1}'.format(date, calname))\n if outname is not None:\n plt.savefig('{0}_beamformerweights.png'.format(outname))\n if not show:\n plt.close()\n\ndef plot_bandpass_phases(\n filenames, antennas, refant=24, outname=None, show=True,\n msdir='/mnt/data/dsa110/calibration/'\n):\n r\"\"\"Plot the bandpass phase observed over multiple calibrators.\n\n Parameters:\n -----------\n filenames : dict\n The details of the calibrator passes to plot.\n antennas : list\n The antenna names (as ints) to plot.\n refant : int\n The reference antenna to plot phases against.\n outname : str\n The base to use for the name of the png file the plot is saved to. The\n plot is saved to `outname`\\_phase.png if `outname` is not ``None``. If\n `outname` is set to ``None, the plot is not saved. Defaults `None``.\n show : boolean\n If set to ``False`` the plot is closed after being generated.\n \"\"\"\n nentries = 0\n for date in filenames.keys():\n nentries += len(filenames[date].keys())\n gains = [None]*nentries\n calnames = [None]*nentries\n gshape = None\n\n i = 0\n for date in filenames.keys():\n cals = filenames[date].keys()\n transit_times = [filenames[date][cal]['transit_time'] for cal in cals]\n transit_times, cals = zip(*sorted(zip(transit_times, cals)))\n for cal in cals:\n calnames[i] = cal\n msname = '{0}/{1}_{2}'.format(msdir, date, cal)\n if os.path.exists('{0}_{1}_bpcal'.format(msname, cal)):\n with table('{0}_{1}_bpcal'.format(msname, cal)) as tb:\n gains[i] = np.array(tb.CPARAM[:])\n gshape = gains[i].shape\n i += 1\n for i, gain in enumerate(gains):\n if gain is None:\n gains[i] = np.zeros(gshape, dtype=np.complex64)\n gains = np.array(gains)\n nx = 4\n ny = len(antennas)//nx\n if len(antennas)%nx > 0:\n ny += 1\n _, ax = plt.subplots(\n ny,\n nx,\n figsize=(3*nx, 3*ny),\n sharex=True,\n sharey=True\n )\n ax[0, 0].set_yticks(np.arange(nentries))\n for axi in ax[0, :]:\n axi.set_yticklabels(calnames)\n ax = ax.flatten()\n for i in np.arange(len(antennas)):\n ax[i].imshow(\n np.angle(gains[:, antennas[i]-1, :, 0]/gains[:, refant-1, :, 0]),\n vmin=-np.pi,\n vmax=np.pi,\n aspect='auto',\n origin='lower',\n interpolation='None',\n cmap=plt.get_cmap('RdBu')\n )\n ax[i].annotate(\n '{0}'.format(antennas[i]),\n (0, 1),\n xycoords='axes fraction'\n )\n ax[i].set_xlabel('Frequency channel')\n if outname is not None:\n plt.savefig('{0}_phases.png'.format(outname))\n if not show:\n plt.close()\n\ndef plot_beamformer_weights(\n beamformer_names,\n corrlist=np.arange(1, 16+1),\n antennas_to_plot=None,\n antennas=None,\n outname=None,\n pols=None,\n show=True,\n gaindir='/home/user/beamformer_weights/'\n):\n \"\"\"Plot beamformer weights from a number of beamformer solutions.\n\n Parameters\n ----------\n beamformer_names : list(str)\n 
The postfixes of the beamformer weight files to plot. Will open\n beamformer_weights_corr??_`beamformer_name`.dat for each item in\n beamformer_names.\n corrlist : list(int)\n The corrnode numbers.\n antennas_to_plot : list(int)\n The names of the antennas to plot. Defaults to `antennas`.\n antennas : list(int)\n The names of the antennas in the beamformer weight files. Defaults to\n list in dsautils.cnf\n outname : str\n The prefix of the file to save the plot to. If None, no figure is saved.\n pols : list(str)\n The order of the pols in the beamformer weight files.\n show : bool\n If False, the plot is closed after saving.\n gaindir : str\n The directory in which the beamformer weight files are saved.\n\n Returns\n -------\n ndarray\n The beamformer weights.\n \"\"\"\n if pols is None:\n pols = ['B', 'A']\n if antennas is None:\n antennas = np.array(list(CONF.get('corr')['antenna_order'].values()))\n assert len(antennas) == 64\n if antennas_to_plot is None:\n antennas_to_plot = antennas\n # Set shape of the figure\n nplots = 4\n nx = 5\n ny = len(antennas_to_plot)//nx\n if len(antennas_to_plot)%nx != 0:\n ny += 1\n gains = np.zeros(\n (len(beamformer_names), len(antennas), len(corrlist), 48, 2),\n dtype=np.complex\n )\n for i, beamformer_name in enumerate(beamformer_names):\n for corridx, corr in enumerate(corrlist):\n with open(\n '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(\n gaindir,\n corr,\n beamformer_name\n ),\n 'rb'\n ) as f:\n data = np.fromfile(f, '<f4')\n temp = data[64:].reshape(64, 48, 2, 2)\n gains[i, :, corridx, :, :] = temp[..., 0]+1.0j*temp[..., 1]\n gains = gains.reshape(\n (len(beamformer_names), len(antennas), len(corrlist)*48, 2)\n )\n #ymax = np.nanmax(np.log10(np.abs(gains)))\n #ymin = np.nanmin(np.log10(np.abs(gains)))\n # Phase, polarization B\n _fig, ax = plt.subplots(\n nplots*ny,\n nx,\n figsize=(6*nx, 2.5*ny*nplots),\n sharex=True,\n sharey=False\n )\n for axi in ax[-1, :]:\n axi.set_xlabel('freq channel')\n for nplot in range(nplots):\n polidx = nplot%2\n angle = nplot//2\n axi = ax[ny*nplot:ny*(nplot+1), :]\n for axii in axi[:, 0]:\n axii.set_ylabel('phase (rad)' if angle else 'amplitude (arb)')\n axi = axi.flatten()\n for i, ant in enumerate(antennas_to_plot):\n for bnidx, beamformer_name in enumerate(beamformer_names):\n idx = np.where(antennas == ant)[0][0]\n axi[i].plot(\n np.angle(gains[bnidx, idx, :, polidx]) if angle else \\\n np.abs(gains[bnidx, idx, :, polidx]),\n alpha=0.4,\n ls='None',\n marker='.',\n label=beamformer_name\n )\n axi[i].set_title('{0} {1}: {2}'.format(\n ant, pols[polidx], 'phase' if angle else 'amp'\n ))\n if angle:\n axi[i].set_ylim(-np.pi, np.pi)\n #else:\n # axi[i].set_ylim(ymin, ymax)\n axi[0].legend()\n\n if outname is not None:\n plt.savefig('{0}_averagedweights.png'.format(outname))\n if not show:\n plt.close()\n return gains\n", "id": "11646000", "language": "Python", "matching_score": 6.244538307189941, "max_stars_count": 1, "path": "dsacalib/plotting.py" }, { "content": "\"\"\"\nDsacalib/MS_IO.PY\n\n<NAME>, <EMAIL>, 10/2019\n\nRoutines to interact with CASA measurement sets and calibration tables.\n\"\"\"\n\n# To do:\n# Replace to_deg w/ astropy versions\n\n# Always import scipy before importing casatools.\nimport shutil\nimport os\nimport glob\nimport traceback\n#from scipy.interpolate import interp1d\nimport numpy as np\n#from pkg_resources import resource_filename\nimport yaml\nimport scipy # pylint: disable=unused-import\nimport astropy.units as u\nimport astropy.constants as c\nimport casatools as cc\nfrom 
casatasks import importuvfits, virtualconcat\nfrom casacore.tables import addImagingColumns, table\nfrom pyuvdata import UVData\nfrom dsautils import dsa_store\nfrom dsautils import calstatus as cs\nimport dsautils.cnf as dsc\nfrom dsamfs.fringestopping import calc_uvw_blt\nfrom dsacalib import constants as ct\nimport dsacalib.utils as du\nfrom dsacalib.fringestopping import calc_uvw, amplitude_sky_model\nfrom antpos.utils import get_itrf # pylint: disable=wrong-import-order\nfrom astropy.utils import iers # pylint: disable=wrong-import-order\niers.conf.iers_auto_url_mirror = ct.IERS_TABLE\niers.conf.auto_max_age = None\nfrom astropy.time import Time # pylint: disable=wrong-import-position wrong-import-order\n\nde = dsa_store.DsaStore()\n\nCONF = dsc.Conf()\nCORR_PARAMS = CONF.get('corr')\nREFMJD = CONF.get('fringe')['refmjd']\n\ndef simulate_ms(ofile, tname, anum, xx, yy, zz, diam, mount, pos_obs, spwname,\n freq, deltafreq, freqresolution, nchannels, integrationtime,\n obstm, dt, source, stoptime, autocorr, fullpol):\n \"\"\"Simulates a measurement set with cross-correlations only.\n\n WARNING: Not simulating autocorrelations correctly regardless of inclusion\n of autocorr parameter.\n\n Parameters\n ----------\n ofile : str\n The full path to which the measurement set will be written.\n tname : str\n The telescope name.\n xx, yy, zz : arrays\n The X, Y and Z ITRF coordinates of the antennas, in meters.\n diam : float\n The dish diameter in meters.\n mount : str\n The mount type, e.g. 'alt-az'.\n pos_obs : CASA measure instance\n The location of the observatory.\n spwname : str\n The name of the spectral window, e.g. 'L-band'.\n freq : str\n The central frequency, as a CASA-recognized string, e.g. '1.4GHz'.\n deltafreq : str\n The size of each channel, as a CASA-recognized string, e.g. '1.24kHz'.\n freqresolution : str\n The frequency resolution, as a CASA-recognized string, e.g. '1.24kHz'.\n nchannels : int\n The number of frequency channels.\n integrationtime : str\n The subintegration time, i.e. the width of each time bin, e.g. '1.4s'.\n obstm : float\n The start time of the observation in MJD.\n dt : float\n The offset between the CASA start time and the true start time in days.\n source : dsacalib.utils.source instance\n The source observed (or the phase-center).\n stoptime : float\n The end time of the observation in MJD. 
DS: should be s?\n autocorr : boolean\n Set to ``True`` if the visibilities include autocorrelations, ``False``\n if the only include crosscorrelations.\n \"\"\"\n me = cc.measures()\n qa = cc.quanta()\n sm = cc.simulator()\n sm.open(ofile)\n sm.setconfig(\n telescopename=tname,\n x=xx,\n y=yy,\n z=zz,\n dishdiameter=diam,\n mount=mount,\n antname=anum,\n coordsystem='global',\n referencelocation=pos_obs\n )\n sm.setspwindow(\n spwname=spwname,\n freq=freq,\n deltafreq=deltafreq,\n freqresolution=freqresolution,\n nchannels=nchannels,\n stokes='XX XY YX YY' if fullpol else 'XX YY'\n )\n # TODO: use hourangle instead\n sm.settimes(\n integrationtime=integrationtime,\n usehourangle=False,\n referencetime=me.epoch('utc', qa.quantity(obstm-dt, 'd'))\n )\n sm.setfield(\n sourcename=source.name,\n sourcedirection=me.direction(\n source.epoch,\n qa.quantity(source.ra.to_value(u.rad), 'rad'),\n qa.quantity(source.dec.to_value(u.rad), 'rad')\n )\n )\n sm.setauto(autocorrwt=1.0 if autocorr else 0.0)\n sm.observe(source.name, spwname, starttime='0s', stoptime=stoptime)\n sm.close()\n\ndef convert_to_ms(source, vis, obstm, ofile, bname, antenna_order,\n tsamp=ct.TSAMP*ct.NINT, nint=1, antpos=None, model=None,\n dt=ct.CASA_TIME_OFFSET, dsa10=True):\n \"\"\" Writes visibilities to an ms.\n\n Uses the casa simulator tool to write the metadata to an ms, then uses the\n casa ms tool to replace the visibilities with the observed data.\n\n Parameters\n ----------\n source : source class instance\n The calibrator (or position) used for fringestopping.\n vis : ndarray\n The complex visibilities, dimensions (baseline, time, channel,\n polarization).\n obstm : float\n The start time of the observation in MJD.\n ofile : str\n The name for the created ms. Writes to `ofile`.ms.\n bname : list\n The list of baselines names in the form [[ant1, ant2],...].\n antenna_order: list\n The list of the antennas, in CASA ordering.\n tsamp : float\n The sampling time of the input visibilities in seconds. Defaults to\n the value `tsamp`*`nint` as defined in `dsacalib.constants`.\n nint : int\n The number of time bins to integrate by before saving to a measurement\n set. Defaults 1.\n antpos : str\n The full path to the text file containing ITRF antenna positions or the\n csv file containing the station positions in longitude and latitude.\n Defaults `dsacalib.constants.PKG_DATA_PATH`/antpos_ITRF.txt.\n model : ndarray\n The visibility model to write to the measurement set (and against which\n gain calibration will be done). Must have the same shape as the\n visibilities `vis`. If given a value of ``None``, an array of ones will\n be used as the model. Defaults ``None``.\n dt : float\n The offset between the CASA start time and the data start time in days.\n Defaults to the value of `casa_time_offset` in `dsacalib.constants`.\n dsa10 : boolean\n Set to ``True`` if the data are from the dsa10 correlator. 
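When ``nint`` > 1, the time axis is padded with NaNs up to a multiple of ``nint`` and then averaged in blocks of ``nint``, as in the body below. A minimal sketch with a small synthetic array:

    >>> import numpy as np
    >>> vis = np.ones((1, 10, 4, 2), dtype=np.complex128)   # synthetic (bl, time, chan, pol)
    >>> nint = 4
    >>> npad = (nint - vis.shape[1] % nint) % nint
    >>> vis = np.nanmean(np.pad(vis, ((0, 0), (0, npad), (0, 0), (0, 0)),
    ...                         mode='constant', constant_values=(np.nan,)
    ...                        ).reshape(1, -1, nint, 4, 2), axis=2)
    >>> vis.shape
    (1, 3, 4, 2)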
Defaults\n ``True``.\n \"\"\"\n if antpos is None:\n antpos = '{0}/antpos_ITRF.txt'.format(ct.PKG_DATA_PATH)\n vis = vis.astype(np.complex128)\n if model is not None:\n model = model.astype(np.complex128)\n\n nant = len(antenna_order)\n\n me = cc.measures()\n\n # Observatory parameters\n tname = 'OVRO_MMA'\n diam = 4.5 # m\n obs = 'OVRO_MMA'\n mount = 'alt-az'\n pos_obs = me.observatory(obs)\n\n # Backend\n if dsa10:\n spwname = 'L_BAND'\n freq = '1.4871533196875GHz'\n deltafreq = '-0.244140625MHz'\n freqresolution = deltafreq\n else:\n spwname = 'L_BAND'\n freq = '1.28GHz'\n deltafreq = '40.6901041666667kHz'\n freqresolution = deltafreq\n (_, _, nchannels, npol) = vis.shape\n\n # Rebin visibilities\n integrationtime = '{0}s'.format(tsamp*nint)\n if nint != 1:\n npad = nint-vis.shape[1]%nint\n if npad == nint:\n npad = 0\n vis = np.nanmean(np.pad(vis, ((0, 0), (0, npad), (0, 0), (0, 0)),\n mode='constant',\n constant_values=(np.nan, )).reshape(\n vis.shape[0], -1, nint, vis.shape[2],\n vis.shape[3]), axis=2)\n if model is not None:\n model = np.nanmean(np.pad(model,\n ((0, 0), (0, npad), (0, 0), (0, 0)),\n mode='constant',\n constant_values=(np.nan, )).reshape(\n model.shape[0], -1, nint,\n model.shape[2], model.shape[3]),\n axis=2)\n stoptime = '{0}s'.format(vis.shape[1]*tsamp*nint)\n\n anum, xx, yy, zz = du.get_antpos_itrf(antpos)\n # Sort the antenna positions\n idx_order = sorted([int(a)-1 for a in antenna_order])\n anum = np.array(anum)[idx_order]\n xx = np.array(xx)\n yy = np.array(yy)\n zz = np.array(zz)\n xx = xx[idx_order]\n yy = yy[idx_order]\n zz = zz[idx_order]\n\n nints = np.zeros(nant, dtype=int)\n for i, an in enumerate(anum):\n nints[i] = np.sum(np.array(bname)[:, 0] == an)\n nints, anum, xx, yy, zz = zip(*sorted(zip(nints, anum, xx, yy, zz),\n reverse=True))\n\n # Check that the visibilities are ordered correctly by checking the order\n # of baselines in bname\n idx_order = []\n autocorr = bname[0][0] == bname[0][1]\n\n for i in range(nant):\n for j in range(i if autocorr else i+1, nant):\n idx_order += [bname.index([anum[i], anum[j]])]\n assert idx_order == list(np.arange(len(bname), dtype=int)), \\\n 'Visibilities not ordered by baseline'\n anum = [str(a) for a in anum]\n\n simulate_ms(\n '{0}.ms'.format(ofile), tname, anum, xx, yy, zz, diam, mount, pos_obs,\n spwname, freq, deltafreq, freqresolution, nchannels, integrationtime,\n obstm, dt, source, stoptime, autocorr, fullpol=False\n )\n\n # Check that the time is correct\n ms = cc.ms()\n ms.open('{0}.ms'.format(ofile))\n tstart_ms = ms.summary()['BeginTime']\n ms.close()\n\n print('autocorr :', autocorr)\n\n if np.abs(tstart_ms-obstm) > 1e-10:\n dt = dt+(tstart_ms-obstm)\n print('Updating casa time offset to {0}s'.format(\n dt*ct.SECONDS_PER_DAY))\n print('Rerunning simulator')\n simulate_ms(\n '{0}.ms'.format(ofile), tname, anum, xx, yy, zz, diam, mount,\n pos_obs, spwname, freq, deltafreq, freqresolution, nchannels,\n integrationtime, obstm, dt, source, stoptime, autocorr,\n fullpol=False\n )\n\n # Reopen the measurement set and write the observed visibilities\n ms = cc.ms()\n ms.open('{0}.ms'.format(ofile), nomodify=False)\n ms.selectinit(datadescid=0)\n\n rec = ms.getdata([\"data\"])\n # rec['data'] has shape [scan, channel, [time*baseline]]\n vis = vis.T.reshape((npol, nchannels, -1))\n rec['data'] = vis\n ms.putdata(rec)\n ms.close()\n\n ms = cc.ms()\n ms.open('{0}.ms'.format(ofile), nomodify=False)\n if model is None:\n model = np.ones(vis.shape, dtype=complex)\n else:\n model = model.T.reshape((npol, nchannels, 
-1))\n rec = ms.getdata([\"model_data\"])\n rec['model_data'] = model\n ms.putdata(rec)\n ms.close()\n\n # Check that the time is correct\n ms = cc.ms()\n ms.open('{0}.ms'.format(ofile))\n tstart_ms = ms.summary()['BeginTime']\n tstart_ms2 = ms.getdata('TIME')['time'][0]/ct.SECONDS_PER_DAY\n ms.close()\n\n assert np.abs(tstart_ms-(tstart_ms2-tsamp*nint/ct.SECONDS_PER_DAY/2)) \\\n < 1e-10, 'Data start time does not agree with MS start time'\n\n assert np.abs(tstart_ms - obstm) < 1e-10, \\\n 'Measurement set start time does not agree with input tstart'\n print('Visibilities writing to ms {0}.ms'.format(ofile))\n\ndef extract_vis_from_ms(msname, data='data', swapaxes=True):\n \"\"\"Extracts visibilities from a CASA measurement set.\n\n Parameters\n ----------\n msname : str\n The measurement set. Opens `msname`.ms\n data : str\n The visibilities to extract. Can be `data`, `model` or `corrected`.\n\n Returns\n -------\n vals : ndarray\n The visibilities, dimensions (baseline, time, spw, freq, pol).\n time : array\n The time of each integration in days.\n fobs : array\n The frequency of each channel in GHz.\n flags : ndarray\n Flags for the visibilities, same shape as vals. True if flagged.\n ant1, ant2 : array\n The antenna indices for each baselines in the visibilities.\n pt_dec : float\n The pointing declination of the array. (Note: Not the phase center, but\n the physical pointing of the antennas.)\n spw : array\n The spectral window indices.\n orig_shape : list\n The order of the first three axes in the ms.\n \"\"\"\n with table('{0}.ms'.format(msname)) as tb:\n ant1 = np.array(tb.ANTENNA1[:])\n ant2 = np.array(tb.ANTENNA2[:])\n vals = np.array(tb.getcol(data.upper())[:])\n flags = np.array(tb.FLAG[:])\n time = np.array(tb.TIME[:])\n spw = np.array(tb.DATA_DESC_ID[:])\n with table('{0}.ms/SPECTRAL_WINDOW'.format(msname)) as tb:\n fobs = (np.array(tb.col('CHAN_FREQ')[:])/1e9).reshape(-1)\n\n baseline = 2048*(ant1+1)+(ant2+1)+2**16\n\n time, vals, flags, ant1, ant2, spw, orig_shape = reshape_calibration_data(\n vals, flags, ant1, ant2, baseline, time, spw, swapaxes)\n\n with table('{0}.ms/FIELD'.format(msname)) as tb:\n pt_dec = tb.PHASE_DIR[:][0][0][1]\n\n return vals, time/ct.SECONDS_PER_DAY, fobs, flags, ant1, ant2, pt_dec, \\\n spw, orig_shape\n\ndef read_caltable(tablename, cparam=False, reshape=True):\n \"\"\"Requires that each spw has the same number of frequency channels.\n\n Parameters\n ----------\n tablename : str\n The full path to the calibration table.\n cparam : bool\n If True, reads the column CPARAM in the calibrtion table. Otherwise\n reads FPARAM.\n\n Returns\n -------\n vals : ndarray\n The visibilities, dimensions (baseline, time, spw, freq, pol).\n time : array\n The time of each integration in days.\n flags : ndarray\n Flags for the visibilities, same shape as vals. 
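Rows are grouped into unique baselines using the same packed index as for the measurement set, ``2048*(ant1+1) + (ant2+1) + 2**16``. A small worked example with synthetic antenna indices:

    >>> ant1, ant2 = 0, 3          # synthetic zero-based antenna indices
    >>> 2048*(ant1+1) + (ant2+1) + 2**16
    67588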
True if flagged.\n ant1, ant2 : array\n The antenna indices for each baselines in the visibilities.\n \"\"\"\n with table(tablename) as tb:\n try:\n spw = np.array(tb.SPECTRAL_WINDOW_ID[:])\n except AttributeError:\n spw = np.array([0])\n time = np.array(tb.TIME[:])\n if cparam:\n vals = np.array(tb.CPARAM[:])\n else:\n vals = np.array(tb.FPARAM[:])\n flags = np.array(tb.FLAG[:])\n ant1 = np.array(tb.ANTENNA1[:])\n ant2 = np.array(tb.ANTENNA2[:])\n baseline = 2048*(ant1+1)+(ant2+1)+2**16\n\n if reshape:\n time, vals, flags, ant1, ant2, _, _ = reshape_calibration_data(\n vals, flags, ant1, ant2, baseline, time, spw)\n\n return vals, time/ct.SECONDS_PER_DAY, flags, ant1, ant2\n\ndef reshape_calibration_data(\n vals, flags, ant1, ant2, baseline, time, spw, swapaxes=True\n):\n \"\"\"Reshape calibration or measurement set data.\n\n Reshapes the 0th axis of the input data `vals` and `flags` from a\n combined (baseline-time-spw) axis into 3 axes (baseline, time, spw).\n\n Parameters\n ----------\n vals : ndarray\n The input values, shape (baseline-time-spw, freq, pol).\n flags : ndarray\n Flag array, same shape as vals.\n ant1, ant2 : array\n The antennas in the baseline, same length as the 0th axis of `vals`.\n baseline : array\n The baseline index, same length as the 0th axis of `vals`.\n time : array\n The time of each integration, same length as the 0th axis of `vals`.\n spw : array\n The spectral window index of each integration, same length as the 0th\n axis of `vals`.\n\n Returns\n -------\n time : array\n Unique times, same length as the time axis of the output `vals`.\n vals, flags : ndarray\n The reshaped input arrays, dimensions (baseline, time, spw, freq, pol)\n ant1, ant2 : array\n ant1 and ant2 for unique baselines, same length as the baseline axis of\n the output `vals`.\n orig_shape : list\n The original order of the time, baseline and spw axes in the ms.\n \"\"\"\n if len(np.unique(ant1))==len(np.unique(ant2)):\n nbl = len(np.unique(baseline))\n else:\n nbl = max([len(np.unique(ant1)), len(np.unique(ant2))])\n nspw = len(np.unique(spw))\n ntime = len(time)//nbl//nspw\n nfreq = vals.shape[-2]\n npol = vals.shape[-1]\n if np.all(baseline[:ntime*nspw] == baseline[0]):\n if np.all(time[:nspw] == time[0]):\n orig_shape = ['baseline', 'time', 'spw']\n # baseline, time, spw\n time = time.reshape(nbl, ntime, nspw)[0, :, 0]\n vals = vals.reshape(nbl, ntime, nspw, nfreq, npol)\n flags = flags.reshape(nbl, ntime, nspw, nfreq, npol)\n ant1 = ant1.reshape(nbl, ntime, nspw)[:, 0, 0]\n ant2 = ant2.reshape(nbl, ntime, nspw)[:, 0, 0]\n spw = spw.reshape(nbl, ntime, nspw)[0, 0, :]\n else:\n # baseline, spw, time\n orig_shape = ['baseline', 'spw', 'time']\n assert np.all(spw[:ntime] == spw[0])\n time = time.reshape(nbl, nspw, ntime)[0, 0, :]\n vals = vals.reshape(nbl, nspw, ntime, nfreq, npol)\n flags = flags.reshape(nbl, nspw, ntime, nfreq, npol)\n if swapaxes:\n vals = vals.swapaxes(1, 2)\n flags = flags.swapaxes(1, 2)\n ant1 = ant1.reshape(nbl, nspw, ntime)[:, 0, 0]\n ant2 = ant2.reshape(nbl, nspw, ntime)[:, 0, 0]\n spw = spw.reshape(nbl, nspw, ntime)[0, :, 0]\n elif np.all(time[:nspw*nbl] == time[0]):\n if np.all(baseline[:nspw] == baseline[0]):\n # time, baseline, spw\n orig_shape = ['time', 'baseline', 'spw']\n time = time.reshape(ntime, nbl, nspw)[:, 0, 0]\n vals = vals.reshape(ntime, nbl, nspw, nfreq, npol)\n flags = flags.reshape(ntime, nbl, nspw, nfreq, npol)\n if swapaxes:\n vals = vals.swapaxes(0, 1)\n flags = flags.swapaxes(0, 1)\n ant1 = ant1.reshape(ntime, nbl, nspw)[0, :, 0]\n ant2 
= ant2.reshape(ntime, nbl, nspw)[0, :, 0]\n spw = spw.reshape(ntime, nbl, nspw)[0, 0, :]\n else:\n orig_shape = ['time', 'spw', 'baseline']\n assert np.all(spw[:nbl] == spw[0])\n time = time.reshape(ntime, nspw, nbl)[:, 0, 0]\n vals = vals.reshape(ntime, nspw, nbl, nfreq, npol)\n flags = flags.reshape(ntime, nspw, nbl, nfreq, npol)\n if swapaxes:\n vals = vals.swapaxes(1, 2).swapaxes(0, 1)\n flags = flags.swapaxes(1, 2).swapaxes(0, 1)\n ant1 = ant1.reshape(ntime, nspw, nbl)[0, 0, :]\n ant2 = ant2.reshape(ntime, nspw, nbl)[0, 0, :]\n spw = spw.reshape(ntime, nspw, nbl)[0, :, 0]\n else:\n assert np.all(spw[:nbl*ntime] == spw[0])\n if np.all(baseline[:ntime] == baseline[0]):\n # spw, baseline, time\n orig_shape = ['spw', 'baseline', 'time']\n time = time.reshape(nspw, nbl, ntime)[0, 0, :]\n vals = vals.reshape(nspw, nbl, ntime, nfreq, npol)\n flags = flags.reshape(nspw, nbl, ntime, nfreq, npol)\n if swapaxes:\n vals = vals.swapaxes(0, 1).swapaxes(1, 2)\n flags = flags.swapaxes(0, 1).swapaxes(1, 2)\n ant1 = ant1.reshape(nspw, nbl, ntime)[0, :, 0]\n ant2 = ant2.reshape(nspw, nbl, ntime)[0, :, 0]\n spw = spw.reshape(nspw, nbl, ntime)[:, 0, 0]\n else:\n assert np.all(time[:nbl] == time[0])\n # spw, time, bl\n orig_shape = ['spw', 'time', 'baseline']\n time = time.reshape(nspw, ntime, nbl)[0, :, 0]\n vals = vals.reshape(nspw, ntime, nbl, nfreq, npol)\n flags = flags.reshape(nspw, ntime, nbl, nfreq, npol)\n if swapaxes:\n vals = vals.swapaxes(0, 2)\n flags = flags.swapaxes(0, 2)\n ant1 = ant1.reshape(nspw, ntime, nbl)[0, 0, :]\n ant2 = ant2.reshape(nspw, ntime, nbl)[0, 0, :]\n spw = spw.reshape(nspw, ntime, nbl)[:, 0, 0]\n return time, vals, flags, ant1, ant2, spw, orig_shape\n\ndef caltable_to_etcd(\n msname, calname, caltime, status, pols=None, logger=None\n):\n r\"\"\"Copies calibration values from delay and gain tables to etcd.\n\n The dictionary passed to etcd should look like: {\"ant_num\": <i>,\n \"time\": <d>, \"pol\", [<s>, <s>], \"gainamp\": [<d>, <d>],\n \"gainphase\": [<d>, <d>], \"delay\": [<i>, <i>], \"calsource\": <s>,\n \"gaincaltime_offset\": <d>, \"delaycaltime_offset\": <d>, 'sim': <b>,\n 'status': <i>}\n\n Parameters\n ----------\n msname : str\n The measurement set name, will use solutions created from the\n measurement set `msname`.ms.\n calname : str\n The calibrator name. Will open the calibration tables\n `msname`\\_`calname`\\_kcal and `msname`\\_`calname`\\_gcal_ant.\n caltime : float\n The time of calibration transit in mjd.\n status : int\n The status of the calibration. Decode with dsautils.calstatus.\n pols : list\n The names of the polarizations. If ``None``, will be set to\n ``['B', 'A']``. Defaults ``None``.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. 
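Each complex gain is split into the amplitude and phase written to etcd, cast to plain floats as in the body below. A minimal sketch with a synthetic gain:

    >>> import numpy as np
    >>> amp = 0.8*np.exp(1.0j*0.3)                                     # synthetic complex gain
    >>> gainamp, gainphase = float(np.abs(amp)), float(np.angle(amp))  # ~0.8 and ~0.3 rad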
If None, messages are printed.\n \"\"\"\n if pols is None:\n pols = ['B', 'A']\n\n try:\n # Complex gains for each antenna.\n amps, tamp, flags, ant1, ant2 = read_caltable(\n '{0}_{1}_gacal'.format(msname, calname),\n cparam=True\n )\n mask = np.ones(flags.shape)\n mask[flags == 1] = np.nan\n amps = amps*mask\n\n phase, _tphase, flags, ant1, ant2 = read_caltable(\n '{0}_{1}_gpcal'.format(msname, calname),\n cparam=True\n )\n mask = np.ones(flags.shape)\n mask[flags == 1] = np.nan\n phase = phase*mask\n\n if np.all(ant2 == ant2[0]):\n antenna_order_amps = ant1\n if not np.all(ant2 == ant2[0]):\n idxs = np.where(ant1 == ant2)[0]\n tamp = tamp[idxs]\n amps = amps[idxs, ...]\n antenna_order_amps = ant1[idxs]\n\n # Check the output shapes.\n print(tamp.shape, amps.shape)\n assert amps.shape[0] == len(antenna_order_amps)\n assert amps.shape[1] == tamp.shape[0]\n assert amps.shape[2] == 1\n assert amps.shape[3] == 1\n assert amps.shape[4] == len(pols)\n\n amps = np.nanmedian(\n amps.squeeze(axis=2).squeeze(axis=2),\n axis=1\n )*np.nanmedian(\n phase.squeeze(axis=2).squeeze(axis=2),\n axis=1\n )\n tamp = np.median(tamp)\n gaincaltime_offset = (tamp-caltime)*ct.SECONDS_PER_DAY\n\n except Exception as exc:\n tamp = np.nan\n amps = np.ones((0, len(pols)))*np.nan\n gaincaltime_offset = 0.\n antenna_order_amps = np.zeros(0, dtype=np.int)\n status = cs.update(\n status,\n cs.GAIN_TBL_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_GAINCALTIME\n )\n du.exception_logger(logger, 'caltable_to_etcd', exc, throw=False)\n\n # Delays for each antenna.\n try:\n delays, tdel, flags, antenna_order_delays, ant2 = read_caltable(\n '{0}_{1}_kcal'.format(msname, calname), cparam=False)\n mask = np.ones(flags.shape)\n mask[flags == 1] = np.nan\n delays = delays*mask\n\n # Check the output shapes.\n assert delays.shape[0] == len(antenna_order_delays)\n assert delays.shape[1] == tdel.shape[0]\n assert delays.shape[2] == 1\n assert delays.shape[3] == 1\n assert delays.shape[4] == len(pols)\n\n delays = np.nanmedian(delays.squeeze(axis=2).squeeze(axis=2), axis=1)\n tdel = np.median(tdel)\n delaycaltime_offset = (tdel-caltime)*ct.SECONDS_PER_DAY\n\n except Exception as exc:\n tdel = np.nan\n delays = np.ones((0, len(pols)))*np.nan\n delaycaltime_offset = 0.\n status = cs.update(\n status,\n cs.DELAY_TBL_ERR |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_DELAYCALTIME\n )\n antenna_order_delays = np.zeros(0, dtype=np.int)\n du.exception_logger(logger, 'caltable_to_etcd', exc, throw=False)\n\n antenna_order = np.unique(\n np.array([antenna_order_amps,\n antenna_order_delays])\n )\n\n for antnum in antenna_order:\n\n # Everything needs to be cast properly.\n gainamp = []\n gainphase = []\n ant_delay = []\n\n if antnum in antenna_order_amps:\n idx = np.where(antenna_order_amps == antnum)[0][0]\n for amp in amps[idx, :]:\n if not np.isnan(amp):\n gainamp += [float(np.abs(amp))]\n gainphase += [float(np.angle(amp))]\n else:\n gainamp += [None]\n gainphase += [None]\n else:\n gainamp = [None]*len(pols)\n gainphase = [None]*len(pols)\n\n if antnum in antenna_order_delays:\n idx = np.where(antenna_order_delays == antnum)[0][0]\n for delay in delays[idx, :]:\n if not np.isnan(delay):\n ant_delay += [int(np.rint(delay))]\n else:\n ant_delay += [None]\n else:\n ant_delay = [None]*len(pols)\n\n dd = {\n 'ant_num': int(antnum+1),\n 'time': float(caltime),\n 'pol': pols,\n 'gainamp': gainamp,\n 'gainphase': gainphase,\n 'delay': ant_delay,\n 'calsource': calname,\n 
'gaincaltime_offset': float(gaincaltime_offset),\n 'delaycaltime_offset': float(delaycaltime_offset),\n 'sim': False,\n 'status':status\n }\n required_keys = dict({\n 'ant_num': [cs.INV_ANTNUM, 0],\n 'time': [cs.INV_DELAYCALTIME, 0.],\n 'pol': [cs.INV_POL, ['B', 'A']],\n 'calsource': [cs.INV_CALSOURCE, 'Unknown'],\n 'sim': [cs.INV_SIM, False],\n 'status': [cs.UNKNOWN_ERR, 0]\n })\n for key, value in required_keys.items():\n if dd[key] is None:\n print('caltable_to_etcd: key {0} must not be None to write to '\n 'etcd'.format(key))\n status = cs.update(status, value[0])\n dd[key] = value[1]\n\n for pol in dd['pol']:\n if pol is None:\n print('caltable_to_etcd: pol must not be None to write to '\n 'etcd')\n status = cs.update(status, cs.INV_POL)\n dd['pol'] = ['B', 'A']\n de.put_dict('/mon/cal/{0}'.format(antnum+1), dd)\n\ndef get_antenna_gains(gains, ant1, ant2, refant=0):\n \"\"\"Calculates antenna gains, g_i, from CASA table of G_ij=g_i g_j*.\n\n Currently does not support baseline-based gains.\n Refant only used for baseline-based case.\n\n Parameters\n ----------\n gains : ndarray\n The gains read in from the CASA gain table. 0th axis is baseline or\n antenna.\n ant1, ant2 : ndarray\n The antenna pair for each entry along the 0th axis in gains.\n refant : int\n The reference antenna index to use to get antenna gains from baseline\n gains.\n\n Returns\n -------\n antennas : ndarray\n The antenna indices.\n antenna_gains : ndarray\n Gains for each antenna in `antennas`.\n \"\"\"\n antennas = np.unique(np.concatenate((ant1, ant2)))\n output_shape = list(gains.shape)\n output_shape[0] = len(antennas)\n antenna_gains = np.zeros(tuple(output_shape), dtype=gains.dtype)\n if np.all(ant2 == ant2[0]):\n for i, ant in enumerate(antennas):\n antenna_gains[i] = 1/gains[ant1==ant]\n else:\n assert len(antennas) == 3, (\"Baseline-based only supported for trio of\"\n \"antennas\")\n for i, ant in enumerate(antennas):\n ant1idxs = np.where(ant1==ant)[0]\n ant2idxs = np.where(ant2==ant)[0]\n otheridx = np.where((ant1!=ant) & (ant2!=ant))[0][0]\n # phase\n sign = 1\n idx_phase = np.where((ant1==ant) & (ant2==refant))[0]\n if len(idx_phase) == 0:\n idx_phase = np.where((ant2==refant) & (ant1==ant))[0]\n assert len(idx_phase) == 1\n sign = -1\n # amplitude\n if len(ant1idxs) == 2:\n g01 = gains[ant1idxs[0]]\n g20 = np.conjugate(gains[ant1idxs[1]])\n if ant1[otheridx] == ant2[ant1idxs[1]]:\n g21 = gains[otheridx]\n else:\n g21 = np.conjugate(gains[otheridx])\n if len(ant1idxs) == 1:\n g01 = gains[ant1idxs[0]]\n g20 = gains[ant2idxs[0]]\n if ant1[otheridx] == ant1[ant2idxs[0]]:\n g21 = gains[otheridx]\n else:\n g21 = np.conjugate(gains[otheridx])\n else:\n g01 = np.conjugate(gains[ant2idxs[0]])\n g20 = gains[ant2idxs[1]]\n if ant1[otheridx] == ant1[ant2idxs[1]]:\n g21 = gains[otheridx]\n else:\n g21 = np.conjugate(gains[otheridx])\n antenna_gains[i] = (np.sqrt(np.abs(g01*g20/g21))*np.exp(\n sign*1.0j*np.angle(gains[idx_phase])))**(-1)\n return antennas, antenna_gains\n\ndef write_beamformer_weights(msname, calname, caltime, antennas, outdir,\n corr_list, antenna_flags, tol=0.3):\n \"\"\"Writes weights for the beamformer.\n\n Parameters\n ----------\n msname : str\n The prefix of the measurement set. Will open `msname`.ms\n calname : str\n The name of the calibrator source.\n antennas : list\n The names of the antennas to extract solutions for. 
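The weights for each correlator node are serialized as interleaved 32-bit floats (real, imaginary) appended after the ``bu`` baseline lengths, via ``.view(np.float32)`` as in the body below. A minimal sketch of that packing with synthetic weights:

    >>> import numpy as np
    >>> w = np.array([1+2j, 3-4j], dtype=np.complex64)   # synthetic weights
    >>> packed = w.view(np.float32).flatten()            # -> [1., 2., 3., -4.], dtype float32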
Order must be the\n same as the order in the beamformer.\n outdir : str\n The directory to write the beamformer weights in.\n corr_list : list\n The indices of the correlator machines to write beamformer weights for.\n For now, these must be ordered so that the frequencies are contiguous\n and they are in the same order or the reverse order as in the ms. The\n bandwidth of each correlator is pulled from dsa110-meridian-fs package\n data.\n antenna_flags : ndarray(bool)\n Dimensions (antennas, pols). True where flagged, False otherwise.\n tol : float\n The fraction of data for a single antenna/pol flagged that the\n can be flagged in the beamformer. If more data than this is flagged as\n having bad solutions, the entire antenna/pol pair is flagged.\n\n Returns\n -------\n corr_list : list\n bu : array\n The length of the baselines in the u direction for each antenna\n relative to antenna 24.\n fweights : ndarray\n The frequencies corresponding to the beamformer weights, dimensions\n (correlator, frequency).\n filenames : list\n The names of the file containing the beamformer weights.\n \"\"\"\n # Get the frequencies we want to write solutions for.\n # corr_settings = resource_filename(\"dsamfs\", \"data/dsa_parameters.yaml\")\n # params = yaml.safe_load(fhand)\n ncorr = len(corr_list)\n weights = np.ones((ncorr, len(antennas), 48, 2), dtype=np.complex64)\n fweights = np.ones((ncorr, 48), dtype=np.float32)\n nchan = CORR_PARAMS['nchan']\n dfreq = CORR_PARAMS['bw_GHz']/nchan\n if CORR_PARAMS['chan_ascending']:\n fobs = CORR_PARAMS['f0_GHz']+np.arange(nchan)*dfreq\n else:\n fobs = CORR_PARAMS['f0_GHz']-np.arange(nchan)*dfreq\n nchan_spw = CORR_PARAMS['nchan_spw']\n for i, corr_id in enumerate(corr_list):\n ch0 = CORR_PARAMS['ch0']['corr{0:02d}'.format(corr_id)]\n fobs_corr = fobs[ch0:ch0+nchan_spw]\n fweights[i, :] = fobs_corr.reshape(\n fweights.shape[1],\n -1\n ).mean(axis=1)\n\n antpos_df = get_itrf(\n latlon_center=(ct.OVRO_LAT*u.rad, ct.OVRO_LON*u.rad, ct.OVRO_ALT*u.m)\n )\n blen = np.zeros((len(antennas), 3))\n for i, ant in enumerate(antennas):\n blen[i, 0] = antpos_df['x_m'].loc[ant]-antpos_df['x_m'].loc[24]\n blen[i, 1] = antpos_df['y_m'].loc[ant]-antpos_df['y_m'].loc[24]\n blen[i, 2] = antpos_df['z_m'].loc[ant]-antpos_df['z_m'].loc[24]\n bu, _, _ = calc_uvw(blen, 59000., 'HADEC', 0.*u.rad, 0.6*u.rad)\n bu = bu.squeeze().astype(np.float32)\n\n with table('{0}.ms/SPECTRAL_WINDOW'.format(msname)) as tb:\n fobs = np.array(tb.CHAN_FREQ[:])/1e9\n fobs = fobs.reshape(fweights.size, -1).mean(axis=1)\n f_reversed = not np.all(\n np.abs(fobs-fweights.ravel())/fweights.ravel() < 1e-5\n )\n if f_reversed:\n assert np.all(\n np.abs(fobs[::-1]-fweights.ravel())/fweights.ravel() < 1e-5\n )\n\n gains, _time, flags, ant1, ant2 = read_caltable(\n '{0}_{1}_gacal'.format(msname, calname), True)\n gains[flags] = np.nan\n gains = np.nanmean(gains, axis=1)\n phases, _, flags, ant1p, ant2p = read_caltable(\n '{0}_{1}_gpcal'.format(msname, calname), True)\n phases[flags] = np.nan\n phases = np.nanmean(phases, axis=1)\n assert np.all(ant1p == ant1)\n assert np.all(ant2p == ant2)\n gantenna, gains = get_antenna_gains(gains*phases, ant1, ant2)\n\n bgains, _, flags, ant1, ant2 = read_caltable(\n '{0}_{1}_bcal'.format(msname, calname), True)\n bgains[flags] = np.nan\n bgains = np.nanmean(bgains, axis=1)\n bantenna, bgains = get_antenna_gains(bgains, ant1, ant2)\n assert np.all(bantenna == gantenna)\n\n nantenna = gains.shape[0]\n npol = gains.shape[-1]\n\n gains = gains*bgains\n print(gains.shape)\n gains = 
gains.reshape(nantenna, -1, npol)\n if f_reversed:\n gains = gains[:, ::-1, :]\n gains = gains.reshape(nantenna, ncorr, -1, npol)\n nfint = gains.shape[2]//weights.shape[2]\n assert gains.shape[2]%weights.shape[2]==0\n\n gains = np.nanmean(\n gains.reshape(\n gains.shape[0], gains.shape[1], -1, nfint, gains.shape[3]\n ), axis=3\n )\n if not np.all(ant2==ant2[0]):\n idxs = np.where(ant1==ant2)\n gains = gains[idxs]\n ant1 = ant1[idxs]\n for i, antid in enumerate(ant1):\n if antid+1 in antennas:\n idx = np.where(antennas==antid+1)[0][0]\n weights[:, idx, ...] = gains[i, ...]\n\n fracflagged = np.sum(np.sum(np.isnan(weights), axis=2), axis=0)\\\n /(weights.shape[0]*weights.shape[2])\n antenna_flags_badsolns = fracflagged > tol\n weights[np.isnan(weights)] = 0.\n\n # Divide by the first non-flagged antenna\n idx0, idx1 = np.nonzero(\n np.logical_not(\n antenna_flags + antenna_flags_badsolns\n )\n )\n weights = (\n weights/weights[:, idx0[0], ..., idx1[0]][:, np.newaxis, :, np.newaxis]\n )\n weights[np.isnan(weights)] = 0.\n\n filenames = []\n for i, corr_idx in enumerate(corr_list):\n wcorr = weights[i, ...].view(np.float32).flatten()\n wcorr = np.concatenate([bu, wcorr], axis=0)\n fname = 'beamformer_weights_corr{0:02d}'.format(corr_idx)\n fname = '{0}_{1}_{2}'.format(\n fname,\n calname,\n caltime.isot\n )\n if os.path.exists('{0}/{1}.dat'.format(outdir, fname)):\n os.unlink('{0}/{1}.dat'.format(outdir, fname))\n with open('{0}/{1}.dat'.format(outdir, fname), 'wb') as f:\n f.write(bytes(wcorr))\n filenames += ['{0}.dat'.format(fname)]\n return corr_list, bu, fweights, filenames, antenna_flags_badsolns\n\ndef get_delays(antennas, msname, calname, applied_delays):\n r\"\"\"Returns the delays to be set in the correlator.\n\n Based on the calibrated delays and the currently applied delays.\n\n Parameters\n ----------\n antennas : list\n The antennas to get delays for.\n msname : str\n The path to the measurement set containing the calibrator pass.\n calname : str\n The name of the calibrator. Will open `msname`\\_`calname`\\_kcal.\n applied_delays : ndarray\n The currently applied delays for every antenna/polarization, in ns.\n Dimensions (antenna, pol).\n\n Returns\n -------\n delays : ndarray\n The delays to be applied for every antenna/polarization in ns.\n Dimensions (antenna, pol).\n flags : ndarray\n True if that antenna/pol data is flagged in the calibration table.\n In this case, the delay should be set to 0. Dimensions (antenna, pol).\n \"\"\"\n delays, _time, flags, ant1, _ant2 = read_caltable(\n '{0}_{1}_kcal'.format(msname, calname)\n )\n delays = delays.squeeze()\n flags = flags.squeeze()\n print('delays: {0}'.format(delays.shape))\n # delays[flags] = np.nan\n ant1 = list(ant1)\n idx = [ant1.index(ant-1) for ant in antennas]\n delays = delays[idx]\n flags = flags[idx]\n newdelays = applied_delays-delays\n newdelays = newdelays - np.nanmin(newdelays)\n newdelays = (np.rint(newdelays/2)*2)\n # delays[flags] = 0\n return newdelays.astype(np.int), flags\n\ndef write_beamformer_solutions(\n msname, calname, caltime, antennas, applied_delays,\n corr_list=np.arange(1, 17),\n outdir='/home/user/beamformer_weights/',\n flagged_antennas=None,\n pols=None\n):\n \"\"\"Writes beamformer solutions to disk.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set used for calibration.\n calname : str\n The name of the calibrator source used for calibration. 
Will open\n tables that start with `msname`_`calname`\n caltime : astropy.time.Time object\n The transit time of the calibrator.\n antennas : list\n The antenna names for which to write beamformer solutions, in order.\n applied_delays : ndarray\n The currently applied delays at the time of the calibration, in ns.\n Dimensions (antenna, pol). The antenna axis should be in the order\n specified by antennas.\n corr_list : list\n The indices of the correlator machines to write beamformer weights for.\n For now, these must be ordered so that the frequencies are contiguous\n and they are in the same order or the reverse order as in the ms. The\n bandwidth of each correlator is pulled from dsa110-meridian-fs package\n data.\n flagged_antennas : list\n A list of antennas to flag in the beamformer solutions. Should include\n polarizations. e.g. ['24 B', '32 A']\n outdir : str\n The directory to write the beamformer weights in.\n pols : list\n The order of the polarizations.\n\n Returns\n -------\n flags : ndarray(boolean)\n Dimensions (antennas, pols). True where the data is flagged, and should\n not be used. Compiled from the ms flags as well as `flagged_antennas`.\n \"\"\"\n if pols is None:\n pols = ['B', 'A']\n beamformer_flags = {}\n delays, flags = get_delays(antennas, msname, calname, applied_delays)\n print('delay flags:', flags.shape)\n if flagged_antennas is not None:\n for item in flagged_antennas:\n ant, pol = item.split(' ')\n flags[antennas==ant, pols==pol] = 1\n beamformer_flags['{0} {1}'.format(ant, pol)] = ['flagged by user']\n delays = delays-np.min(delays[~flags])\n while not np.all(delays[~flags] < 1024):\n if np.sum(delays[~flags] > 1024) < np.nansum(delays[~flags] < 1024):\n argflag = np.argmax(delays[~flags])\n else:\n argflag = np.argmin(delays[~flags])\n argflag = np.where(~flags.flatten())[0][argflag]\n flag_idxs = np.unravel_index(argflag, flags.shape)\n flags[np.unravel_index(argflag, flags.shape)] = 1\n key = '{0} {1}'.format(antennas[flag_idxs[0]], pols[flag_idxs[1]])\n if key not in beamformer_flags.keys():\n beamformer_flags[key] = []\n beamformer_flags[key] += ['delay exceeds snap capabilities']\n delays = delays-np.min(delays[~flags])\n\n caltime.precision = 0\n corr_list, eastings, _fobs, weights_files, flags_badsolns = \\\n write_beamformer_weights(msname, calname, caltime, antennas, outdir,\n corr_list, flags)\n idxant, idxpol = np.nonzero(flags_badsolns)\n for i, ant in enumerate(idxant):\n key = '{0} {1}'.format(antennas[ant], pols[idxpol[i]])\n if key not in beamformer_flags.keys():\n beamformer_flags[key] = []\n beamformer_flags[key] += ['casa solutions flagged']\n\n calibration_dictionary = {\n 'cal_solutions':\n {\n 'source': calname,\n 'caltime': float(caltime.mjd),\n 'antenna_order': [int(ant) for ant in antennas],\n 'corr_order': [int(corr) for corr in corr_list],\n 'pol_order': ['B', 'A'],\n 'delays': [\n [\n int(delay[0]//2),\n int(delay[1]//2)\n ] for delay in delays\n ],\n 'eastings': [float(easting) for easting in eastings],\n 'weights_axis0': 'antenna',\n 'weights_axis1': 'frequency',\n 'weights_axis2': 'pol',\n 'weight_files': weights_files,\n 'flagged_antennas': beamformer_flags\n }\n }\n\n with open(\n '{0}/beamformer_weights_{1}_{2}.yaml'.format(\n outdir,\n calname,\n caltime.isot\n ),\n 'w'\n ) as file:\n yaml.dump(calibration_dictionary, file)\n return flags\n\ndef convert_calibrator_pass_to_ms(\n cal, date, files, duration, msdir='/mnt/data/dsa110/calibration/',\n hdf5dir='/mnt/data/dsa110/correlator/', antenna_list=None,\n 
logger=None\n):\n r\"\"\"Converts hdf5 files near a calibrator pass to a CASA ms.\n\n Parameters\n ----------\n cal : dsacalib.utils.src instance\n The calibrator source.\n date : str\n The date (to day precision) of the calibrator pass. e.g. '2020-10-06'.\n files : list\n The hdf5 filenames corresponding to the calibrator pass. These should\n be date strings to second precision.\n e.g. ['2020-10-06T12:35:04', '2020-10-06T12:50:04']\n One ms will be written per filename in `files`. If the length of\n `files` is greater than 1, the mss created will be virtualconcated into\n a single ms.\n duration : astropy quantity\n Amount of data to extract, unit minutes or equivalent.\n msdir : str\n The full path to the directory to place the measurement set in. The ms\n will be written to `msdir`/`date`\\_`cal.name`.ms\n hdf5dir : str\n The full path to the directory containing subdirectories with correlated\n hdf5 data.\n antenna_list : list\n The names of the antennas to include in the measurement set. Names should\n be strings. If not passed, all antennas in the hdf5 files are included.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. If None, messages are printed.\n \"\"\"\n msname = '{0}/{1}_{2}'.format(msdir, date, cal.name)\n print('looking for files: {0}'.format(' '.join(files)))\n if len(files) == 1:\n try:\n reftime = Time(files[0])\n hdf5files = []\n for hdf5f in sorted(glob.glob(\n '{0}/corr??/{1}*.hdf5'.format(hdf5dir, files[0][:-3])\n )):\n filetime = Time(hdf5f[:-5].split('/')[-1])\n if abs(filetime-reftime) < 1*u.min:\n hdf5files += [hdf5f]\n assert len(hdf5files) < 17\n assert len(hdf5files) > 1\n print(f'found {len(hdf5files)} hdf5files for {files[0]}')\n uvh5_to_ms(\n hdf5files,\n msname,\n ra=cal.ra,\n dec=cal.dec,\n flux=cal.I,\n # dt=duration,\n antenna_list=antenna_list,\n logger=logger\n )\n message = 'Wrote {0}.ms'.format(msname)\n if logger is not None:\n logger.info(message)\n #else:\n print(message)\n except (ValueError, IndexError) as exception:\n message = 'No data for {0} transit on {1}. Error {2}. Traceback: {3}'.format(\n date,\n cal.name,\n type(exception).__name__,\n ''.join(\n traceback.format_tb(exception.__traceback__)\n )\n )\n if logger is not None:\n logger.info(message)\n #else:\n print(message)\n elif len(files) > 0:\n msnames = []\n for filename in files:\n print(filename)\n try:\n reftime = Time(filename)\n hdf5files = []\n for hdf5f in sorted(glob.glob(\n '{0}/corr??/{1}*.hdf5'.format(hdf5dir, filename[:-4])\n )):\n filetime = Time(hdf5f[:-5].split('/')[-1])\n if abs(filetime-reftime) < 1*u.min:\n hdf5files += [hdf5f]\n #assert len(hdf5files) < 17\n #assert len(hdf5files) > 1\n print(f'found {len(hdf5files)} hdf5files for {filename}')\n uvh5_to_ms(\n hdf5files,\n '{0}/{1}'.format(msdir, filename),\n ra=cal.ra,\n dec=cal.dec,\n flux=cal.I,\n # dt=duration,\n antenna_list=antenna_list,\n logger=logger\n )\n msnames += ['{0}/{1}'.format(msdir, filename)]\n except (ValueError, IndexError) as exception:\n message = 'No data for {0}. Error {1}. 
Traceback: {2}'.format(\n filename,\n type(exception).__name__,\n ''.join(\n traceback.format_tb(exception.__traceback__)\n )\n )\n if logger is not None:\n logger.info(message)\n #else:\n print(message)\n if os.path.exists('{0}.ms'.format(msname)):\n for root, _dirs, walkfiles in os.walk(\n '{0}.ms'.format(msname),\n topdown=False\n ):\n for name in walkfiles:\n os.unlink(os.path.join(root, name))\n shutil.rmtree('{0}.ms'.format(msname))\n if len(msnames) > 1:\n virtualconcat(\n ['{0}.ms'.format(msn) for msn in msnames],\n '{0}.ms'.format(msname))\n message = 'Wrote {0}.ms'.format(msname)\n if logger is not None:\n logger.info(message)\n #else:\n print(message)\n elif len(msnames) == 1:\n os.rename('{0}.ms'.format(msnames[0]), '{0}.ms'.format(msname))\n message = 'Wrote {0}.ms'.format(msname)\n if logger is not None:\n logger.info(message)\n #else:\n print(message)\n else:\n message = 'No data for {0} transit on {1}'.format(date, cal.name)\n if logger is not None:\n logger.info(message)\n #else:\n print(message)\n else:\n message = 'No data for {0} transit on {1}'.format(date, cal.name)\n if logger is not None:\n logger.info(message)\n #else:\n print(message)\n\ndef uvh5_to_ms(fname, msname, ra=None, dec=None, dt=None, antenna_list=None,\n flux=None, fringestop=True, logger=None, refmjd=REFMJD):\n \"\"\"\n Converts a uvh5 data to a uvfits file.\n\n Parameters\n ----------\n fname : str\n The full path to the uvh5 data file.\n msname : str\n The name of the ms to write. Data will be written to `msname`.ms\n ra : astropy quantity\n The RA at which to phase the data. If None, will phase at the meridian\n of the center of the uvh5 file.\n dec : astropy quantity\n The DEC at which to phase the data. If None, will phase at the pointing\n declination.\n dt : astropy quantity\n Duration of data to extract. Default is to extract the entire file.\n antenna_list : list\n Antennas for which to extract visibilities from the uvh5 file. Default\n is to extract all visibilities in the uvh5 file.\n flux : float\n The flux of the calibrator in Jy. If included, will write a model of\n the primary beam response to the calibrator source to the model column\n of the ms. If not included, a model of a constant response over\n frequency and time will be written instead of the primary beam model.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. 
If None, messages are printed.\n refmjd : float\n The mjd used in the fringestopper.\n \"\"\"\n print(fname)\n # zenith_dec = 0.6503903199825691*u.rad\n UV = UVData()\n\n # Read in the data\n if antenna_list is not None:\n UV.read(fname, file_type='uvh5', antenna_names=antenna_list,\n run_check_acceptability=False, strict_uvw_antpos_check=False)\n else:\n UV.read(fname, file_type='uvh5', run_check_acceptability=False,\n strict_uvw_antpos_check=False)\n time = Time(UV.time_array, format='jd')\n pt_dec = UV.extra_keywords['phase_center_dec']*u.rad\n pointing = du.direction(\n 'HADEC',\n 0.,\n pt_dec.to_value(u.rad),\n np.mean(time.mjd)\n )\n lamb = c.c/(UV.freq_array*u.Hz)\n if ra is None:\n ra = pointing.J2000()[0]*u.rad\n if dec is None:\n dec = pointing.J2000()[1]*u.rad\n\n if dt is not None:\n extract_times(UV, ra, dt)\n time = Time(UV.time_array, format='jd')\n\n # Set antenna positions\n # This should already be done by the writer but for some reason they\n # are being converted to ICRS\n df_itrf = get_itrf(\n latlon_center=(ct.OVRO_LAT*u.rad, ct.OVRO_LON*u.rad, ct.OVRO_ALT*u.m)\n )\n if len(df_itrf['x_m']) != UV.antenna_positions.shape[0]:\n message = 'Mismatch between antennas in current environment ({0}) and correlator environment ({1}) for file {2}'.format(\n len(df_itrf['x_m']),\n UV.antenna_positions.shape[0],\n fname\n )\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n UV.antenna_positions[:len(df_itrf['x_m'])] = np.array([\n df_itrf['x_m'],\n df_itrf['y_m'],\n df_itrf['z_m']\n ]).T-UV.telescope_location\n antenna_positions = UV.antenna_positions + UV.telescope_location\n blen = np.zeros((UV.Nbls, 3))\n for i, ant1 in enumerate(UV.ant_1_array[:UV.Nbls]):\n ant2 = UV.ant_2_array[i]\n blen[i, ...] = UV.antenna_positions[ant2, :] - \\\n UV.antenna_positions[ant1, :]\n\n uvw_m = calc_uvw_blt(\n blen,\n np.ones(UV.Nbls)*refmjd,\n 'HADEC',\n np.zeros(UV.Nbls)*u.rad,\n np.ones(UV.Nbls)*pt_dec\n )\n\n # Use antenna positions since CASA uvws are slightly off from pyuvdatas\n # uvw_z = calc_uvw_blt(blen, time[:UV.Nbls].mjd, 'HADEC',\n # np.zeros(UV.Nbls)*u.rad, np.ones(UV.Nbls)*zenith_dec)\n # dw = (uvw_z[:, -1] - uvw_m[:, -1])*u.m\n # phase_model = np.exp((2j*np.pi/lamb*dw[:, np.newaxis, np.newaxis])\n # .to_value(u.dimensionless_unscaled))\n # UV.uvw_array = np.tile(uvw_z[np.newaxis, :, :], (UV.Ntimes, 1, 1)\n # ).reshape(-1, 3)\n # UV.data_array = (UV.data_array.reshape(\n # UV.Ntimes, UV.Nbls, UV.Nspws,UV.Nfreqs, UV.Npols\n # )/phase_model[np.newaxis, ..., np.newaxis]).reshape(\n # UV.Nblts, UV.Nspws, UV.Nfreqs, UV.Npols\n # )\n # UV.phase(ra.to_value(u.rad), dec.to_value(u.rad), use_ant_pos=True)\n # Below is the manual calibration which can be used instead.\n # Currently using because casa uvws are more accurate than pyuvdatas, which\n # aren't true uvw coordinates.\n blen = np.tile(blen[np.newaxis, :, :], (UV.Ntimes, 1, 1)).reshape(-1, 3)\n uvw = calc_uvw_blt(blen, time.mjd, 'J2000', ra.to(u.rad), dec.to(u.rad))\n dw = (uvw[:, -1] - np.tile(uvw_m[np.newaxis, :, -1], (UV.Ntimes, 1)\n ).reshape(-1))*u.m\n phase_model = np.exp((2j*np.pi/lamb*dw[:, np.newaxis, np.newaxis])\n .to_value(u.dimensionless_unscaled))\n UV.uvw_array = uvw\n if fringestop:\n UV.data_array = UV.data_array/phase_model[..., np.newaxis]\n UV.phase_type = 'phased'\n UV.phase_center_dec = dec.to_value(u.rad)\n UV.phase_center_ra = ra.to_value(u.rad)\n UV.phase_center_epoch = 2000.\n # Look for missing channels\n freq = UV.freq_array.squeeze()\n # The channels may have been reordered by 
pyuvdata so check that the\n # parameter UV.channel_width makes sense now.\n ascending = np.median(np.diff(freq)) > 0\n if ascending:\n assert np.all(np.diff(freq) > 0)\n else:\n assert np.all(np.diff(freq) < 0)\n UV.freq_array = UV.freq_array[:, ::-1]\n UV.data_array = UV.data_array[:, :, ::-1, :]\n freq = UV.freq_array.squeeze()\n UV.channel_width = np.abs(UV.channel_width)\n # Are there missing channels?\n if not np.all(np.diff(freq)-UV.channel_width < 1e-5):\n # There are missing channels!\n nfreq = int(np.rint(np.abs(freq[-1]-freq[0])/UV.channel_width+1))\n freq_out = freq[0] + np.arange(nfreq)*UV.channel_width\n existing_idxs = np.rint((freq-freq[0])/UV.channel_width).astype(int)\n data_out = np.zeros((UV.Nblts, UV.Nspws, nfreq, UV.Npols),\n dtype=UV.data_array.dtype)\n nsample_out = np.zeros((UV.Nblts, UV.Nspws, nfreq, UV.Npols),\n dtype=UV.nsample_array.dtype)\n flag_out = np.zeros((UV.Nblts, UV.Nspws, nfreq, UV.Npols),\n dtype=UV.flag_array.dtype)\n data_out[:, :, existing_idxs, :] = UV.data_array\n nsample_out[:, :, existing_idxs, :] = UV.nsample_array\n flag_out[:, :, existing_idxs, :] = UV.flag_array\n # Now write everything\n UV.Nfreqs = nfreq\n UV.freq_array = freq_out[np.newaxis, :]\n UV.data_array = data_out\n UV.nsample_array = nsample_out\n UV.flag_array = flag_out\n\n if os.path.exists('{0}.fits'.format(msname)):\n os.remove('{0}.fits'.format(msname))\n\n UV.write_uvfits('{0}.fits'.format(msname),\n spoof_nonessential=True,\n run_check_acceptability=False,\n strict_uvw_antpos_check=False\n )\n # Get the model to write to the data\n if flux is not None:\n fobs = UV.freq_array.squeeze()/1e9\n lst = UV.lst_array\n model = amplitude_sky_model(du.src('cal', ra, dec, flux),\n lst, pt_dec, fobs)\n model = np.tile(model[:, :, np.newaxis], (1, 1, UV.Npols))\n else:\n model = np.ones((UV.Nblts, UV.Nfreqs, UV.Npols), dtype=np.complex64)\n\n if os.path.exists('{0}.ms'.format(msname)):\n shutil.rmtree('{0}.ms'.format(msname))\n importuvfits('{0}.fits'.format(msname),\n '{0}.ms'.format(msname))\n\n with table('{0}.ms/ANTENNA'.format(msname), readonly=False) as tb:\n tb.putcol('POSITION', antenna_positions)\n\n addImagingColumns('{0}.ms'.format(msname))\n #if flux is not None:\n with table('{0}.ms'.format(msname), readonly=False) as tb:\n tb.putcol('MODEL_DATA', model)\n tb.putcol('CORRECTED_DATA', tb.getcol('DATA')[:])\n\ndef extract_times(UV, ra, dt):\n \"\"\"Extracts data from specified times from an already open UVData instance.\n\n This is an alternative to opening the file with the times specified using\n pyuvdata.UVData.open().\n\n Parameters\n ----------\n UV : pyuvdata.UVData() instance\n The UVData instance from which to extract data. 
Modified in-place.\n ra : float\n The ra of the source around which to extract data, in radians.\n dt : astropy quantity\n The amount of data to extract, units seconds or equivalent.\n \"\"\"\n lst_min = (ra - (dt*2*np.pi*u.rad/(ct.SECONDS_PER_SIDEREAL_DAY*u.s))/2\n ).to_value(u.rad)%(2*np.pi)\n lst_max = (ra + (dt*2*np.pi*u.rad/(ct.SECONDS_PER_SIDEREAL_DAY*u.s))/2\n ).to_value(u.rad)%(2*np.pi)\n if lst_min < lst_max:\n idx_to_extract = np.where((UV.lst_array >= lst_min) &\n (UV.lst_array <= lst_max))[0]\n else:\n idx_to_extract = np.where((UV.lst_array >= lst_min) |\n (UV.lst_array <= lst_max))[0]\n if len(idx_to_extract) == 0:\n raise ValueError(\"No times in uvh5 file match requested timespan \"\n \"with duration {0} centered at RA {1}.\".format(\n dt, ra))\n idxmin = min(idx_to_extract)\n idxmax = max(idx_to_extract)+1\n assert (idxmax-idxmin)%UV.Nbls == 0\n UV.uvw_array = UV.uvw_array[idxmin:idxmax, ...]\n UV.data_array = UV.data_array[idxmin:idxmax, ...]\n UV.time_array = UV.time_array[idxmin:idxmax, ...]\n UV.lst_array = UV.lst_array[idxmin:idxmax, ...]\n UV.nsample_array = UV.nsample_array[idxmin:idxmax, ...]\n UV.flag_array = UV.flag_array[idxmin:idxmax, ...]\n UV.ant_1_array = UV.ant_1_array[idxmin:idxmax, ...]\n UV.ant_2_array = UV.ant_2_array[idxmin:idxmax, ...]\n UV.baseline_array = UV.baseline_array[idxmin:idxmax, ...]\n UV.integration_time = UV.integration_time[idxmin:idxmax, ...]\n UV.Nblts = int(idxmax-idxmin)\n assert UV.data_array.shape[0]==UV.Nblts\n UV.Ntimes = UV.Nblts//UV.Nbls\n\ndef average_beamformer_solutions(\n fnames, ttime, outdir, corridxs=None, tol=0.3, logger=None\n):\n \"\"\"Averages written beamformer solutions.\n\n Parameters\n ----------\n fnames : list\n ttime : astropy.time.Time object\n A time to use in the filename of the solutions, indicating when they\n were written or are useful. E.g. the transit time of the most recent\n source being averaged over.\n outdir : str\n The directory in which the beamformer solutions are written, and into\n which new solutions should be written.\n corridxs : list\n The correlator nodes for which to average beamformer solutions.\n Defaults to 1 through 16 inclusive.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. 
If None, messages are printed.\n\n Returns\n -------\n written_files : list\n The names of the written beamformer solutions (one for each correlator\n node).\n antenna_flags_badsolns:\n Flags for antenna/polarization dimensions of gains.\n \"\"\"\n if corridxs is None:\n corridxs = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16\n ]\n gainshape = (64, 48, 2, 2)\n gains = np.ones(\n (len(fnames), len(corridxs), gainshape[0], gainshape[1],\n gainshape[2], gainshape[3]),\n dtype='<f4'\n )*np.nan\n antenna_flags = [None]*len(fnames)\n eastings = None\n for i, fname in enumerate(fnames):\n tmp_antflags = []\n filepath = '{0}/beamformer_weights_{1}.yaml'.format(outdir, fname)\n if os.path.exists(filepath):\n with open(filepath) as f:\n calibration_params = yaml.load(\n f, Loader=yaml.FullLoader\n )['cal_solutions']\n antenna_order = calibration_params['antenna_order']\n for key in calibration_params['flagged_antennas']:\n if 'casa solutions flagged' in \\\n calibration_params['flagged_antennas'][key]:\n antname = int(key.split(' ')[0])\n tmp_antflags.append(antenna_order.index(antname))\n antenna_flags[i] = sorted(tmp_antflags)\n\n for j, corr in enumerate(corridxs):\n if os.path.exists(\n '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(\n outdir,\n corr,\n fname\n )\n ):\n with open(\n '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(\n outdir,\n corr,\n fname\n ),\n 'rb'\n ) as f:\n data = np.fromfile(f, '<f4')\n eastings = data[:64]\n gains[i, j, ...] = data[64:].reshape(gainshape)\n if antenna_flags[i] is not None:\n gains[i, :, antenna_flags[i], ... ] = np.nan\n else:\n message = \\\n '{0} not found during beamformer weight averaging'.format(\n '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(\n outdir,\n corr,\n fname\n ))\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n if antenna_flags[i] is not None:\n gains[i, :, antenna_flags[i], ... 
] = np.nan\n\n gains = np.nanmean(gains, axis=0) #np.nanmedian(gains, axis=0)\n print(gains.shape) # corr, antenna, freq, pol, complex\n fracflagged = np.sum(np.sum(np.sum(\n np.isnan(gains),\n axis=4), axis=2), axis=0)\\\n /(gains.shape[0]*gains.shape[2]*gains.shape[4])\n antenna_flags_badsolns = fracflagged > tol\n gains[np.isnan(gains)] = 0.\n written_files = []\n if eastings is not None:\n for i, corr in enumerate(corridxs):\n fnameout = 'beamformer_weights_corr{0:02d}_{1}'.format(\n corr, ttime.isot\n )\n wcorr = gains[i, ...].flatten()\n wcorr = np.concatenate([eastings, wcorr], axis=0)\n with open('{0}/{1}.dat'.format(outdir, fnameout), 'wb') as f:\n f.write(bytes(wcorr))\n written_files += ['{0}.dat'.format(fnameout)]\n return written_files, antenna_flags_badsolns\n", "id": "6457294", "language": "Python", "matching_score": 9.216972351074219, "max_stars_count": 1, "path": "dsacalib/ms_io.py" }, { "content": "\"\"\"Calibration routine for DSA-110 calibration with CASA.\n\nAuthor: <NAME>, <EMAIL>, 2020/06\n\"\"\"\nimport shutil\nimport os\nimport glob\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nfrom astropy.coordinates import Angle\nimport pandas\nimport scipy # pylint: disable=unused-import\nfrom casacore.tables import table\nimport dsautils.calstatus as cs\nimport dsacalib.utils as du\nimport dsacalib.ms_io as dmsio\nimport dsacalib.fits_io as dfio\nimport dsacalib.calib as dc\nimport dsacalib.plotting as dp\nimport dsacalib.fringestopping as df\nimport dsacalib.constants as ct\nfrom dsacalib.ms_io import extract_vis_from_ms\nimport astropy.units as u # pylint: disable=wrong-import-order\nfrom astropy.utils import iers # pylint: disable=wrong-import-order\niers.conf.iers_auto_url_mirror = ct.IERS_TABLE\niers.conf.auto_max_age = None\nfrom astropy.time import Time # pylint: disable=wrong-import-position\n\ndef __init__():\n return\n\ndef _check_path(fname):\n \"\"\"Raises an AssertionError if the path `fname` does not exist.\n\n Parameters\n ----------\n fname : str\n The file to check existence of.\n \"\"\"\n assert os.path.exists(fname), 'File {0} does not exist'.format(fname)\n\ndef triple_antenna_cal(\n obs_params, ant_params, throw_exceptions=True, sefd=False, logger=None\n):\n r\"\"\"Calibrate visibilities from 3 antennas.\n\n Assumes visbilities are stored using dsa-10 or dsa-110 fits format.\n The caltable\\_to\\_etcd function should be able to handle this, but I haven't\n tested that yet.\n\n Parameters\n ----------\n obs_params : dict\n Observing parameters\n ant_params : dict\n show_plots : Boolean\n If set to ``True``, plots of the delay and gain calibration solutions\n will be shown. Defaults ``False``.\n throw_exception : Boolean\n If set to ``True``, exceptions will be thrown after being logged in\n syslog. If set to ``False``, the exceptions will not be thrown, but\n will still be logged in syslog. Defaults ``True``.\n sefd : Boolean\n If set to ``True``, enough data (60 minutes) will be included in the\n measurement set to calculate the off-source power (60 minutes) and the\n calibration solutions will be solved against a model of ones. If set to\n ``False``, only 10 minutes will be included in the measurement set and\n the calibration solutison will be solved against a sky model.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. If None, messages are printed.\n\n Returns\n -------\n status : int\n The status code of the pipeline. 
Decode with dsautils.calstatus.\n caltime : float\n The meridian crossing time of the source in MJD. If the input file\n could not be opened, ``None`` will be returned.\n \"\"\"\n # TODO: Only keep one of the gain tables in the end, on a fine timescale.\n status = 0\n current_error = cs.UNKNOWN_ERR\n calstring = 'initialization'\n\n try:\n fname = obs_params['fname']\n msname = obs_params['msname']\n cal = obs_params['cal']\n utc_start = obs_params['utc_start']\n pt_dec = ant_params['pt_dec']\n antenna_order = ant_params['antenna_order']\n refant = ant_params['refant']\n antpos = ant_params['antpos']\n\n # Remove files that we will create so that things will fail if casa\n # doesn't write a table.\n casa_dirnames = [\n '{0}.ms'.format(msname),\n '{0}_{1}_kcal'.format(msname, cal.name),\n '{0}_{1}_2kcal'.format(msname, cal.name),\n '{0}_{1}_bcal'.format(msname, cal.name),\n '{0}_{1}_gpcal'.format(msname, cal.name),\n '{0}_{1}_gacal'.format(msname, cal.name),\n '{0}_{1}_gcal_ant'.format(msname, cal.name)\n ]\n for dirname in casa_dirnames:\n if os.path.exists(dirname):\n shutil.rmtree(dirname)\n\n calstring = 'opening visibility file'\n current_error = (\n cs.INFILE_ERR |\n cs.INV_ANTNUM |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n caldur = 60*u.min if sefd else 10*u.min\n fobs, blen, bname, _tstart, _tstop, tsamp, vis, mjd, lst, \\\n transit_idx, antenna_order = dfio.read_psrfits_file(\n fname,\n cal,\n antenna_order=antenna_order,\n autocorrs=True,\n dur=caldur,\n utc_start=utc_start,\n dsa10=False,\n antpos=antpos\n )\n caltime = mjd[transit_idx]\n\n calstring = 'read and verification of visibility file'\n current_error = (\n cs.CAL_MISSING_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n\n nt = vis.shape[1]\n assert nt > 0, \"calibrator not in file\"\n current_error = (\n cs.INFILE_FORMAT_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n\n nant = len(antenna_order)\n assert nant == 3, (\"triple_antenna_cal only works with a triplet of \"\n \"antennas\")\n assert int(refant) in antenna_order, (\"refant {0} not in \"\n \"visibilities\".format(refant))\n\n calstring = \"flagging of ms data\"\n current_error = (\n cs.FLAGGING_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n# maskf, _fraction_flagged = du.mask_bad_bins(\n# vis,\n# axis=2,\n# thresh=2.0,\n# # medfilt=True, # currently not supported\n# nmed=129\n# )\n# maskt, _fraction_flagged = du.mask_bad_bins(\n# vis,\n# axis=1,\n# thresh=2.0,\n# # medfilt=True, # currently not supported\n# nmed=129\n# )\n maskp, _fraction_flagged = du.mask_bad_pixels(\n vis,\n thresh=6.0,\n #mask=maskt*maskf\n )\n# mask = maskt*maskf*maskp\n# vis *= mask\n vis *= maskp\n\n calstring = 'fringestopping'\n current_error = (\n cs.FRINGES_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n df.fringestop(vis, blen, cal, mjd, fobs, pt_dec)\n\n calstring = 'writing to ms'\n 
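# Write the fringestopped visibilities to a CASA measurement set, attaching the amplitude sky model unless solving for the SEFD.\n        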
current_error = (\n cs.MS_WRITE_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n amp_model = df.amplitude_sky_model(cal, lst, pt_dec, fobs)\n amp_model = np.tile(\n amp_model[np.newaxis, :, :, np.newaxis],\n (vis.shape[0], 1, 1, vis.shape[-1])\n )\n dmsio.convert_to_ms(\n cal,\n vis,\n mjd[0],\n '{0}'.format(msname),\n bname,\n antenna_order,\n tsamp,\n nint=25,\n antpos=antpos,\n dsa10=False,\n model=None if sefd else amp_model\n )\n _check_path('{0}.ms'.format(msname))\n\n calstring = 'flagging of ms data'\n current_error = (\n cs.FLAGGING_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n error = dc.flag_zeros(msname)\n if error > 0:\n status = cs.update(status, ['flagging_err'])\n message = \"Non-fatal error in zero flagging\"\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n if 8 in antenna_order:\n error = dc.flag_antenna(msname, '8', pol='A')\n if error > 0:\n status = cs.update(status, ['flagging_err'])\n message = \"Non-fatal error in antenna 8 flagging\"\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n\n # Antenna-based delay calibration\n calstring = 'delay calibration'\n current_error = (\n cs.DELAY_CAL_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n error = dc.delay_calibration(msname, cal.name, refants=[refant])\n if error > 0:\n status = cs.update(status, ['delay_cal_err'])\n message = 'Non-fatal error occured in delay calibration.'\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n _check_path('{0}_{1}_kcal'.format(msname, cal.name))\n\n calstring = 'flagging of ms data'\n current_error = (\n cs.FLAGGING_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_GAINCALTIME\n )\n if error > 0:\n status = cs.update(status, ['flagging_err'])\n message = 'Non-fatal error occured in calculation of delays on short timescales.'\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n if error > 0:\n status = cs.update(status, ['flagging_err'])\n message = 'Non-fatal error occured in flagging of bad timebins'\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n _check_path('{0}_{1}_2kcal'.format(msname, cal.name))\n calstring = 'baseline-based bandpass and gain calibration'\n current_error = (\n cs.GAIN_BP_CAL_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_GAINCALTIME\n )\n error = dc.calibrate_gain(\n msname,\n cal.name,\n '{0}_{1}'.format(msname, cal.name),\n refant,\n tga='inf',\n tgp='inf',\n blbased=True,\n combined=False\n )\n if error > 0:\n status = cs.update(status, ['gain_bp_cal_err'])\n message = 'Non-fatal error occured in gain/bandpass calibration.'\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n for fname in [\n '{0}_{1}_bcal'.format(msname, cal.name),\n '{0}_{1}_gpcal'.format(msname, cal.name),\n '{0}_{1}_gacal'.format(msname, cal.name)\n ]:\n _check_path(fname)\n calstring = 'calculation of antenna gains'\n gamp, _tamp, famp, _ant1, _ant2 = dmsio.read_caltable(\n '{0}_{1}_gacal'.format(msname, cal.name),\n 
cparam=True\n )\n gphase, _tphase, fphase, _ant1, _ant2 = dmsio.read_caltable(\n '{0}_{1}_gpcal'.format(msname, cal.name),\n cparam=True\n )\n gains = (gamp*gphase).squeeze(axis=2)\n flags = (famp*fphase).squeeze(axis=2)\n # This makes some assumptions about the bl order! Should add some\n # statements to make sure it's true\n gains, flags = dc.fill_antenna_gains(gains, flags)\n\n # These tables will contain the results on fine time-scales.\n gamp = np.abs(gains).astype(np.complex128)\n gamp = gamp.reshape(gamp.shape[0], -1)\n # tb = cc.table()\n with table(\n '{0}_{1}_gacal'.format(msname, cal.name),\n readonly=False\n ) as tb:\n shape = np.array(tb.CPARAM[:]).shape\n tb.putcol('CPARAM', gamp.reshape(shape))\n\n gphase = np.exp(1.j*np.angle(gains))\n with table(\n '{0}_{1}_gpcal'.format(msname, cal.name),\n readonly=False\n ) as tb:\n shape = np.array(tb.CPARAM[:]).shape\n tb.putcol('CPARAM', gphase.reshape(shape))\n\n if not sefd:\n # reduce to a single value to use\n mask = np.ones(flags.shape)\n mask[flags == 1] = np.nan\n gains = np.nanmedian(gains*mask, axis=1, keepdims=True)\n flags = np.min(flags, axis=1, keepdims=True)\n\n if 8 in antenna_order:\n flags[..., 0] = 1\n\n shutil.copytree(\n '{0}/template_gcal_ant'.format(ct.PKG_DATA_PATH),\n '{0}_{1}_gcal_ant'.format(msname, cal.name)\n )\n\n # Write out a new gains that is a single value.\n with table(\n '{0}_{1}_gcal_ant'.format(msname, cal.name),\n readonly=False\n ) as tb:\n tb.putcol('TIME', np.ones(6)*np.median(mjd)*ct.SECONDS_PER_DAY)\n tb.putcol('FLAG', flags.squeeze(axis=1))\n tb.putcol('CPARAM', gains.squeeze(axis=1))\n _check_path('{0}_{1}_gcal_ant'.format(msname, cal.name))\n\n except Exception as exc:\n status = cs.update(status, current_error)\n du.exception_logger(logger, calstring, exc, throw_exceptions)\n try:\n caltime\n except NameError:\n caltime = Time.now().mjd\n\n return status, caltime\n\ndef plot_solutions(\n msname, calname, figure_path, show_plots=False, logger=None\n):\n r\"\"\"Plots the antenna delay, gain and bandpass calibration solutions.\n\n Creates separate files for all solutions. To create one plot with all\n solutions, use plotting.summary_plot.\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. Used to identify the calibration\n tables.\n calname : str\n The name of the calibrator. Used to identify the calibration tables.\n antenna_order : list\n The antenna names, in order.\n fobs : array\n The central frequency of each channel, in GHz.\n blbased : boolean\n True of the calibration was baseline-based.\n figure_dir : str\n The location to save the figures. Defaults ``./figures``.\n show_plots : boolean\n If False, plots are closed after being saved. Defaults False.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. 
If None, messages are printed.\n    \"\"\"\n    try:\n        _ = dp.plot_antenna_delays(\n            msname,\n            calname,\n            outname=figure_path,\n            show=show_plots\n        )\n    except RuntimeError:\n        message = 'Plotting antenna delays failed for {0}'.format(\n            msname\n        )\n        if logger is not None:\n            logger.info(message)\n        else:\n            print(message)\n    try:\n        _ = dp.plot_gain_calibration(\n            msname,\n            calname,\n            outname=figure_path,\n            show=show_plots\n        )\n    except RuntimeError:\n        message = 'Plotting gain calibration solutions failed for {0}'.format(\n            msname\n        )\n        if logger is not None:\n            logger.info(message)\n        else:\n            print(message)\n    try:\n        _ = dp.plot_bandpass(\n            msname,\n            calname,\n            outname=figure_path,\n            show=show_plots\n        )\n    except RuntimeError:\n        message = \\\n            'Plotting bandpass calibration solutions failed for {0}'.format(\n                msname\n            )\n        if logger is not None:\n            logger.info(message)\n        else:\n            print(message)\n\ndef calibration_head(obs_params, ant_params, write_to_etcd=False,\n                     throw_exceptions=None, sefd=False, logger=None):\n    \"\"\"Controls calibration of a dsa10 or dsa110 dataset.\n\n    After calibration, results are written to etcd.\n\n    Parameters\n    ----------\n    obs_params : dict\n        The observing parameters.\n    ant_params : dict\n        The antenna configuration.\n    write_to_etcd : boolean\n        If set to ``True``, the results of the calibration are pushed to etcd.\n        Defaults ``False``.\n    throw_exceptions : boolean\n        If set to ``False``, exceptions are not raised after being logged to\n        syslog. Instead, `calibration_head` and `triple_antenna_cal` return the\n        status value. If set to ``None``, `throw_exceptions` will be set to\n        ``not write_to_etcd``.\n    sefd : boolean\n        If set to ``True``, the solutions will be solved against a model of\n        ones in order to allow fitting of the source pass to the antenna gains\n        and 60 minutes will be saved to the measurement set. If set to\n        ``False``, a sky model will be used in calibration and only 10 minutes\n        of data is saved to the measurement set.\n    logger : dsautils.dsa_syslog.DsaSyslogger() instance\n        Logger to write messages to. If None, messages are printed.\n\n    Returns\n    -------\n    int\n        The status code. 
Decode with dsautils.calstatus.\n \"\"\"\n if throw_exceptions is None:\n throw_exceptions = not write_to_etcd\n message = 'Beginning calibration of ms {0}.ms (start time {1}) using source {2}'.format(\n obs_params['msname'],\n obs_params['utc_start'].isot,\n obs_params['cal'].name\n )\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n status, caltime = triple_antenna_cal(obs_params, ant_params,\n throw_exceptions, sefd, logger=logger)\n message = 'Ending calibration of ms {0}.ms (start time {1}) using source {2} with status {3}'.format(\n obs_params['msname'], obs_params['utc_start'].isot,\n obs_params['cal'].name, status\n )\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n print('Status: {0}'.format(cs.decode(status)))\n print('')\n if write_to_etcd:\n dmsio.caltable_to_etcd(\n obs_params['msname'], obs_params['cal'].name,\n ant_params['antenna_order'], caltime, status, logger=logger\n )\n return status\n\ndef _gauss_offset(xvals, amp, mean, sigma, offset):\n \"\"\"Calculates the value of a Gaussian at the locations `x`.\n\n Parameters\n ----------\n xvals : array\n The x values at which to evaluate the Gaussian.\n amp, mean, sigma, offset : float\n Define the Gaussian: amp * exp(-(x-mean)**2/(2 sigma**2)) + offset\n\n Returns\n -------\n array\n The values of the Gaussian function defined evaluated at xvals.\n \"\"\"\n return amp*np.exp(-(xvals-mean)**2/(2*sigma**2))+offset\n\ndef _gauss(xvals, amp, mean, sigma):\n \"\"\"Calculates the value of a Gaussian at the locations `x`.\n\n\n Parameters\n ----------\n xvals : array\n The x values at which to evaluate the Gaussian.\n amp, mean, sigma : float\n Define the Gaussian: amp * exp(-(x-mean)**2/(2 sigma**2))\n\n Returns\n -------\n array\n The values of the Gaussian function defined evaluated at xvals.\n \"\"\"\n return _gauss_offset(xvals, amp, mean, sigma, 0.)\n\ndef calculate_sefd(\n msname, cal, fmin=None, fmax=None, baseline_cal=False, showplots=False,\n msname_delaycal=None, calname_delaycal=None, halfpower=False, pols=None\n ):\n r\"\"\"Calculates the SEFD from a measurement set.\n\n The measurement set must have been calibrated against a model of ones and\n must include autocorrelations.\n\n Parameters\n ----------\n msname : str\n The measurement set name. The measurement set `msname`.ms will\n be opened.\n cal : src class instance\n The calibrator source. Will be used to identify the correct\n calibration tables. The table `msname`\\_`cal.name`\\_gacal will\n be opened.\n fmin : float\n The lowest frequency to consider when calculating the off-source power\n to use in the SEFD calculation, in GHz. Channels below this frequency\n will be flagged. Defaults 1.35.\n fmax : float\n The greatest frequency to consider when calculating the off-source\n power to use in the SEFD calculation, in GHz. Channels above this\n frequency will be flagged. Defaults 1.45.\n baseline_cal : Boolean\n Set to ``True`` if the gain tables were derived using baseline-based\n calibration. Set to ``False`` if the gain tables were derived using\n antenna-based calibration. Defaults ``True``.\n showplots : Boolean\n If set to ``True``, plots will be generated that show the Gaussian fits\n to the gains. Defaults ``False``.\n msname_delaycal : str\n The name of the measurement set from which delay solutions should be\n applied. Defaults to `msname`.\n calname_delaycal : str\n The name of the calibrator source from which delay solutions should be\n applied. 
Defaults to `calname`.\n halfpower : Boolean\n If True, will calculate the sefd using the half-power point instead of\n using the off-source power. Defaults False.\n pols : list\n The labels of the polarization axes. Defaults ['B', 'A'].\n\n Returns\n -------\n antenna_names : list\n The names of the antennas in their order in `sefds`.\n sefds : ndarray\n The SEFD of each antenna/polarization pair, in Jy. Dimensions (antenna,\n polarization).\n ant_gains : ndarray\n The antenna gains in 1/Jy. Dimensions (antenna, polarization).\n ant_transit_time : ndarray\n The meridian transit time of the source as seen by each antenna/\n polarization pair, in MJD. Dimensions (antenna, polarization).\n fref : float\n The reference frequency of the SEFD measurements in GHz.\n hwhms : float\n The hwhms of the calibrator transits in days.\n \"\"\"\n # Change so figures saved if showplots is False\n if pols is None:\n pols = ['B', 'A']\n if msname_delaycal is None:\n msname_delaycal = msname\n if calname_delaycal is None:\n calname_delaycal = cal.name\n npol = 2\n\n # Get the visibilities (for autocorrs)\n dc.apply_delay_bp_cal(msname, calname_delaycal, msnamecal=msname_delaycal,\n blbased=baseline_cal)\n vis, tvis, fvis, flag, ant1, ant2, pt_dec, _, _ = dmsio.extract_vis_from_ms(\n msname, 'CORRECTED_DATA')\n mask = (1-flag).astype(float)\n mask[mask < 0.5] = np.nan\n vis = vis*mask\n vis = vis[ant1 == ant2, ...]\n antenna_order = ant1[ant1 == ant2]\n nant = len(antenna_order)\n # Note that these are antenna idxs, not names\n\n # Open the gain files and read in the gains\n gain, time, flag, ant1, ant2 = dmsio.read_caltable(\n '{0}_{1}_2gcal'.format(msname, cal.name), cparam=True)\n gain[flag] = np.nan\n antenna, gain = dmsio.get_antenna_gains(gain, ant1, ant2)\n gain = 1/gain\n antenna = list(antenna)\n idxs = [antenna.index(ant) for ant in antenna_order]\n gain = gain[idxs, ...]\n assert gain.shape[0] == nant\n gain = np.abs(gain*np.conjugate(gain))\n gain = np.abs(np.nanmean(gain, axis=2)).squeeze(axis=2)\n idxl = np.searchsorted(fvis, fmin) if fmin is not None else 0\n idxr = np.searchsorted(fvis, fmax) if fmax is not None else vis.shape[-2]\n fref = np.median(fvis[idxl:idxr])\n\n if idxl < idxr:\n vis = vis[..., idxl:idxr, :]\n else:\n vis = vis[..., idxr:idxl, :]\n# imag_fraction = np.nanmean((vis.imag/vis.real).reshape(nant, -1),\n# axis=-1)\n# assert np.nanmax(np.abs(imag_fraction) < 1e-4), (\"Autocorrelations have \"\n# \"non-negligable imaginary \"\n# \"components.\")\n vis = np.abs(vis)\n\n # Complex gain includes an extra relative delay term\n # in the phase, but we really only want the amplitude\n # We will ignore the phase for now\n\n ant_gains_on = np.zeros((nant, npol))\n eant_gains_on = np.zeros((nant, npol))\n ant_transit_time = np.zeros((nant, npol))\n eant_transit_time = np.zeros((nant, npol))\n ant_transit_width = np.zeros((nant, npol))\n eant_transit_width = np.zeros((nant, npol))\n offbins_before = np.zeros((nant, npol), dtype=int)\n offbins_after = np.zeros((nant, npol), dtype=int)\n autocorr_gains_off = np.zeros((nant, npol))\n ant_gains = np.zeros((nant, npol))\n sefds = np.zeros((nant, npol))\n hwhms = np.zeros((nant, npol))\n expected_transit_time = (\n Time(time[0], format='mjd')\n -cal.direction.hadec(\n obstime=time[0]\n )[0]*ct.SECONDS_PER_SIDEREAL_DAY*u.s/(2*np.pi)\n ).mjd-time[0]\n max_flux = df.amplitude_sky_model(\n cal,\n cal.ra.to_value(u.rad),\n pt_dec,\n fref\n )\n\n if showplots:\n nx = 3\n ny = nant//nx\n if nant%nx != 0:\n ny += 1\n _fig, ax = plt.subplots(\n ny, 
nx, figsize=(8*nx, 8*ny), sharey=True\n )\n ccyc = plt.rcParams['axes.prop_cycle'].by_key()['color']\n ax = ax.flatten()\n\n # Fit a Gaussian to the gains\n for i in range(nant):\n for j in range(npol):\n if showplots:\n ax[i].plot(time-time[0], gain[i, :, j], '.', color=ccyc[j])\n initial_params = [np.max(gain[i, :, j]), expected_transit_time,\n 0.0035] #, 0]\n try:\n x = time-time[0]\n y = gain[i, :, j]\n idx = ~np.isnan(y)\n assert len(idx) >= 4\n params, cov = curve_fit(_gauss, x[idx], y[idx],\n p0=initial_params)\n except (RuntimeError, ValueError, AssertionError):\n params = initial_params.copy()\n cov = np.zeros((len(params), len(params)))\n\n ant_gains_on[i, j] = params[0]#+params[3]\n ant_gains[i, j] = ant_gains_on[i, j]/max_flux\n eant_gains_on[i, j] = np.sqrt(cov[0, 0])#+np.sqrt(cov[3, 3])\n\n ant_transit_time[i, j] = time[0]+params[1]\n eant_transit_time[i, j] = np.sqrt(cov[1, 1])\n ant_transit_width[i, j] = params[2]\n eant_transit_width[i, j] = np.sqrt(cov[2, 2])\n if not halfpower:\n offbins_before[i, j] = np.searchsorted(\n time, ant_transit_time[i, j]-ant_transit_width[i, j]*3)\n offbins_after[i, j] = len(time)-np.searchsorted(\n time, ant_transit_time[i, j]+ant_transit_width[i, j]*3)\n idxl = np.searchsorted(\n tvis, ant_transit_time[i, j]-ant_transit_width[i, j]*3)\n idxr = np.searchsorted(\n tvis, ant_transit_time[i, j]+ant_transit_width[i, j]*3)\n autocorr_gains_off[i, j] = np.nanmedian(\n np.concatenate(\n (vis[i, :idxl, :, j], vis[i, idxr:, :, j]), axis=0))\n sefds[i, j] = autocorr_gains_off[i, j]/ant_gains[i, j]\n else:\n hwhm = np.sqrt(2*np.log(2))*ant_transit_width[i, j]\n idxl = np.searchsorted(tvis, ant_transit_time[i, j]-hwhm)\n idxr = np.searchsorted(tvis, ant_transit_time[i, j]+hwhm)\n autocorr_gains_off[i, j] = np.nanmedian(\n np.concatenate(\n (vis[i, idxl-10:idxl+10, :, j],\n vis[i, idxr-10:idxr+10, :, j]), axis=0))\n sefds[i, j] = (\n autocorr_gains_off[i, j]/ant_gains[i, j]- max_flux/2\n )\n hwhms[i, j] = hwhm\n if showplots:\n ax[i].plot(\n time-time[0],\n _gauss(time-time[0], *params),\n '-',\n color=ccyc[j],\n label='{0} {1}: {2:.0f} Jy; {3:.03f} min'.format(\n antenna_order[i]+1,\n pols[j],\n sefds[i, j],\n (\n ant_transit_time[i, j]\n -time[0]\n -expected_transit_time\n )*ct.SECONDS_PER_DAY/60\n )\n )\n ax[i].legend()\n # ax[i].axvline(expected_transit_time, color='k')\n ax[i].set_xlabel(\"Time (d)\")\n ax[i].set_ylabel(\"Unnormalized power\")\n\n if showplots:\n max_gain = np.nanmax(ant_gains_on)\n ax[0].set_ylim(-0.1*max_gain, 1.1*max_gain)\n\n return antenna_order+1, sefds, ant_gains, ant_transit_time, fref, hwhms\n\ndef dsa10_cal(fname, msname, cal, pt_dec, antpos, refant, badants=None):\n \"\"\"Calibrate dsa10 data.\n\n Parameters\n ----------\n fname : str\n The fits file containing the correlated dsa10 data.\n msname : str\n The measurement set containing the correlated dsa10 data.\n cal : dsautils.src instance\n The calibrator source.\n pt_dec : float\n The pointing declination of the array in radians.\n antpos : str\n The path to the ITRF file containing the antenna positions.\n refant : str or int\n The reference antenna name (if str) or index (if int).\n badants : list(str)\n The naems of antennas that should be flagged before calibration.\n \"\"\"\n # TODO: get header information from the ms instead of the fits file.\n if badants is None:\n badants = []\n\n for file_path in ['{0}.ms'.format(msname),\n '{0}_{1}_kcal'.format(msname, cal.name),\n '{0}_{1}_gacal'.format(msname, cal.name),\n '{0}_{1}_gpcal'.format(msname, cal.name),\n 
'{0}_{1}_bcal'.format(msname, cal.name),\n '{0}_{1}_2kcal'.format(msname, cal.name)]:\n if os.path.exists(file_path):\n shutil.rmtree(file_path)\n\n fobs, blen, bname, tstart, _tstop, tsamp, vis, mjd, lst, _transit_idx, \\\n antenna_order = dfio.read_psrfits_file(\n fname, cal, dur=10*u.min, antpos=antpos, badants=badants)\n\n df.fringestop(vis, blen, cal, mjd, fobs, pt_dec)\n amp_model = df.amplitude_sky_model(cal, lst, pt_dec, fobs)\n amp_model = np.tile(amp_model[np.newaxis, :, :, np.newaxis],\n (vis.shape[0], 1, 1, vis.shape[-1]))\n\n dmsio.convert_to_ms(cal, vis, tstart, msname, bname, antenna_order,\n tsamp=tsamp, nint=25, antpos=antpos,\n model=amp_model)\n _check_path('{0}.ms'.format(msname))\n\n dc.flag_zeros(msname)\n if '8' in antenna_order:\n dc.flag_antenna(msname, '8', pol='A')\n\n dc.delay_calibration(msname, cal.name, [refant])\n _check_path('{0}_{1}_kcal'.format(msname, cal.name))\n\n dc.gain_calibration(\n msname,\n cal.name,\n refant=refant,\n forsystemhealth=True\n )\n for tbl in ['gacal', 'gpcal', 'bcal']:\n _check_path('{0}_{1}_{2}'.format(msname, cal.name, tbl))\n\ndef flag_pixels(msname, thresh=6.0, logger=None):\n \"\"\"Flags pixels using dsautils.mask_bad_pixels.\n\n Parameters\n ----------\n msname : str\n The path to the measurement set. Opens `msname`.ms\n thresh : float\n The RFI threshold in units of standard deviation. Anything above\n thresh*stddev + mean will be flagged.\n \"\"\"\n # Flag RFI - only for single spw\n vis, _, _, flags, ant1, ant2, _, _, orig_shape = extract_vis_from_ms(\n msname,\n )\n good_pixels, fraction_flagged = du.mask_bad_pixels(\n vis.squeeze(2),\n mask=~flags.squeeze(2),\n thresh=thresh\n )\n\n # # Not properly account for shape - getting repeat messages\n # (idx1s, idx2s) = np.where(fraction_flagged > 0.3)\n # for idx1 in idx1s:\n # for idx2 in idx2s:\n # message = \\\n # 'Baseline {0}-{1} {2}: {3} percent of data flagged'.format(\n # ant1[idx1],\n # ant2[idx1],\n # 'A' if idx2==1 else 'B',\n # fraction_flagged[idx1, idx2]*100\n # )\n # if logger is not None:\n # logger.info(message)\n # else:\n # print(message)\n\n flags = flags + ~good_pixels[:, :, np.newaxis, :, :]\n if orig_shape[0] == 'time':\n flags = flags.swapaxes(0, 1)\n with table('{0}.ms'.format(msname), readonly=False) as tb:\n shape = np.array(tb.getcol('FLAG')[:]).shape\n tb.putcol('FLAG', flags.reshape(shape))\n\ndef flag_antennas_using_delays(\n antenna_delays, kcorr, msname, kcorr_thresh=0.3, logger=None\n):\n \"\"\"Flags antennas by comparing the delay on short times to the delay cal.\n\n Parameters\n ----------\n antenna_delays : ndarray\n The antenna delays from the 2kcal calibration file, calculated on short\n timescales.\n kcorr : ndarray\n The antenna delays from the kcal calibration file, calculated over the\n entire calibration pass.\n msname : str\n The path to the measurement set. Will open `msname`.ms\n kcorr_thresh : float\n The tolerance for descrepancies between the antenna_delays and kcorr,\n in nanoseconds.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. 
If None, messages are printed.\n \"\"\"\n error = 0\n percent_bad = (\n np.abs(antenna_delays-kcorr) > 1\n ).sum(1).squeeze(1).squeeze(1)/antenna_delays.shape[1]\n for i in range(percent_bad.shape[0]):\n for j in range(percent_bad.shape[1]):\n if percent_bad[i, j] > kcorr_thresh:\n error += not dc.flag_antenna(msname, '{0}'.format(i+1),\n pol='A' if j==0 else 'B')\n message = 'Flagged antenna {0}{1} in {2}'.format(\n i+1, 'A' if j==0 else 'B', msname\n )\n if logger is not None:\n logger.info(message)\n else:\n print(message)\n return error\n\ndef calibrate_measurement_set(\n msname, cal, refants, throw_exceptions=True, bad_antennas=None,\n bad_uvrange='2~27m', keepdelays=False, forsystemhealth=False,\n interp_thresh=1.5, interp_polyorder=7, blbased=False, manual_flags=None,\n logger=None\n):\n r\"\"\"Calibrates the measurement set.\n\n Calibration can be done with the aim of monitoring system health (set\n `forsystemhealth=True`), obtaining beamformer weights (set\n `forsystemhealth=False` and `keepdelays=False`), or obtaining delays (set\n `forsystemhealth=False` and `keepdelays=True`, new beamformer weights will\n be generated as well).\n\n Parameters\n ----------\n msname : str\n The name of the measurement set. Will open `msname`.ms\n cal : dsacalib.utils.src instance\n The calibration source. Calibration tables will begin with\n `msname`\\_`cal.name`\n refant : str or int\n The reference antenna name (if str) or index (if int) for calibration.\n throw_exceptions : bool\n If set to False, exceptions will not be thrown, although they will be\n logged to syslog. Defaults True.\n bad_antennas : list(str)\n Antennas (names) to be flagged before calibration.\n bad_uvrange : str\n Baselines with lengths within bad_uvrange will be flagged before\n calibration. Must be a casa-understood string with units.\n keepdelays : bool\n Only used if `forsystemhealth` is False. If `keepdelays` is set to\n False and `forsystemhealth` is set to False, then delays are integrated\n into the bandpass solutions and the kcal table is set to all zeros. If\n `keepdelays` is set to True and `forsystemhealth` is set to False, then\n delays are kept at 2 nanosecond resolution. If `forsystemhealth` is\n set to True, delays are kept at full resolution regardless of the\n keepdelays parameter. Defaults False.\n forsystemhealth : bool\n Set to True for full-resolution delay and bandpass solutions to use to\n monitor system health, or to False to generate beamformer weights and\n delays. Defaults False.\n interp_thresh: float\n Used if `forsystemhealth` is False, when smoothing bandpass gains.\n The gain amplitudes and phases are fit using a polynomial after any\n points more than interp_thresh*std away from the median-filtered trend\n are flagged.\n interp_polyorder : int\n Used if `forsystemhealth` is False, when smoothing bandpass gains.\n The gain amplitudes and phases are fit using a polynomial of order\n interp_polyorder.\n blbased : boolean\n Set to True for baseline-based calibration, False for antenna-based\n calibration.\n manual_flags : list(str)\n Include any additional flags to be done prior to calibration, as\n CASA-understood strings.\n logger : dsautils.dsa_syslog.DsaSyslogger() instance\n Logger to write messages too. If None, messages are printed.\n\n Returns\n -------\n int\n A status code. 
Decode with dsautils.calstatus\n \"\"\"\n if isinstance(refants, (int,str)):\n refant = refants\n refants = [refant]\n else:\n refant = refants[0]\n\n print('entered calibration')\n status = 0\n current_error = cs.UNKNOWN_ERR\n calstring = 'initialization'\n\n try:\n # Remove files that we will create so that things will fail if casa\n # doesn't write a table.\n print('removing files')\n tables_to_remove = [\n '{0}_{1}_2kcal'.format(msname, cal.name),\n '{0}_{1}_kcal'.format(msname, cal.name),\n '{0}_{1}_bkcal'.format(msname, cal.name),\n '{0}_{1}_gacal'.format(msname, cal.name),\n '{0}_{1}_gpcal'.format(msname, cal.name),\n '{0}_{1}_bcal'.format(msname, cal.name)\n ]\n if forsystemhealth:\n tables_to_remove += [\n '{0}_{1}_2gcal'.format(msname, cal.name)\n ]\n for path in tables_to_remove:\n if os.path.exists(path):\n shutil.rmtree(path)\n print('flagging of ms data')\n calstring = \"flagging of ms data\"\n current_error = (\n cs.FLAGGING_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n print('resetting flags')\n # Reset flags in the measurement set\n dc.reset_flags(msname, datacolumn='data')\n dc.reset_flags(msname, datacolumn='model')\n dc.reset_flags(msname, datacolumn='corrected')\n print('flagging baselines')\n current_error = (\n cs.FLAGGING_ERR\n )\n error = dc.flag_baselines(msname, uvrange=bad_uvrange)\n if error > 0:\n message = 'Non-fatal error occured in flagging short baselines of {0}.'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n print('flagging zeros')\n error = dc.flag_zeros(msname)\n if error > 0:\n message = 'Non-fatal error occured in flagging zeros of {0}.'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n print('flagging antennas')\n if bad_antennas is not None:\n for ant in bad_antennas:\n error = dc.flag_antenna(msname, ant)\n if error > 0:\n message = 'Non-fatal error occured in flagging ant {0} of {1}.'.format(ant, msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n if manual_flags is not None:\n for entry in manual_flags:\n dc.flag_manual(msname, entry[0], entry[1])\n print('flagging rfi')\n flag_pixels(msname)\n if error > 0:\n message = 'Non-fatal error occured in flagging bad pixels of {0}.'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n print('delay cal')\n # Antenna-based delay calibration\n calstring = 'delay calibration'\n current_error = (\n cs.DELAY_CAL_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n error = dc.delay_calibration(\n msname,\n cal.name,\n refants=refants\n )\n if error > 0:\n status = cs.update(status, cs.DELAY_CAL_ERR )\n message = 'Non-fatal error occured in delay calibration of {0}.'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n _check_path('{0}_{1}_kcal'.format(msname, cal.name))\n print('flagging based on delay cal')\n calstring = 'flagging of ms data'\n current_error = (\n cs.FLAGGING_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_GAINCALTIME\n )\n _times, antenna_delays, kcorr, _ant_nos = dp.plot_antenna_delays(\n msname, cal.name, show=False)\n error += 
flag_antennas_using_delays(antenna_delays, kcorr, msname)\n if error > 0:\n status = cs.update(status, cs.FLAGGING_ERR)\n message = 'Non-fatal error occured in flagging of bad timebins on {0}'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n try:\n _check_path('{0}_{1}_2kcal'.format(msname, cal.name))\n except AssertionError:\n status = cs.update(status, cs.FLAGGING_ERR)\n message = 'Non-fatal error occured in flagging of bad timebins on {0}'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n print('delay cal again')\n # Antenna-based delay calibration\n calstring = 'delay calibration'\n current_error = (\n cs.DELAY_CAL_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_DELAY_P1 |\n cs.INV_DELAY_P2 |\n cs.INV_GAINCALTIME |\n cs.INV_DELAYCALTIME\n )\n shutil.rmtree('{0}_{1}_kcal'.format(msname, cal.name))\n shutil.rmtree('{0}_{1}_2kcal'.format(msname, cal.name))\n error = dc.delay_calibration(msname, cal.name, refants=refants)\n if error > 0:\n status = cs.update(status, cs.DELAY_CAL_ERR )\n message = 'Non-fatal error occured in delay calibration ' + \\\n 'of {0}.'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n _check_path('{0}_{1}_kcal'.format(msname, cal.name))\n\n print('bandpass and gain cal')\n calstring = 'bandpass and gain calibration'\n current_error = (\n cs.GAIN_BP_CAL_ERR |\n cs.INV_GAINAMP_P1 |\n cs.INV_GAINAMP_P2 |\n cs.INV_GAINPHASE_P1 |\n cs.INV_GAINPHASE_P2 |\n cs.INV_GAINCALTIME\n )\n\n error = dc.gain_calibration(\n msname,\n cal.name,\n refant,\n blbased=blbased,\n forsystemhealth=forsystemhealth,\n keepdelays=keepdelays,\n interp_thresh=interp_thresh,\n interp_polyorder=interp_polyorder\n )\n if error > 0:\n status = cs.update(status, cs.GAIN_BP_CAL_ERR)\n message = 'Non-fatal error occured in gain/bandpass calibration of {0}.'.format(msname)\n if logger is not None:\n logger.warning(message)\n else:\n print(message)\n fnames = [\n '{0}_{1}_bcal'.format(msname, cal.name),\n '{0}_{1}_bacal'.format(msname, cal.name),\n '{0}_{1}_bpcal'.format(msname, cal.name),\n '{0}_{1}_gpcal'.format(msname, cal.name),\n '{0}_{1}_gacal'.format(msname, cal.name)\n ]\n if forsystemhealth:\n fnames += [\n '{0}_{1}_2gcal'.format(msname, cal.name)\n ]\n if not keepdelays and not forsystemhealth:\n fnames += [\n '{0}_{1}_bkcal'.format(msname, cal.name)\n ]\n for fname in fnames:\n _check_path(fname)\n print('combining bandpass and delay solns')\n # Combine bandpass solutions and delay solutions\n with table('{0}_{1}_bacal'.format(msname, cal.name)) as tb:\n bpass = np.array(tb.CPARAM[:])\n with table('{0}_{1}_bpcal'.format(msname, cal.name)) as tb:\n bpass *= np.array(tb.CPARAM[:])\n if not forsystemhealth:\n with table('{0}_{1}_bkcal'.format(msname, cal.name)) as tb:\n bpass = np.array(tb.CPARAM[:])\n with table(\n '{0}_{1}_bcal'.format(msname, cal.name),\n readonly=False\n ) as tb:\n tb.putcol('CPARAM', bpass)\n if not forsystemhealth:\n tbflag = np.array(tb.FLAG[:])\n tb.putcol('FLAG', np.zeros(tbflag.shape, tbflag.dtype))\n\n except Exception as exc:\n status = cs.update(status, current_error)\n du.exception_logger(logger, calstring, exc, throw_exceptions)\n print('end of cal routine')\n return status\n\ndef cal_in_datetime(dt, transit_time, duration=5*u.min, filelength=15*u.min):\n \"\"\"Check to see if a transit is in a given file.\n\n Parameters\n ----------\n dt : str\n The start time of the file, 
given as a string.\n E.g. '2020-10-06T23:19:02'\n transit_time : astropy.time.Time instance\n The transit time of the source.\n duration : astropy quantity\n The amount of time around transit you are interested in, in minutes or\n seconds.\n filelength : astropy quantity\n The length of the hdf5 file, in minutes or seconds.\n\n Returns\n -------\n bool\n True if at least part of the transit is within the file, else False.\n \"\"\"\n filestart = Time(dt)\n fileend = filestart+filelength\n transitstart = transit_time-duration/2\n transitend = transit_time+duration/2\n\n # For any of these conditions,\n # the file contains data that we want\n if (filestart < transitstart) and (fileend > transitend):\n transit_file = True\n elif (filestart > transitstart) and (fileend < transitend):\n transit_file = True\n elif (fileend > transitstart) and \\\n (fileend-transitstart < duration):\n transit_file = True\n elif (filestart < transitend) and \\\n (transitend-filestart) < duration:\n transit_file = True\n else:\n transit_file = False\n return transit_file\n\ndef get_files_for_cal(\n caltable, refcorr='01', duration=5*u.min, filelength=15*u.min,\n hdf5dir='/mnt/data/dsa110/correlator/', date_specifier='*'):\n \"\"\"Returns a dictionary containing the filenames for each calibrator pass.\n\n Parameters\n ----------\n caltable : str\n The path to the csv file containing calibrators of interest.\n refcorr : str\n The reference correlator to search for recent hdf5 files from. Searches\n the directory `hdf5dir`/corr`refcorr`/\n duration : astropy quantity\n The duration around transit which you are interested in extracting, in\n minutes or seconds.\n filelength : astropy quantity\n The length of the hdf5 files, in minutes or seconds.\n hdf5dir : str\n The path to the hdf5 files.\n date_specifier : str\n A specifier to include to limit the dates for which you are interested\n in. Should be something interpretable by glob and should be to the\n second precision. E.g. 
`2020-10-06*`, `2020-10-0[678]*` and\n `2020-10-06T01:03:??` are all valid.\n\n Returns\n -------\n dict\n A dictionary specifying the hdf5 filenames that correspond to the\n requested datesand calibrators.\n \"\"\"\n calsources = pandas.read_csv(caltable, header=0)\n files = sorted(\n glob.glob(\n '{0}/corr{1}/{2}.hdf5'.format(\n hdf5dir,\n refcorr,\n date_specifier\n )\n )\n )\n datetimes = [f.split('/')[-1][:19] for f in files]\n if len(np.unique(datetimes)) != len(datetimes):\n print('Multiple files exist for the same time.')\n dates = np.unique([dt[:10] for dt in datetimes])\n\n filenames = dict()\n for date in dates:\n filenames[date] = dict()\n for _index, row in calsources.iterrows():\n if isinstance(row['ra'], str):\n rowra = row['ra']\n else:\n rowra = row['ra']*u.deg\n if isinstance(row['dec'], str):\n rowdec = row['dec']\n else:\n rowdec = row['dec']*u.deg\n cal = du.src(\n row['source'],\n ra=Angle(rowra),\n dec=Angle(rowdec),\n I=row['flux (Jy)']\n )\n\n midnight = Time('{0}T00:00:00'.format(date))\n delta_lst = -1*(\n cal.direction.hadec(midnight.mjd)[0]\n )%(2*np.pi)\n transit_time = (\n midnight + delta_lst/(2*np.pi)*ct.SECONDS_PER_SIDEREAL_DAY*u.s\n )\n assert transit_time.isot[:10]==date\n\n # Get the filenames for each calibrator transit\n transit_files = []\n for dt in datetimes:\n if cal_in_datetime(dt, transit_time, duration, filelength):\n transit_files += [dt]\n\n filenames[date][cal.name] = {\n 'cal': cal,\n 'transit_time': transit_time,\n 'files': transit_files\n }\n return filenames\n", "id": "1091801", "language": "Python", "matching_score": 5.896885871887207, "max_stars_count": 1, "path": "dsacalib/routines.py" }, { "content": "\"\"\"\nDSACALIB/UTILS.PY\n\n<NAME>, <EMAIL>, 10/2019\n\nModified for python3 from DSA-10 routines written by <NAME>, <NAME>.\n\nRoutines to interact w/ fits visibilities recorded by DSA-10, hdf5 visibilities\nrecorded by DSA-110, and visibility in CASA measurement sets.\n\"\"\"\n\n# TODO: Update source class\n\n# Always import scipy before importing casatools.\nimport traceback\nfrom scipy.ndimage.filters import median_filter\nimport numpy as np\nfrom antpos.utils import get_itrf\nimport astropy.units as u\nfrom astropy.coordinates import Angle\nfrom dsacalib import constants as ct\nimport casatools as cc\n\ndef exception_logger(logger, task, exception, throw):\n \"\"\"Logs exception traceback to syslog using the dsa_syslog module.\n\n Parameters\n ----------\n logger : dsa_syslog.DsaSyslogger() instance\n The logger used for within the reduction pipeline.\n task : str\n A short description of where in the pipeline the error occured.\n exception : Exception\n The exception that occured.\n throw : boolean\n If set to True, the exception is raised after the traceback is written\n to syslogs.\n \"\"\"\n error_string = 'During {0}, {1} occurred:\\n{2}'.format(\n task, type(exception).__name__, ''.join(\n traceback.format_tb(exception.__traceback__)\n )\n )\n if logger is not None:\n logger.error(error_string)\n else:\n print(error_string)\n if throw:\n raise exception\n\nclass src():\n \"\"\"Simple class for holding source parameters.\n \"\"\"\n\n def __init__(self, name, ra, dec, I=1., epoch='J2000', pa=None,\n maj_axis=None, min_axis=None):\n \"\"\"Initializes the src class.\n\n Parameters\n ----------\n name : str\n Identifier for the source.\n ra : str\n The right ascension of the source. e.g. \"12h00m19.21s\".Astropy\n quantity also accepted.\n dec : str\n The declination of the source. e.g. \"+73d00m45.7s\". 
Astropy\n quantity also accepted.\n I : float\n The flux of the source in Jy. Defaults 1.\n epoch : str\n The epoch of `ra` and `dec`. Defaults \"J2000\".\n pa : float\n The position angle in degrees. Defaults ``None``.\n maj_axis : float\n The major axis in arcseconds. Defaults ``None``.\n min_axis : float\n The minor axis in arcseconds. Defaults ``None``.\n \"\"\"\n self.name = name\n self.I = I\n assert epoch == 'J2000'\n self.epoch = 'J2000'\n if isinstance(ra, str):\n ra = to_deg(ra)\n if isinstance(dec, str):\n dec = to_deg(dec)\n self.ra = ra\n self.dec = dec\n self.direction = direction(\n 'J2000',\n ra.to_value(u.rad),\n dec.to_value(u.rad)\n )\n self.pa = pa\n if maj_axis is None:\n self.maj_axis = None\n else:\n self.maj_axis = maj_axis*u.arcsecond\n if min_axis is None:\n self.min_axis = None\n else:\n self.min_axis = min_axis*u.arcsecond\n\ndef to_deg(string):\n \"\"\"Converts a string representation of RA or DEC to degrees.\n\n Parameters\n ----------\n string : str\n RA or DEC in string format e.g. \"12h00m19.21s\" or \"+73d00m45.7s\".\n\n Returns\n -------\n deg : astropy quantity\n The angle in degrees.\n \"\"\"\n return Angle(string).to(u.deg)\n\ndef get_autobl_indices(nant, casa=False):\n \"\"\"Returns a list of the indices containing the autocorrelations.\n\n Can return the index for either correlator-ordered visibilities (`casa` set\n to ``False``) or CASA-ordered visibilities (`casa` set to ``True``).\n\n Parameters\n ----------\n nant : int\n The number of antennas in the visibility set.\n casa : boolean\n Whether the visibilities follow CASA ordering standards (`casa` set to\n ``True``) or DSA-10/DSA-110 correlator ordering standards (`casa` set\n to ``False``). Defaults to ``False``, or correlator ordering standards.\n\n Returns\n -------\n auto_bls : list\n The list of indices in the visibilities corresponding to\n autocorrelations.\n \"\"\"\n auto_bls = []\n i = -1\n for j in range(1, nant+1):\n i += j\n auto_bls += [i]\n if casa:\n nbls = (nant*(nant+1))//2\n auto_bls = [(nbls-1)-aidx for aidx in auto_bls]\n auto_bls = auto_bls[::-1]\n return auto_bls\n\ndef get_antpos_itrf(antpos):\n \"\"\"Reads and orders antenna positions from a text or csv file.\n\n Parameters\n ----------\n antpos : str\n The path to the text or csv file containing the antenna positions.\n\n Returns\n -------\n anum : list(int)\n The antenna numbers, in numerical order.\n xx, yy, zz : list(float)\n The ITRF coordinates of the antennas, in meters.\n \"\"\"\n if antpos[-4:] == '.txt':\n anum, xx, yy, zz = np.loadtxt(antpos).transpose()\n anum = anum.astype(int)+1\n anum, xx, yy, zz = zip(*sorted(zip(anum, xx, yy, zz)))\n elif antpos[-4:] == '.csv':\n df = get_itrf(antpos)\n anum = np.array(df.index)\n xx = np.array(df[['dx_m']])\n yy = np.array(df[['dy_m']])\n zz = np.array(df[['dz_m']])\n return anum, xx, yy, zz\n\ndef mask_bad_bins(vis, axis, thresh=6.0, medfilt=False, nmed=129):\n \"\"\"Masks bad channels or time bins in visibility data.\n\n Parameters\n ----------\n vis : ndarray\n The visibility array, with dimensions (baselines, time, frequency,\n polarization)\n axis : int\n The axis to flag along. `axis` set to 1 will flag bad time bins. `axis`\n set to 2 will flag bad frequency bins.\n thresh : float\n The threshold above which to flag data. Anything that deviates from the\n median by more than `thresh` multiplied by the standard deviation is\n flagged.\n medfilt : Boolean\n Whether to median filter to remove an average trend. If ``True``, will\n median filter. 
If ``False``, will subtract the median for the\n baseline/pol pair.\n nmed : int\n The size of the median filter to use. Only used in medfilt is ``True``.\n Must be an odd integer.\n\n Returns\n -------\n good_bins : ndarray\n Has a value of 1 where the bin is good, and 0 where the bin should be\n flagged. If `axis` is 2, the dimensions are (baselines, 1, frequency,\n polarization). If `axis` is 1, the dimensions are (baselines, time, 1,\n polarization).\n fraction_flagged : ndarray\n The fraction of data flagged for each baseline/polarization pair.\n Dimensions (baselines, polarization).\n \"\"\"\n # TODO: Update medfilt to use the correct axis\n assert not medfilt\n assert axis in (1, 2)\n avg_axis = 1 if axis == 2 else 2\n\n # Average over time (or frequency) first.\n vis_avg = np.abs(np.mean(vis, axis=avg_axis, keepdims=True))\n # Median filter over frequency (or time) and remove the median trend or\n # remove the median.\n if medfilt:\n vis_avg_mf = median_filter(vis_avg.real, size=(1, nmed, 1))\n vis_avg -= vis_avg_mf\n else:\n vis_avg -= np.median(vis_avg, axis=1, keepdims=True)\n # Calculate the standard deviation along the frequency (or time) axis.\n vis_std = np.std(vis_avg, axis=1, keepdims=True)\n # Get good channels.\n good_bins = np.abs(vis_avg) < thresh*vis_std\n fraction_flagged = (\n 1-good_bins.sum(axis=axis)/good_bins.shape[axis]\n ).squeeze()\n return good_bins, fraction_flagged\n\ndef mask_bad_pixels(vis, thresh=6.0, mask=None):\n r\"\"\"Masks pixels with values above a SNR threshold within each visibility.\n\n Parameters\n ----------\n vis : ndarray\n The complex visibilities. Dimensions (baseline, time, frequency,\n polarization).\n thresh : float\n The threshold above which to flag data. Data above `thresh`\\*the\n standard deviation in each channel of each visiblity is flagged.\n Defaults 6.\n mask : ndarray\n A mask for data that is already flagged. Should be 0 where data has\n been flagged, 1 otherwise. Same dimensions as `vis`. Data previously\n flagged is not used in the calculation of the channel standard\n deviations.\n\n Returns\n -------\n good_pixels : ndarray\n Whether a given pixel in `vis` is good (1 or ``True``) or bad (i.e.\n above the threshold: 0 or ``False``). Same dimensions as ``vis``.\n fraction_flagged : array\n The ratio of the flagged data to the total number of pixels for each\n baseline/polarization.\n \"\"\"\n (nbls, nt, nf, npol) = vis.shape\n vis = np.abs(vis.reshape(nbls, -1, npol))\n vis = vis-np.median(vis, axis=1, keepdims=True)\n if mask is not None:\n vis = vis*mask.reshape(nbls, -1, npol)\n std = np.std(np.abs(vis), axis=1, keepdims=True)\n good_pixels = np.abs(vis) < thresh*std\n fraction_flagged = 1 - good_pixels.sum(1)/good_pixels.shape[1]\n good_pixels = good_pixels.reshape(nbls, nt, nf, npol)\n return good_pixels, fraction_flagged\n\ndef daz_dha(dec, daz=None, dha=None, lat=ct.OVRO_LAT):\n \"\"\"Converts an offset between azimuth and hour angle.\n\n Assumes that the offset in azimuth or hour angle from an azimuth of pi or\n hour angle of 0 is small. One of `daz` or `dha` must be provided, the\n other is calculated.\n\n Parameters\n ----------\n dec : float\n The pointing declination of the antenna in radians.\n daz : float\n The azimuth offset in radians. ``None`` may also be passed, in which\n case the azimuth offset is calculated and returned. Defaults to\n ``None``.\n dha : float\n The hour angle offset in radians. ``None`` may also be passed, in which\n case the hour angle offset is calculated and returned. 
Defaults to\n ``None``.\n lat : float\n The latitude of the antenna in radians. Defaults to the value of\n ``ovro_lat`` defined in ``dsacalib.constants``.\n\n Returns\n -------\n float\n The converted offset. If the value of `daz` passed was not ``None``,\n this is the hour angle offset corresponding to the azimuth offset\n `daz`. If the value of `dha` passed was not ``None``, this is the\n azimuth offset corresonding to the hour angle offset `dha`.\n\n Raises\n ------\n RuntimeError\n If neither `daz or `dha` is defined.\n \"\"\"\n factor = -1*(np.sin(lat)-np.tan(dec)*np.cos(lat))\n if daz is not None:\n assert dha is None, \"daz and dha cannot both be defined.\"\n ans = daz*factor\n elif dha is not None:\n ans = dha/factor\n else:\n raise RuntimeError('One of daz or dha must be defined')\n return ans\n\nclass direction():\n \"\"\"Class for holding sky coordinates and converting between ICRS and FK5.\n \n Parameters\n ----------\n epoch : str\n 'J2000' (for ICRS or J2000 coordinates) or 'HADEC' (for FK5 coordinates\n at an equinox of obstime)\n lon : float\n The longitude (right ascension or hour angle) in radians\n lat : float\n The latitude (declination) in radians\n obstime : float\n The observation time in mjd.\n observatory : str\n The name of the observatory\n \"\"\"\n def __init__(self, epoch, lon, lat, obstime=None, observatory='OVRO_MMA'):\n \n assert epoch in ['J2000', 'HADEC']\n if epoch == 'HADEC':\n assert obstime is not None\n self.epoch = epoch\n self.lon = lon\n self.lat = lat\n self.obstime = obstime\n self.observatory = observatory\n \n def J2000(self, obstime=None, observatory=None):\n \"\"\"Provides direction in J2000 coordinates.\n\n Parameters\n ----------\n obstime : float\n Time of observation in mjd.\n location : str\n Name of the observatory.\n\n Returns\n -------\n tuple\n ra, dec at J2000 in units of radians.\n \"\"\"\n if self.epoch == 'J2000':\n return self.lon, self.lat\n\n assert self.epoch == 'HADEC'\n if obstime is None:\n assert self.obstime is not None\n obstime = self.obstime\n if observatory is None:\n assert self.observatory is not None\n observatory = self.observatory\n\n me = cc.measures()\n epoch = me.epoch(\n 'UTC',\n '{0}d'.format(obstime)\n )\n location = me.observatory(observatory)\n source = me.direction(\n 'HADEC', \n '{0}rad'.format(self.lon),\n '{0}rad'.format(self.lat)\n )\n me.doframe(epoch)\n me.doframe(location)\n output = me.measure(source, 'J2000')\n assert output['m0']['unit'] == 'rad'\n assert output['m1']['unit'] == 'rad'\n return output['m0']['value'], output['m1']['value']\n\n def hadec(self, obstime=None, observatory=None):\n \"\"\"Provides direction in HADEC (FK5) at `obstime`.\n\n Parameters\n ----------\n obstime : float\n Time of observation in mjd.\n location : str\n Name of the observatory.\n\n Returns\n -------\n tuple\n ha, dec at obstime in units of radians.\n \"\"\"\n if self.epoch == 'HADEC':\n assert obstime is None\n return self.lon, self.lat\n\n assert self.epoch == 'J2000'\n if obstime is None:\n assert self.obstime is not None\n obstime = self.obstime\n if observatory is None:\n assert self.observatory is not None\n observatory = self.observatory\n me = cc.measures()\n epoch = me.epoch(\n 'UTC',\n '{0}d'.format(obstime)\n )\n location = me.observatory(observatory)\n source = me.direction(\n 'J2000', \n '{0}rad'.format(self.lon),\n '{0}rad'.format(self.lat)\n )\n me.doframe(epoch)\n me.doframe(location)\n output = me.measure(source, 'HADEC')\n assert output['m0']['unit'] == 'rad'\n assert output['m1']['unit'] == 
'rad'\n return output['m0']['value'], output['m1']['value']\n", "id": "11160947", "language": "Python", "matching_score": 2.027029514312744, "max_stars_count": 1, "path": "dsacalib/utils.py" }, { "content": "import logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\nimport matplotlib.pyplot as plt\nimport casatools as tools\nimport casatasks as tasks\nimport os.path\nimport shutil\n\n\ndef gensources(complist='src.cl', caldirection=\"J2000 12h00m00.0s 50d00m00.0s\", calflux=1.0,\n srcdirection=\"J2000 12h30m00.0s 50d00m00.0s\", srcflux=1.0,\n freq='1.4GHz'):\n \"\"\" Create component list\n \"\"\"\n\n if os.path.exists(complist):\n logger.info(\"Removing existing file, {0}\".format(complist))\n shutil.rmtree(complist)\n\n # assume compact sources\n cl = tools.componentlist()\n cl.addcomponent(dir=caldirection, flux=calflux, fluxunit='Jy', freq=freq, shape=\"Gaussian\", \n majoraxis=\"1arcsec\", minoraxis='1arcsec', positionangle='0deg')\n if srcdirection is not None:\n cl.addcomponent(dir=srcdirection, flux=srcflux, fluxunit='Jy', freq=freq, shape=\"Gaussian\", \n majoraxis=\"1arcsec\", minoraxis='1arcsec', positionangle='0deg')\n cl.rename(complist)\n cl.done()\n\n# FIRST model image\n# > hdulist = astroquery.skyview.SkyView.get_images(position=co, survey='VLA FIRST (1.4 GHz)', height=2*u.deg, width=2*u.deg, pixels=7200) \n# Header from FIRST archive only has two axes. Need to force definition on import.\n# > casatasks.importfits(fitsimage='first_12h+50d.fits', imagename='first_12h+50d.ms', beam=[\"5arcsec\", \"5arcsec\", \"0deg\"], overwrite=True, defaultaxes=True, defaultaxesvalues=[180.0, 50.0, 1400000000.0, 'I'])\n\n\ndef transit(direction, integration, num):\n i = 0\n while i < num:\n epoch, ra, dec = direction.split(' ')\n hh, mm, ss = ra.replace('h', ' ').replace('m', ' ')[:-1].split()\n ss = float(ss) + float(integration)\n ra = '{0}h{1}m{2}s'.format(hh, mm, ss)\n direction = ' '.join([epoch, ra, dec])\n yield direction\n i += 1\n\n\ndef simulate(imagename='', complist='', msname='dsa110-calsrc.ms', freq='1.4GHz', integrationtime='10s',\n diameter=5.0, noise='0Jy', gainnoise=0., nchan=1,\n calobsdir = \"J2000 12h00m00.0s 50d00m00.0s\", srcobsdir=\"J2000 12h30m00.0s 50d00m00.0s\"):\n \"\"\" Use source model to generate simulated ms for a few DSA antennas.\n If imagename and complist both provided, then complist will be added to image.\n \"\"\"\n\n # outriggers\n x = [0, 400, 380, 370, 410, 420, 200, -200, -400, -980, -550, -1250, -1200, -1200, -1350]\n y = [-1050, -500, 350, 800, 975, 1120, 1100, 1050, 950, 775, 1000, 1120, 575, -800, -900]\n names = ['DSA-101', 'DSA-102', 'DSA-103', 'DSA-104', 'DSA-105', 'DSA-106', 'DSA-107', 'DSA-108', 'DSA-109', 'DSA-110', 'DSA-111', 'DSA-112', 'DSA-113', 'DSA-114', 'DSA-115']\n\n if os.path.exists(msname):\n logger.info(\"Removing existing file, {0}\".format(msname))\n shutil.rmtree(msname)\n\n sm = tools.simulator()\n sm.open(msname)\n\n me = tools.measures()\n refpos = me.observatory('OVRO_MMA') \n # TODO: confirm coordinates and set offsets from OVRO_MMA\n sm.setconfig(telescopename='DSA-110', x=x, y=y, dishdiameter=[diameter]*len(x), z=[0.]*len(x), offset=[0.0],\n mount=['ALT-AZ'], antname=names, padname=names, coordsystem='local', referencelocation=refpos)\n\n sm.setspwindow(spwname='LBand', freq=freq, deltafreq='0.5MHz', freqresolution='0.5MHz', nchannels=nchan, stokes='XX YY')\n sm.settimes(integrationtime=integrationtime, 
usehourangle=True, referencetime=58722)\n sm.setfeed(mode='perfect X Y')\n sm.setauto(autocorrwt=0.0)\n\n vp = tools.vpmanager()\n vp.reset()\n vp.setpbairy(telescope=\"DSA-110\", dishdiam=\"{0}m\".format(diameter), maxrad=\"10deg\", blockagediam=\"1m\")\n sm.setvp(dovp=True, usedefaultvp=False)\n\n if calobsdir is not None:\n sm.setfield(sourcename='cal', sourcedirection=me.direction(*calobsdir.split()))\n if srcobsdir is not None:\n sm.setfield(sourcename='src', sourcedirection=me.direction(*srcobsdir.split()))\n\n if calobsdir is not None:\n# sm.observe(sourcename='cal', spwname='LBand', starttime='-450s', stoptime='450s') # times are in HA referenced to first source\n sm.observemany(sourcenames=5*['src'], spwname='LBand', starttimes=5*['-5s'], stoptimes=5*['5s'], directions=list(transit(calobsdir, 5., 5))) # times are in HA referenced to first source\n\n if srcobsdir is not None:\n sm.observe(sourcename='src', spwname='LBand', starttime='1350s', stoptime='2250s') # 30min later\n\n if len(imagename) and len(complist):\n sm.predict(imagename=imagename)\n if len(complist):\n sm.predict(complist=complist, incremental=True)\n elif len(complist):\n sm.predict(complist=complist)\n\n if noise != '0Jy':\n sm.setnoise(mode='simplenoise', simplenoise=noise)\n\n if gainnoise:\n sm.setgain(mode='fbm', amplitude=gainnoise)\n\n if (noise != '0Jy') or gainnoise:\n sm.corrupt()\n\n sm.summary()\n sm.done()\n\n\ndef read(msname='dsa110-calsrc.ms'):\n \"\"\" Read simulated ms and return data\n \"\"\"\n\n ms = tools.ms() \n ms.open('dsa110-calsrc.ms') \n dd = ms.getdata(items=['data', 'axis_info', 'uvw'], ifraxis=True) \n data = dd['data'] \n# times = dd['axis_info']['time_axis']['MJDseconds'] \n# plt.plot(data[...,0].flatten().real, data[...,0].flatten().imag, '.') \n logger.info(\"Read data of shape: {0}\".format(data.shape))\n\n return data\n\n\ndef solve(msname='dsa110-calsrc.ms', calname='cal.G', apply=False, show=True):\n cb = tools.calibrater()\n cb.open(msname)\n cb.setsolve(type='G', t=900., table=calname, phaseonly=True, refant=0)\n cb.solve()\n\n if show:\n cb.listcal(caltable=calname)\n\n if apply:\n cb.correct()\n\n\ndef display(imname=None):\n \"\"\" Show an image\n \"\"\"\n\n im = tools.image()\n im.open(imname)\n data = im.getchunk()\n fig = plt.figure(figsize=(10,8))\n plt.imshow(data.squeeze(), origin='bottom', interpolation='nearest')\n plt.show()\n", "id": "4318858", "language": "Python", "matching_score": 1.7550816535949707, "max_stars_count": 0, "path": "simdata.py" }, { "content": "# plot triggered FRB candidates\n# <EMAIL> & <EMAIL>\n# 25/02/2021\n\nimport os\nimport os.path\nimport sys\n\nimport scipy.signal\nfrom scipy import stats\n\nimport numpy as np\nimport matplotlib as mpl\nimport h5py\nmpl.use('Agg') # hack\nimport matplotlib.pyplot as plt \nimport json\nimport glob\nimport optparse\nfrom mpl_toolkits.axes_grid.inset_locator import inset_axes\n\nimport multiprocessing\nfrom joblib import Parallel, delayed\n\n#import filterbank\nfrom sigpyproc.Readers import FilReader\nimport slack\n\nncpu = multiprocessing.cpu_count() - 1 \n\n# Keras neural network model for Freq/Time array\nMLMODELPATH='/home/user/connor/software/machine_learning/20190501freq_time.hdf5'\nBASEDIR='/mnt/data/dsa110/'\nwebPLOTDIR=BASEDIR+'webPLOTS/'\nMLMODELPATH='/home/ubuntu/connor/MLmodel/20190501freq_time.hdf5'\nBASEDIR='/data/dsa110/'\nwebPLOTDIR=BASEDIR+'webPLOTS/'\n\nplt.rcParams.update({\n 'font.size': 12,\n 'font.family': 'serif',\n 'axes.labelsize': 14,\n 'axes.titlesize': 15,\n 'xtick.labelsize': 
12,\n 'ytick.labelsize': 12,\n 'xtick.direction': 'in',\n 'ytick.direction': 'in',\n 'xtick.top': True,\n 'ytick.right': True,\n 'lines.linewidth': 0.5,\n 'lines.markersize': 5,\n 'legend.fontsize': 14,\n 'legend.borderaxespad': 0,\n 'legend.frameon': False,\n 'legend.loc': 'lower right'})\n\ndef read_fil_data_dsa(fn, start=0, stop=1):\n \"\"\" Read in filterbank data\n \"\"\"\n fil_obj = FilReader(fn)\n header = fil_obj.header\n delta_t = fil_obj.header['tsamp'] # delta_t in seconds \n fch1 = header['fch1']\n nchans = header['nchans']\n foff = header['foff']\n fch_f = fch1 + nchans*foff\n freq = np.linspace(fch1,fch_f,nchans)\n try:\n data = fil_obj.readBlock(start, stop)\n except(ValueError):\n data = 0\n\n return data, freq, delta_t, header\n\ndef plotfour(dataft, datats, datadmt, \n beam_time_arr=None, figname_out=None, dm=0,\n dms=[0,1], \n datadm0=None, suptitle='', heimsnr=-1,\n ibox=1, ibeam=-1, prob=-1, showplot=True,multibeam_dm0ts=None):\n \"\"\" Plot a trigger's dynamics spectrum, \n dm/time array, pulse profile, \n multibeam info (optional), and zerodm (optional)\n\n Parameter\n ---------\n dataft : \n freq/time array (nfreq, ntime)\n datats : \n dedispersed timestream\n datadmt : \n dm/time array (ndm, ntime)\n beam_time_arr : \n beam time SNR array (nbeam, ntime)\n figname_out : \n save figure with this file name \n dm : \n dispersion measure of trigger \n dms : \n min and max dm for dm/time array \n datadm0 : \n raw data timestream without dedispersion\n \"\"\"\n\n classification_dict = {'prob' : [],\n 'snr_dm0_ibeam' : [],\n 'snr_dm0_allbeam' : []}\n datats /= np.std(datats[datats!=np.max(datats)])\n nfreq, ntime = dataft.shape\n xminplot,xmaxplot = 200,800 # milliseconds\n dm_min, dm_max = dms[0], dms[1]\n tmin, tmax = 0., 1e3*dataft.header['tsamp']*ntime\n freqmax = dataft.header['fch1']\n freqmin = freqmax + dataft.header['nchans']*dataft.header['foff']\n tarr = np.linspace(tmin, tmax, ntime)\n fig = plt.figure(figsize=(8,10))\n\n plt.subplot(321)\n extentft=[tmin,tmax,freqmin,freqmax]\n plt.imshow(dataft, aspect='auto',extent=extentft, interpolation='nearest')\n plt.xlim(xminplot,xmaxplot)\n plt.xlabel('Time (ms)')\n plt.ylabel('Freq (MHz)')\n if prob!=-1:\n plt.text(xminplot+50,0.5*(freqmax+freqmin),\"Prob=%0.2f\" % prob, color='white', fontweight='bold')\n classification_dict['prob'] = prob\n plt.subplot(322)\n extentdm=[tmin, tmax, dm_min, dm_max]\n plt.imshow(datadmt[::-1], aspect='auto',extent=extentdm)\n plt.xlim(xminplot,xmaxplot)\n plt.xlabel('Time (ms)')\n plt.ylabel(r'DM (pc cm$^{-3}$)')\n\n plt.subplot(323)\n plt.plot(tarr, datats)\n plt.grid('on', alpha=0.25)\n plt.xlabel('Time (ms)')\n plt.ylabel(r'Power ($\\sigma$)')\n plt.xlim(xminplot,xmaxplot)\n plt.text(0.55*(tmin+1000.), 0.5*(max(datats)+np.median(datats)), \n 'Heimdall S/N : %0.1f\\nHeimdall DM : %d\\\n \\nHeimdall ibox : %d\\nibeam : %d' % (heimsnr,dm,ibox,ibeam), \n fontsize=8, verticalalignment='center')\n \n parent_axes=fig.add_subplot(324)\n if beam_time_arr is None:\n plt.xticks([])\n plt.yticks([])\n plt.text(0.20, 0.55, 'Multibeam info\\nunder construction',\n fontweight='bold')\n else:\n parent_axes.imshow(beam_time_arr[::-1], aspect='auto', extent=[tmin, tmax, 0, beam_time_arr.shape[0]], \n interpolation='nearest')\n parent_axes.axvline(540, ymin=0, ymax=6, color='r', linestyle='--', alpha=0.55)\n parent_axes.axvline(460, ymin=0, ymax=6, color='r', linestyle='--', alpha=0.55)\n parent_axes.axhline(max(0,ibeam-1), xmin=0, xmax=100, color='r', linestyle='--', alpha=0.55)\n 
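        # The dashed vertical guides at 460 and 540 ms appear to bracket the
        # expected burst arrival window, while the horizontal guides at
        # ibeam-1 and ibeam+1 bracket the triggering beam on the beam/time
        # panel (descriptive comment; intent inferred from the plotted values).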
parent_axes.axhline(ibeam+1, xmin=0, xmax=100, color='r', linestyle='--', alpha=0.55)\n parent_axes.set_xlim(xminplot,xmaxplot)\n parent_axes.set_xlabel('Time (ms)')\n parent_axes.set_ylabel('Beam', fontsize=15)\n small_axes = inset_axes(parent_axes,\n width=\"25%\", # width = 30% of parent_bbox\n height=\"25%\", # height : 1 inch\n loc=4)\n small_axes.imshow(beam_time_arr[ibeam-4:ibeam+4][::-1], aspect='auto', extent=[tmin, tmax, ibeam-4, ibeam+4],\n interpolation='nearest', cmap='afmhot')\n small_axes.set_xlim(400., 600.)\n\n if datadm0 is not None:\n plt.subplot(325)\n datadm0 -= np.median(datadm0.mean(0))\n datadm0_sigmas = datadm0.mean(0)/np.std(datadm0.mean(0)[-500:])\n snr_dm0ts_iBeam = np.max(datadm0_sigmas)\n plt.plot(np.linspace(0, tmax, len(datadm0[0])), datadm0_sigmas, c='k')\n classification_dict['snr_dm0_ibeam'] = snr_dm0ts_iBeam\n \n if multibeam_dm0ts is not None:\n multibeam_dm0ts = multibeam_dm0ts/np.std(multibeam_dm0ts[multibeam_dm0ts!=multibeam_dm0ts.max()])\n multibeam_dm0ts -= np.median(multibeam_dm0ts)\n snr_dm0ts_allbeams = np.max(multibeam_dm0ts)\n plt.plot(np.linspace(0, tmax, len(multibeam_dm0ts)), multibeam_dm0ts, color='C1', alpha=0.75)\n plt.legend(['iBeam=%d'%ibeam, 'All beams'], loc=1, fontsize=10)\n plt.ylabel(r'Power ($\\sigma$)')\n classification_dict['snr_dm0_allbeam'] = snr_dm0ts_allbeams\n else:\n plt.legend(['DM=0 Timestream'], loc=2, fontsize=10)\n plt.xlabel('Time (ms)')\n \n plt.subplot(326)\n plt.plot(np.linspace(freqmax,freqmin,datadm0.shape[0]), np.mean(datadm0,axis=-1), color='k')\n plt.semilogy()\n plt.legend(['spectrum'], loc=2)\n plt.xlabel('freq [MHz]')\n\n print(classification_dict)\n not_real = False\n\n if multibeam_dm0ts is not None:\n if classification_dict['snr_dm0_allbeam']>7.0:\n if classification_dict['prob']<0.5:\n not_real = True\n\n if classification_dict['prob']<0.01:\n not_real = True\n\n if not_real==True:\n suptitle += ' (Probably not real)'\n \n plt.suptitle(suptitle, color='C1')\n plt.tight_layout()\n if figname_out is not None:\n plt.savefig(figname_out)\n if showplot:\n plt.show()\n\n return not_real\n \ndef dm_transform(data, dm_max=20,\n dm_min=0, dm0=None, ndm=64, \n freq_ref=None, downsample=16):\n \"\"\" Transform freq/time data to dm/time data. 
\n \"\"\"\n ntime = data.shape[1]\n\n dms = np.linspace(dm_min, dm_max, ndm, endpoint=True)\n\n if dm0 is not None:\n dm_max_jj = np.argmin(abs(dms-dm0))\n dms += (dm0-dms[dm_max_jj])\n\n data_full = np.zeros([ndm, ntime//downsample])\n\n for ii, dm in enumerate(dms):\n dd = data.dedisperse(dm)\n _dts = np.mean(dd,axis=0)\n data_full[ii] = _dts[:ntime//downsample*downsample].reshape(ntime//downsample, downsample).mean(1)\n\n return data_full, dms\n\ndef proc_cand_fil(fnfil, dm, ibox, snrheim=-1, \n pre_rebin=1, nfreq_plot=64,\n heim_raw_tres=1, \n rficlean=False, ndm=64):\n \"\"\" Take filterbank file path, preprocess, and \n plot trigger\n\n Parameters:\n ----------\n\n fnfil : str \n path to .fil file \n DM : float \n dispersion measure of trigger \n ibox : int \n preferred boxcar width \n snrheim : float \n S/N of candidate found by Heimdall\n pre_rebin : int \n rebin in time by this factor *before* dedispersion (saves time)\n nfreq_plot : int \n number of frequency channels in output\n heim_raw_tres : 32 \n \"\"\"\n header = read_fil_data_dsa(fnfil, 0, 1)[-1]\n # read in 4 seconds of data\n nsamp = int(4.0/header['tsamp'])\n data, freq, delta_t_raw, header = read_fil_data_dsa(fnfil, start=0, \n stop=nsamp)\n\n nfreq0, ntime0 = data.shape\n\n if pre_rebin>1:\n # Ensure that you do not pre-downsample by more than the total boxcar\n pre_rebin = min(pre_rebin, ibox*heim_raw_tres)\n data = data.downsample(pre_rebin)\n\n datadm0 = data.copy()\n \n if rficlean:\n# print(\"Cleaning data perchannel\")\n data = cleandata(data, clean_type='aladsa')\n\n tsdm0 = np.mean(data,axis=0)\n\n datadm, dms = dm_transform(data, dm_max=dm+250,\n dm_min=dm-250, dm0=dm, ndm=ndm, \n freq_ref=None, \n downsample=heim_raw_tres*ibox//pre_rebin)\n data = data.dedisperse(dm)\n data = data.downsample(heim_raw_tres*ibox//pre_rebin)\n data = data.reshape(nfreq_plot, data.shape[0]//nfreq_plot, \n data.shape[1]).mean(1)\n\n data = data-np.median(data,axis=1,keepdims=True)\n data /= np.std(data)\n\n return data, datadm, tsdm0, dms, datadm0\n\n\ndef medflagdata(spec, filtsize, thres):\n specfilt = scipy.signal.medfilt(spec,kernel_size=int(filtsize));\n speccorrec = spec - specfilt;\n specstd = stats.median_absolute_deviation(speccorrec);\n return np.concatenate((np.argwhere(speccorrec > thres*specstd),np.argwhere(speccorrec < -thres*specstd)))\n\ndef cleandata(data, threshold_time=3.25, threshold_frequency=2.75, bin_size=32,\n n_iter_time=3, n_iter_frequency=3, clean_type='time', wideclean=None):\n \"\"\" Take filterbank object and mask\n RFI time samples with average spectrum.\n\n Parameters:\n ----------\n data :\n data array (nfreq, ntime)\n threshold_time : float\n units of sigma\n threshold_frequency : float\n units of sigma\n bin_size : int\n quantization bin size\n n_iter_time : int\n Number of iteration for time cleaning\n n_iter_frequency : int\n Number of iteration for frequency cleaning\n clean_type : str\n type of cleaning to be done.\n Accepted values: 'time', 'frequency', 'both', 'perchannel'\n\n Returns:\n -------\n cleaned filterbank object\n \"\"\"\n if clean_type not in ['time', 'both', 'frequency', 'perchannel', 'aladsa']:\n return data\n \n nfreq = data.shape[0]\n ntimes = data.shape[1]\n\n dtmean = np.mean(data, axis=-1)\n # Clean in time\n #sys_temperature_bandpass(data.data)\n #remove_noisy_freq(data.data, 3)\n #remove_noisy_channels(data.data, sigma_threshold=2, iters=5)\n if clean_type in ['time', 'both']:\n for i in range(n_iter_time):\n dfmean = np.mean(data, axis=0)\n stdevf = np.std(dfmean)\n 
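            # Sigma-clipping of time samples: bins whose band-averaged power
            # deviates from the median by more than threshold_time*stdevf are
            # masked below and replaced with the time-averaged spectrum dtmean.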
medf = np.median(dfmean)\n maskf = np.where(np.abs(dfmean - medf) > threshold_time*stdevf)[0]\n # replace with mean spectrum\n data[:, maskf] = dtmean[:, None]*np.ones(len(maskf))[None]\n \n if clean_type=='aladsa':\n print('flagging a la DSA\\n');\n meanidx = medflagdata(dtmean, 21, 5.);\n varidx = medflagdata(np.var(data,axis=-1), 21, 5.);\n allidx = np.concatenate((meanidx,varidx));\n allidx = np.asarray(list(set(list(np.ravel(allidx)))));\n data[allidx,:] = np.zeros((len(allidx),ntimes));\n \n\n if clean_type=='perchannel':\n for ii in range(n_iter_time):\n dtmean = np.mean(data, axis=1, keepdims=True)\n dtsig = np.std(data, axis=1)\n for nu in range(data.shape[0]):\n d = dtmean[nu]\n sig = dtsig[nu]\n maskpc = np.where(np.abs(data[nu]-d)>threshold_time*sig)[0]\n data[nu][maskpc] = d\n\n # Clean in frequency\n # remove bandpass by averaging over bin_size ajdacent channels\n if clean_type in ['frequency', 'both']:\n for ii in range(n_iter_frequency):\n dtmean_nobandpass = data.mean(1) - dtmean.reshape(-1, bin_size).mean(-1).repeat(bin_size)\n stdevt = np.std(dtmean_nobandpass)\n medt = np.median(dtmean_nobandpass)\n maskt = np.abs(dtmean_nobandpass - medt) > threshold_frequency*stdevt\n data[maskt] = np.median(dtmean)#dtmean.reshape(-1, bin_size).mean(-1).repeat(bin_size)[maskt]\n\n return data\n\ndef generate_beam_time_arr(fl, ibeam=0, pre_rebin=1, \n dm=0, ibox=1, heim_raw_tres=1):\n \"\"\" Take list of nbeam .fil files, dedisperse each \n to the dm of the main trigger, and generate an \n (nbeam, ntime) SNR array.\n\n Parameters:\n -----------\n fl : list \n list of .fil files, each 4 seconds long\n ibeam : int \n beam number of trigger\n pre_rebin : \n downsample by this factor before dedispersion to save time\n dm : int \n dm of ibeam candidate\n ibox : int \n boxcar width of ibeam candidate \n heim_raw_tres : int \n ratio of \n\n Returns:\n --------\n beam_time_arr : ndarray \n array of SNR values (nbeam, ntime)\n \"\"\"\n fl.sort()\n nbeam = len(fl[:])\n header = read_fil_data_dsa(fl[0], 0, 1)[-1]\n # read in 4 seconds of data\n nsamp = int(4.0/header['tsamp'])\n nsamp_final = nsamp // (heim_raw_tres*ibox)\n nfreq_final = 1024\n \n# beam_time_arr = np.zeros([nbeam, nsamp_final])\n beam_time_arr = np.zeros([nbeam, nfreq_final, nsamp_final]) \n multibeam_dm0ts = 0\n beamno_arr=[]\n \n for jj,fnfil in enumerate(fl):\n print(fnfil, beam_time_arr.shape)\n beamno = int(fnfil.strip('.fil').split('_')[-1])\n data, freq, delta_t_raw, header = read_fil_data_dsa(fnfil, start=0, \n stop=nsamp)\n nfreq0, ntime0 = data.shape\n\n # Ensure that you do not pre-downsample by more than the total boxcar\n pre_rebin = min(pre_rebin, ibox*heim_raw_tres)\n\n multibeam_dm0ts += data.mean(0) \n # Rebin in frequency by 8x\n data = data.downsample(pre_rebin)\n data = data.dedisperse(dm)\n data = data.downsample(heim_raw_tres*ibox//pre_rebin)\n datats = np.mean(data, axis=0)\n\n # Low resolution nbeam, nfreq, ntime array\n data_ftb = data.reshape(nfreq_final, data.shape[0]//nfreq_final, data.shape[1]).mean(1)\n # Normalize data excluding outliers\n datatscopy = datats.copy()\n datatscopy.sort()\n medts = np.median(datatscopy[:int(0.975*len(datatscopy))])\n sigts = np.std(datatscopy[:int(0.975*len(datatscopy))])\n datats -= medts \n datats /= sigts\n beamno_arr.append(beamno)\n\n# beam_time_arr[beamno, :] = datats\n beam_time_arr[jj, :] = data_ftb \n\n return beam_time_arr, multibeam_dm0ts, beamno_arr\n\n\ndef plot_fil(fn, dm, ibox, multibeam=None, figname_out=None,\n ndm=32, suptitle='', heimsnr=-1,\n 
ibeam=-1, rficlean=True, nfreq_plot=32, \n classify=False, heim_raw_tres=1, \n showplot=True, save_data=False):\n \"\"\" Vizualize FRB candidates on DSA-110\n \"\"\"\n# if type(multibeam)==list:\n# beam_time_arr, multibeam_dm0ts = generate_beam_time_arr(multibeam, ibeam=ibeam, pre_rebin=1, \n# dm=dm, ibox=ibox, \n# heim_raw_tres=heim_raw_tres)\n# \n# x,y = np.where(beam_time_arr==beam_time_arr.max())\n# ibeam = x[0]\n# fn = flist[ibeam]\n# for fn_ in flist:\n# print(fn_, fn_.strip('_')[-1])\n# if str(ibeam) in fn_.strip('_')[-1]:\n# print(ibeam,'here')\n# else:\n# beam_time_arr = None\n# multibeam_dm0ts = None\n\n if type(multibeam)==list:\n data_beam_freq_time = []\n beam_time_arr_results = Parallel(n_jobs=ncpu)(delayed(generate_beam_time_arr)(multibeam[8*ii:8*(ii+1)],\n ibox=ibox, pre_rebin=1,\n dm=dm, heim_raw_tres=heim_raw_tres)\n for ii in range(32))\n# for datacube in beam_time_arr_results:\n beamno_arr=[]\n for ii in range(len(beam_time_arr_results)):\n beamno_arr.append(beam_time_arr_results[ii][2])\n data_beam_freq_time.append(beam_time_arr_results[ii][0])\n data_beam_freq_time = np.concatenate(data_beam_freq_time, axis=0)\n print(data_beam_freq_time.shape)\n beam_time_arr = data_beam_freq_time.mean(1)\n multibeam_dm0ts = beam_time_arr.mean(0)\n else:\n beam_time_arr = None\n multibeam_dm0ts = None \n \n \n dataft, datadm, tsdm0, dms, datadm0 = proc_cand_fil(fn, dm, ibox, snrheim=-1, \n pre_rebin=1, nfreq_plot=nfreq_plot,\n ndm=ndm, rficlean=rficlean,\n heim_raw_tres=heim_raw_tres)\n \n if classify:\n from keras.models import load_model\n fnmodel=MLMODELPATH\n model = load_model(fnmodel)\n mm = np.argmax(dataft.mean(0))\n tlow, thigh = mm-32, mm+32\n if mm<32:\n tlow=0\n thigh=64\n if thigh>dataft.shape[1]:\n thigh=dataft.shape[1]\n tlow=thigh-64\n dataml = dataft[:,tlow:thigh]\n dataml -= np.median(dataml, axis=1, keepdims=True)\n dataml /= np.std(dataml, axis=-1)[:, None]\n dataml[dataml!=dataml] = 0.0\n dataml = dataml[None,..., None]\n prob = model.predict(dataml)[0,1]\n else:\n prob = -1\n \n if save_data:\n fnout = (fn.split('/')[-1]).strip('.fil') + '.hdf5'\n fnout = '/home/ubuntu/connor/software/misc/data/MLtraining/' + fnout\n \n paramsdict = {'dm' : dm, 'ibox' : ibox, 'ibeam' : ibeam,\n 'snr' : heimsnr}\n \n g = h5py.File(fnout,'w')\n g.create_dataset('data_freq_time',data=dataft)\n g.create_dataset('data_dm_time',data=datadm)\n if beam_time_arr is None:\n g.create_dataset('data_beam_time',data=[])\n else:\n g.create_dataset('data_beam_time',data=beam_time_arr)\n g.create_dataset('params',data=str(paramsdict))\n g.close()\n \n \n not_real = plotfour(dataft, dataft.mean(0), datadm, datadm0=datadm0, \n beam_time_arr=beam_time_arr, figname_out=figname_out, dm=dm,\n dms=[dms[0],dms[-1]], \n suptitle=suptitle, heimsnr=heimsnr,\n ibox=ibox, ibeam=ibeam, prob=prob, showplot=showplot, multibeam_dm0ts=multibeam_dm0ts)\n\n return not_real\n \ndef read_json(jsonfile):\n with open(jsonfile) as f:\n triggerdata = json.load(f)\n\n timehr = float(triggerdata.get(list(triggerdata.keys())[0]).get('mjds'))\n snr = float(triggerdata.get(list(triggerdata.keys())[0]).get('snr'))\n dm = float(triggerdata.get(list(triggerdata.keys())[0]).get('dm'))\n ibeam = int(triggerdata.get(list(triggerdata.keys())[0]).get('ibeam'))\n ibox = int(triggerdata.get(list(triggerdata.keys())[0]).get('ibox'))\n\n return timehr,snr,dm,ibeam,ibox\n\n\nif __name__=='__main__':\n\n parser = optparse.OptionParser(prog=\"filplotter\",\n version=\"\",\n usage=\"%prog fname datestr specnum [OPTIONS]\",\n 
description=\"Visualize and classify filterbank data\")\n\n parser.add_option('-s', '--slack', dest='slack', action=\"store_true\",help=\"send figure to slack\")\n parser.add_option('-d', '--dm', dest='dm',\n help=\"DM \", default=None)\n parser.add_option('-c', '--classify', dest='classify', action=\"store_true\",\n help=\"classify using ML\") \n parser.add_option('-r', '--rficlean', dest='rficlean', action=\"store_true\",\n help=\"excise RFI from data\")\n parser.add_option('-w', '--ibox', dest='ibox', type=int,\n help=\"ibox found by Heimdall\", default=1)\n parser.add_option('--ndm', dest='ndm', type=int, default=32, \n help=\"number of DMs for DM/time plot\")\n parser.add_option('--ntime_plot', dest='ntime_plot', type=int, default=64, \n help=\"number of samples to plot\")\n parser.add_option('--nfreq_plot', dest='nfreq_plot', type=int, default=32, \n help=\"number of freq channels to plot\")\n parser.add_option('--save_data', dest='save_data', action='store_true',\n help=\"save data to hdf5 for ML classifier training\")\n\n options, args = parser.parse_args()\n datestr = args[0]\n specnum = args[1]\n\n flist = glob.glob(BASEDIR+'/T1/corr*/'+datestr+'/fil_%s/*.fil' % specnum)\n flist.sort()\n jsonfile = glob.glob(BASEDIR+'/T3/corr01/'+datestr+'/*%s*.json' % specnum)[0]\n \n timehr,snr,dm,ibeam,ibox = read_json(jsonfile)\n ibeam += 1 # hack until ibeam is figured out\n# timehr,snr,dm,ibeam,ibox=0,10,26.8,115,8\n# print('Read (FAKE) JSON file') #hack\n\n beamindlist = []\n for fnfil in flist:\n beamno = int(fnfil.strip('.fil').split('_')[-1])\n beamindlist.append(beamno)\n if beamno==ibeam:\n fname = fnfil\n flist_=[]\n\n # reorder the filename list in beam number\n for ii in range(len(flist)):\n flist_.append(flist[np.where(np.array(beamindlist)==ii)[0][0]])\n flist = flist_\n\n if options.slack:\n showplot=False\n else:\n showplot=True\n\n outstr = (specnum, dm, int(ibox), datestr, int(ibeam), timehr)\n suptitle = 'specnum:%s DM:%0.2f boxcar:%d \\n%s ibeam:%d MJD:%f' % outstr\n\n fnameout = fname.replace('.fil','.png')\n figdirout = webPLOTDIR+datestr+'/candidates/'\n figdirout = './plots/'\n os.system('mkdir -p %s' % figdirout)\n fnameout = figdirout+specnum+'.png'\n \n not_real = plot_fil(fname, dm, ibox, figname_out=fnameout,\n ndm=options.ndm, suptitle=suptitle, heimsnr=snr,\n ibeam=ibeam, rficlean=options.rficlean, \n nfreq_plot=options.nfreq_plot, \n classify=options.classify, showplot=showplot, \n multibeam=flist,\n heim_raw_tres=1, save_data=options.save_data)\n print(not_real)\n if options.slack and not_real==False:\n print(\"Sending to slack\")\n slack_file = '{0}/.config/slack_api'.format(\n os.path.expanduser(\"~\")\n )\n if not os.path.exists(slack_file):\n raise RuntimeError(\n \"Could not find file with slack api token at {0}\".format(\n slack_file\n )\n )\n with open(slack_file) as sf_handler:\n slack_token = sf_handler.read()\n client = slack.WebClient(token=slack_token);\n client.files_upload(channels='candidates',file=fnameout,initial_comment=fnameout);\n", "id": "1578270", "language": "Python", "matching_score": 1.5890148878097534, "max_stars_count": 0, "path": "services/filplot.py" }, { "content": "import argparse\nfrom dsaT3 import labels\n\ndef local_run(args):\n\n if args.candidate is not None:\n if args.label is not None:\n try:\n labels.set_label(args.candidate, args.label)\n except:\n print('Could not set label: ', args.candidate, args.label)\n\n if args.notes is not None:\n try:\n labels.set_notes(args.candidate, args.notes)\n except:\n print('Could not set 
notes: ', args.candidate, args.notes)\n\n if args.label=='archive':\n if not args.search:\n print('Searching for voltage files, because you are archiving')\n labels.check_voltages(args.candidate)\n\n if args.search and args.candidate is not None:\n labels.check_voltages(args.candidate)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--candidate', type=str, default=None, help='Candidate name', required=True)\n parser.add_argument('-l', '--label', type=str, default=None, help='Label ('+str(labels._allowed)+')')\n parser.add_argument('-n', '--notes', type=str, default=None, help='Notes (free form)')\n parser.add_argument('-s', '--search', action='store_true', help='Search for voltage files')\n the_args = parser.parse_args()\n local_run(the_args)\n", "id": "3529173", "language": "Python", "matching_score": 0.40192940831184387, "max_stars_count": 0, "path": "services/label_candidate.py" }, { "content": "\"\"\"\nA script to convert voltage files to measurement sets.\n\"\"\"\nimport json\nimport re\nimport os\nimport glob\nimport subprocess\nfrom multiprocessing import Process, Manager\nimport multiprocessing\nimport queue\nimport argparse\nimport time\nimport yaml\nfrom pkg_resources import resource_filename\nfrom astropy.time import Time\nimport astropy.units as u\nfrom dsaT3.utils import get_declination_mjd, rsync_file\nfrom dsaT3.T3imaging import generate_T3_uvh5\nfrom dsacalib.ms_io import uvh5_to_ms\nfrom dsautils import cnf\n\nNPROC = 8\nPARAMFILE = resource_filename('dsaT3', 'data/T3_parameters.yaml')\nwith open(PARAMFILE) as YAMLF:\n T3PARAMS = yaml.load(YAMLF, Loader=yaml.FullLoader)['T3corr']\nCONF = cnf.Conf()\nCORR_LIST = list(CONF.get('corr')['ch0'].keys())\n\ndef rsync_handler(\n rsync_queue,\n corr_queue,\n rsync,\n):\n rsync_done = False\n while not rsync_done:\n try:\n item = rsync_queue.get()\n except queue.Empty:\n time.sleep(10)\n else:\n if item == 'END':\n rsync_done = True\n continue\n srcfile, vfile = item\n if not os.path.exists(vfile):\n if rsync:\n rsync_file(\n srcfile,\n vfile\n )\n else:\n os.symlink(srcfile, vfile)\n corr_queue.put(vfile)\n\ndef corr_handler(\n deltat_ms,\n deltaf_MHz,\n corr_queue,\n uvh5_queue,\n ncorrfiles,\n ncorrfiles_lock\n):\n \"\"\"Correlates data using T3 cpu correlator.\n\n Parameters\n ----------\n deltat_ms : float\n The desired integration time in the correlated data, in ms.\n deltaf_MHz : float\n The desired integration frequency in the correlated data, in MHz.\n \"\"\"\n corr_done = False\n while not corr_done:\n if ncorrfiles.value > NPROC-1:\n time.sleep(10)\n continue\n try:\n vfile = corr_queue.get()\n except queue.Empty:\n time.sleep(10)\n else:\n if vfile == 'END':\n corr_done = True\n continue\n with ncorrfiles_lock:\n ncorrfiles.value += 1\n if not os.path.exists('{0}.corr'.format(vfile)):\n command = (\n '/home/ubuntu/proj/dsa110-shell/dsa110-bbproc/dsacorr '\n '-d {0} -o {0}.corr -t {1} -f {2} -a {3}'.format(\n vfile,\n deltat_ms,\n deltaf_MHz, \n len(T3PARAMS['antennas'])\n )\n )\n print(command)\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True\n )\n proc_stdout = str(process.communicate()[0].strip())\n print(proc_stdout)\n corr_files = dict({})\n corr = re.findall('corr\\d\\d', vfile)[0]\n corr_files[corr] = '{0}.corr'.format(vfile)\n uvh5_queue.put(corr_files)\n\ndef uvh5_handler(\n candname,\n declination,\n tstart,\n ntint,\n nfint,\n start_offset,\n end_offset,\n uvh5_queue,\n ncorrfiles,\n ncorrfiles_lock\n):\n 
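    # Worker that consumes dictionaries of correlated .corr files from
    # uvh5_queue, writes each group to a UVH5 visibility file via
    # generate_T3_uvh5 (using the candidate pointing declination and start
    # time), and decrements the shared ncorrfiles counter so the correlator
    # workers can continue. An 'END' sentinel on the queue shuts the worker
    # down. A minimal usage sketch, with hypothetical offsets, assuming the
    # queues and shared values are created as in __main__ below:
    #
    #   p = Process(
    #       target=uvh5_handler,
    #       args=(candname, declination, tstart, 8, 8, None, None,
    #             uvh5_queue, ncorrfiles, ncorrfiles_lock),
    #       daemon=True,
    #   )
    #   p.start()
    #   uvh5_queue.put(corr_files)   # dict of {corr node: path to .corr file}
    #   uvh5_queue.put('END')
    #   p.join()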
proc = multiprocessing.current_process()\n uvh5_done = False\n while not uvh5_done:\n try:\n corr_files = uvh5_queue.get()\n except queue.Empty:\n time.sleep(10)\n else:\n if corr_files == 'END':\n print('proc {0} is setting uvh5_done'.format(proc.pid))\n uvh5_done = True\n continue\n uvh5name = generate_T3_uvh5(\n '{0}/{1}'.format(T3PARAMS['corrdir'], candname),\n declination,\n tstart,\n ntint=ntint,\n nfint=nfint,\n filelist=corr_files,\n start_offset=start_offset,\n end_offset=end_offset\n )\n print(uvh5name)\n #for value in corr_files.values():\n # os.remove(value)\n with ncorrfiles_lock:\n ncorrfiles.value -= 1\n print('{0} exiting'.format(proc.pid))\n\ndef __main__(candname, datestring, ntint, nfint, start_offset, end_offset):\n \"\"\"\n Correlate voltage files and convert to a measurement set.\n\n Parameters\n ----------\n candname : str\n The unique name of the candidate.\n datestring : str\n The datestring the observation is archived under. Use 'current' if the\n data is from the current, unarchived observing run.\n filelist : list\n The full paths to the voltage files on dsa-storage.\n ntint : int\n The number of time samples to integrate together during correlation.\n nfint : int\n The number of frequency channels to integrate together after removing\n outrigger delays.\n start_offset : int\n The number of time samples (after correlation) to offset the start of\n the measurement set by. If not provided, the entire time is converted\n to a measurement set.\n end_offset : int\n The last time sample (after correlation) to write to the measurement\n set. If not provide,d the entire time is converted to a measurement\n set.\n \"\"\"\n if start_offset < 0:\n start_offset = None\n if end_offset < 0:\n end_offset = None\n if datestring == 'current':\n rsync = True\n filenames = [\n '{0}.sas.pvt:/home/ubuntu/data/{1}_data.out'.format(\n corr,\n candname\n ) for corr in CORR_LIST\n ]\n headername = '{0}/{1}.json'.format(T3PARAMS['T3dir'], candname)\n else:\n rsync = False\n filenames = [\n '{0}/{1}/{2}_{3}_data.out'.format(\n T3PARAMS['archivedir'],\n datestring,\n corr,\n candname\n ) for corr in CORR_LIST\n ]\n headername = '{0}/{1}/{2}.json'.format(\n T3PARAMS['archivedir'],\n datestring,\n candname\n )\n outnames = [\n '{0}/{1}_{2}_data.out'.format(\n T3PARAMS['corrdir'],\n corr,\n candname\n ) for corr in CORR_LIST\n ]\n # Get metadata\n with open(headername) as jsonf:\n metadata = json.load(jsonf)\n tstart = Time(metadata['mjds'], format='mjd')\n try:\n declination = get_declination_mjd(tstart)\n except ConnectionError:\n declination = 54.58209895*u.deg\n deltat_ms = ntint*T3PARAMS['deltat_s']*1e3\n deltaf_MHz = T3PARAMS['deltaf_MHz']\n\n manager = Manager()\n ncorrfiles = manager.Value('i', 0)\n ncorrfiles_lock = manager.Lock()\n rsync_queue = manager.Queue()\n corr_queue = manager.Queue()\n uvh5_queue = manager.Queue()\n # Copy files\n for i, filename in enumerate(filenames):\n rsync_queue.put([filename, outnames[i]])\n rsync_queue.put('END')\n processes = []\n processes += [Process(\n target=rsync_handler,\n args=(\n rsync_queue,\n corr_queue,\n rsync\n ),\n daemon=True\n )]\n for i in range(NPROC):\n processes += [Process(\n target=corr_handler,\n args=(\n deltat_ms,\n deltaf_MHz,\n corr_queue,\n uvh5_queue,\n ncorrfiles,\n ncorrfiles_lock\n ),\n daemon=True\n )]\n for i in range(NPROC):\n processes += [Process(\n target=uvh5_handler,\n args=(\n candname,\n declination,\n tstart,\n ntint,\n nfint,\n start_offset,\n end_offset,\n uvh5_queue,\n ncorrfiles,\n ncorrfiles_lock\n 
),\n daemon=True\n )]\n for proc in processes:\n proc.start()\n processes[0].join()\n for i in range(NPROC):\n corr_queue.put('END')\n for proc in processes[1:NPROC+1]:\n proc.join()\n print('All corr processes done')\n # We get here\n for i in range(NPROC):\n uvh5_queue.put('END')\n for proc in processes[1+NPROC:]:\n proc.join()\n print('A uvh5 process joined.')\n print('All uvh5 processes done')\n hdf5files = sorted(glob.glob('{0}/{1}_corr??.hdf5'.format(\n T3PARAMS['corrdir'],\n candname\n )))\n uvh5_to_ms(\n hdf5files,\n '{0}/{1}'.format(T3PARAMS['msdir'], candname)\n )\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Correlate candidate voltage files.'\n )\n parser.add_argument(\n 'candname',\n type=str,\n help='unique candidate name'\n )\n parser.add_argument(\n '--datestring',\n type=str,\n help='datestring of archived candidate',\n nargs='?',\n default='current'\n )\n parser.add_argument(\n '--ntint',\n type=int,\n nargs='?',\n default=8,\n help='number of native time bins to integrate during correlation'\n )\n parser.add_argument(\n '--nfint',\n type=int,\n nargs='?',\n default=8,\n help='number of native freq bins to integrate during correlation'\n )\n parser.add_argument(\n '--startoffset',\n type=int,\n nargs='?',\n default=1716,\n help='nbins from beginning of correlated data to start writing to ms'\n )\n parser.add_argument(\n '--stopoffset',\n type=int,\n nargs='?',\n default=2484,\n help='number of bins from end of correlation to write to ms'\n )\n args = parser.parse_args()\n __main__(args.candname, args.datestring, ntint=args.ntint, nfint=args.nfint,\n start_offset=args.startoffset, end_offset=args.stopoffset)\n", "id": "10468929", "language": "Python", "matching_score": 3.1766884326934814, "max_stars_count": 0, "path": "dsaT3/voltages_to_ms.py" }, { "content": "\"\"\"Creating and manipulating measurement sets from T3 visibilities.\n\nAuthor: <NAME>, <EMAIL>\n\"\"\"\nimport yaml\nimport h5py\nimport numpy as np\nfrom pkg_resources import resource_filename\nimport matplotlib.pyplot as plt\nimport astropy.units as u\nfrom astropy.coordinates import Angle\nfrom antpos.utils import get_itrf\nfrom pyuvdata import UVData\nimport casatools as cc\nfrom casacore.tables import table\nfrom dsautils import cnf\nfrom dsamfs.io import initialize_uvh5_file, update_uvh5_file\nfrom dsacalib.ms_io import extract_vis_from_ms\nfrom dsacalib.fringestopping import calc_uvw\nimport dsacalib.constants as ct\nfrom dsacalib.preprocess import remove_outrigger_delays\n\nPARAMFILE = resource_filename('dsaT3', 'data/T3_parameters.yaml')\nwith open(PARAMFILE) as YAMLF:\n T3PARAMS = yaml.load(YAMLF, Loader=yaml.FullLoader)['T3corr']\n\nMYCONF = cnf.Conf()\nCORRPARAMS = MYCONF.get('corr')\nMFSPARAMS = MYCONF.get('fringe')\nCORR_ORDER = np.arange(1, 17)\nANTENNA_ORDER = [\n 24,\n 25,\n 26,\n 27,\n 28,\n 29,\n 30,\n 31,\n 32,\n 33,\n 34,\n 35,\n 20,\n 19,\n 18,\n 17,\n 16,\n 15,\n 14,\n 13,\n 100,\n 101,\n 102,\n 116,\n 103,\n 12,\n 11,\n 10,\n 9,\n 8,\n 7,\n 6,\n 5,\n 4,\n 3,\n 2,\n 1,\n 104,\n 105,\n 106,\n 107,\n 108,\n 109,\n 110,\n 111,\n 112,\n 113,\n 114,\n 115,\n 117,\n 36,\n 37,\n 38,\n 39,\n 40,\n 41,\n 42,\n 43,\n 44,\n 45,\n 46,\n 47,\n 48,\n 49,\n]\n\ndef get_mjd(armed_mjd, utc_start, specnum):\n \"\"\"Get the start mjd of a voltage dump.\n\n Parameters\n ----------\n armed_mjd : float\n The time at which the snaps were armed, in mjd.\n utc_start : int\n The spectrum number at which the correlator was started.\n specnum : int\n The spectrum number of the first 
spectrum in the voltage dump,\n        referenced to when the correlator was started.\n\n    Returns\n    -------\n    tstart : float\n        The start time of the voltage dump in mjd.\n    \"\"\"\n    tstart = (armed_mjd+utc_start*4*8.192e-6/86400+\n              (1/(250e6/8192/2)*specnum/ct.SECONDS_PER_DAY))\n    return tstart\n\ndef get_blen(antennas):\n    \"\"\"Gets the baseline lengths for a subset of antennas.\n\n    Parameters\n    ----------\n    antennas : list\n        The antennas used in the array.\n\n    Returns\n    -------\n    blen : array\n        The ITRF coordinates of all of the baselines.\n    bname : list\n        The names of all of the baselines.\n    \"\"\"\n    ant_itrf = get_itrf(\n        latlon_center=(ct.OVRO_LAT*u.rad, ct.OVRO_LON*u.rad, ct.OVRO_ALT*u.m)\n    ).loc[antennas]\n    xx = np.array(ant_itrf['dx_m'])\n    yy = np.array(ant_itrf['dy_m'])\n    zz = np.array(ant_itrf['dz_m'])\n    # Get uvw coordinates\n    nants = len(antennas)\n    nbls = (nants*(nants+1))//2\n    blen = np.zeros((nbls, 3))\n    bname = []\n    k = 0\n    for i in range(nants):\n        for j in range(i, nants):\n            blen[k, :] = np.array([\n                xx[i]-xx[j],\n                yy[i]-yy[j],\n                zz[i]-zz[j]\n            ])\n            bname += ['{0}-{1}'.format(\n                antennas[i],\n                antennas[j]\n            )]\n            k += 1\n    return blen, bname\n\ndef generate_T3_uvh5(name, pt_dec, tstart, ntint, nfint, filelist, params=T3PARAMS, start_offset=None, end_offset=None):\n    \"\"\"Generates a measurement set from the T3 correlations.\n\n    Parameters\n    ----------\n    name : str\n        The name of the measurement set.\n    pt_dec : quantity\n        The pointing declination in degrees or equivalent.\n    tstart : astropy.time.Time instance\n        The start time of the correlated data.\n    ntint : float\n        The number of time bins that have been binned together (compared to the\n        native correlator resolution).\n    nfint : float\n        The number of frequency bins to bin together before writing the ms\n        (compared to the native resolution).\n    filelist : dictionary\n        The correlator data files for each node.\n    params : dictionary\n        T3 parameters.\n    start_offset : int\n        The timesample to start at. 
If given, end_offset must also be given.\n Defaults to transform the whole file to a ms.\n end_offset : int\n The timesample to end at.\n\n Returns\n -------\n str\n The name of the measurement set created.\n \"\"\"\n antenna_order = params['antennas']\n fobs = params['f0_GHz']+params['deltaf_MHz']*1e-3*(\n np.arange(params['nchan'])+0.5)\n nant = len(antenna_order)\n nbls = (nant*(nant+1))//2\n tsamp = params['deltat_s']*ntint*u.s\n tobs = tstart + (np.arange(params['nsubint']//ntint)+0.5)*tsamp\n if start_offset is None:\n start_offset = 0\n if end_offset is None:\n end_offset = len(tobs)\n #assert start_offset is not None\n #assert end_offset is not None\n tobs = tobs[start_offset:end_offset]\n blen, bname = get_blen(params['antennas'])\n itemspframe = nbls*params['nchan_corr']*params['npol']*2\n framespblock = 16\n itemspblock = itemspframe*framespblock\n assert (end_offset - start_offset)%framespblock == 0\n nblocks = (end_offset-start_offset)//framespblock\n # Get outrigger delays\n delays = np.zeros(len(bname), dtype=np.int)\n for i, bn in enumerate(bname):\n ant1, ant2 = bn.split('-')\n delays[i] = MFSPARAMS['outrigger_delays'].get(int(ant1), 0)-\\\n MFSPARAMS['outrigger_delays'].get(int(ant2), 0)\n for corr, corrfile in filelist.items(): # corr, ch0 in params['ch0'].items():\n ch0 = params['ch0'][corr]\n fobs_corr_full = fobs[ch0:(ch0+params['nchan_corr'])]\n fobs_corr = np.median(fobs_corr_full.reshape(-1, nfint), axis=-1)\n outname = '{1}_{0}.hdf5'.format(corr, name)\n vis_model = np.exp(2j*np.pi*fobs_corr_full[:, np.newaxis]*\n delays[np.newaxis, :, np.newaxis, np.newaxis])\n vis_model = vis_model.astype(np.complex64)\n with h5py.File(outname, 'w') as fhdf5:\n initialize_uvh5_file(\n fhdf5,\n len(fobs_corr),\n 2,\n pt_dec.to_value(u.rad),\n antenna_order,\n fobs_corr,\n #outrigger_delays\n )\n with open(corrfile, 'rb') as cfhandler:\n if start_offset is not None:\n cfhandler.seek(start_offset*32*itemspframe)\n for i in range(nblocks):\n data = np.fromfile(\n cfhandler,\n dtype=np.float32,\n count=itemspblock\n )\n data = data.reshape(-1, 2)\n data = data[..., 0] + 1.j*data[..., 1]\n data = data.reshape(framespblock, nbls, len(fobs_corr_full), params['npol'])[..., [0, -1]]\n data /= vis_model\n if nfint > 1:\n data = data.reshape(framespblock, nbls, len(fobs_corr), nfint, 2).mean(axis=3)\n bu, bv, bw = calc_uvw(\n blen,\n tobs.mjd[i*framespblock:(i+1)*framespblock],\n 'HADEC',\n np.zeros(framespblock)*u.rad,\n np.ones(framespblock)*pt_dec\n )\n buvw = np.array([bu, bv, bw]).T\n update_uvh5_file(\n fhdf5,\n data.astype(np.complex64),\n tobs.jd[i*framespblock:(i+1)*framespblock],\n tsamp,\n bname,\n buvw,\n np.ones(data.shape, np.float32)\n )\n return outname\n\ndef plot_image(imname, verbose=False, outname=None, show=True,\n expected_point=None):\n \"\"\"Plots an image from the casa-generated image file.\n\n Paramters\n ---------\n imname : str\n The name full path of the image file.\n verbose : bool\n If set to True, prints some information about the image.\n outname : str\n If provided, saves the image in <outname>_image.png.\n show : bool\n If False, the image is closed at the end of the function.\n cellsize : str\n The size of each pixel, in a Casa-recognized angle.\n \"\"\"\n error = 0\n ia = cc.image()\n error += not ia.open(imname)\n dd = ia.summary()\n # dd has shape npixx, npixy, nch, npol\n npixx = dd['shape'][0]\n if verbose:\n print('Image shape: {0}'.format(dd['shape']))\n imvals = ia.getchunk(0, int(npixx))[:, :, 0, 0]\n #imvals = fftshift(imvals)\n error += 
ia.done()\n max_idxs = np.unravel_index(imvals.argmax(), imvals.shape)\n cellsizex = Angle(dd['incr'][0], dd['axisunits'][0])\n cellsizey = Angle(dd['incr'][1], dd['axisunits'][1])\n ra, dec = (\n Angle('{0}{1}'.format(dd['refval'][0], dd['axisunits'][0])),\n Angle('{0}{1}'.format(dd['refval'][1], dd['axisunits'][1]))\n )\n brightest_point = (\n ra +\n Angle('{0}{1}'.format(\n dd['incr'][0]*(max_idxs[0]-dd['refpix'][0]),\n dd['axisunits'][0]\n ))/np.cos(dec),\n dec +\n Angle('{0}{1}'.format(\n dd['incr'][1]*(max_idxs[1]-dd['refpix'][1]),\n dd['axisunits'][1]\n ))\n )\n if verbose:\n print('Peak SNR at pix ({0},{1}) = {2}'.format(max_idxs[0],\n max_idxs[1],\n imvals.max()/\n imvals.std()))\n print('Value at peak: {0}'.format(imvals.max()))\n print('Value at origin: {0}'.format(imvals[imvals.shape[0]//2,\n imvals.shape[1]//2]))\n\n _, ax = plt.subplots(1, 1, figsize=(15, 8))\n pim = ax.imshow(\n imvals.transpose(),\n interpolation='none',\n origin='lower',\n extent=[\n (-imvals.shape[0]/2*Angle(cellsizex)).to_value(u.arcmin),\n (imvals.shape[0]/2*Angle(cellsizex)).to_value(u.arcmin),\n (-imvals.shape[1]/2*Angle(cellsizey)).to_value(u.arcmin),\n (imvals.shape[1]/2*Angle(cellsizey)).to_value(u.arcmin)\n ]\n )\n plt.colorbar(pim)\n ax.axvline(0, color='white', alpha=0.5)\n ax.axhline(0, color='white', alpha=0.5)\n ax.set_xlabel('l (arcmin)')\n ax.set_ylabel('m (arcmin)')\n plttitle = '{0} {1:.2f} {2:.2f}'.format(\n imname,\n brightest_point[0],\n brightest_point[1]\n )\n if expected_point is not None:\n plttitle += ', offset by {0:.2f} {1:.2f}'.format(\n (brightest_point[0]-expected_point[0]).to(u.arcmin),\n (brightest_point[1]-expected_point[1]).to(u.arcmin)\n )\n plt.title(plttitle)\n if outname is not None:\n plt.savefig('{0}_image.png'.format(outname))\n if not show:\n plt.close()\n if error > 0:\n print('{0} errors occured during imaging'.format(error))\n return brightest_point\n\ndef read_bfweights(bfweights, bfdir):\n \"\"\"Reads the beamforming weights.\n\n Parameters\n ----------\n bfweights : str\n The label of the file containing the weights. 
Will open\n        <bfdir>/beamformer_weights_<bfweights>.yaml\n    bfdir : str\n        The directory in which the beamformer weights are stored.\n\n    Returns\n    -------\n    antenna_order : list\n        The order of the antennas in the bfweights array.\n    bfweights : ndarray\n        The beamformer weights, (antenna, frequency, polarization).\n        Frequency is in the same order as in the correlator.\n    \"\"\"\n    with open('{0}/beamformer_weights_{1}.yaml'.format(\n        bfdir,\n        bfweights,\n    )) as yamlf:\n        bfparams = yaml.load(yamlf, Loader=yaml.FullLoader)\n    if 'cal_solutions' in bfparams.keys():\n        bfparams = bfparams['cal_solutions']\n    antenna_order = bfparams.get('antenna_order', ANTENNA_ORDER)\n    corr_order = bfparams.get('corr_order', CORR_ORDER)\n    gains = np.zeros(\n        (len(antenna_order), len(corr_order), 48, 2),\n        dtype=np.complex\n    )\n    for corridx, corr in enumerate(corr_order):\n        with open(\n            '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(\n                bfdir,\n                corr,\n                bfweights\n            ),\n            'rb'\n        ) as f:\n            data = np.fromfile(f, '<f4')\n        temp = data[64:].reshape(64, 48, 2, 2)\n        gains[:, corridx, :, :] = temp[..., 0]+1.0j*temp[..., 1]\n    gains = gains.reshape(\n        (len(antenna_order), len(corr_order)*48, 2)\n    )\n    return antenna_order, gains\n\ndef calibrate_T3ms(msname, bfweights, bfdir, dedisp_mask=None):\n    \"\"\"Calibrates a measurement set using the beamformer weights.\n\n    Calibrated data is written into the CORRECTED_DATA column.\n\n    Parameters\n    ----------\n    msname : str\n        The name of the measurement set.\n    bfweights : str\n        The label of the file containing the weights. Will open\n        <bfdir>/beamformer_weights_<bfweights>.yaml\n    bfdir : str\n        The directory in which the beamformer weights are stored.\n    dedisp_mask : str\n        The path to a dedispersion mask to be applied.\n    \"\"\"\n    antenna_order, gains = read_bfweights(bfweights, bfdir)\n    gains = gains[:, ::-1, :]\n\n    data, _, fobs, flags, ant1, ant2, _, _, orig_shape = extract_vis_from_ms(\n        msname,\n        data='data'\n    )\n    print(data.shape)\n    data = data.reshape(\n        data.shape[0],\n        data.shape[1],\n        data.shape[2],\n        gains.shape[1],\n        -1,\n        data.shape[-1]\n    )\n    assert np.all(np.diff(fobs) > 0)\n    assert orig_shape == ['time', 'baseline', 'spw']\n    for i in range(data.shape[0]):\n        a1 = ant1[i]+1\n        a2 = ant2[i]+1\n        try:\n            bl_gains = (\n                np.conjugate(\n                    gains[antenna_order.index(a2), ...]\n                )*gains[antenna_order.index(a1), ...]\n            )\n            bl_gains = np.exp(1.j*np.angle(bl_gains))\n            data[i, ...] *= bl_gains[:, np.newaxis, :]\n        except ValueError:\n            flags[i, ...] 
= 1\n print('no calibration solutions for baseline {0}-{1}'.format(a1, a2))\n data = data.swapaxes(0, 1).reshape((-1, len(fobs), data.shape[-1]))\n flags = flags.swapaxes(0, 1).reshape((-1, len(fobs), flags.shape[-1]))\n # dedisp_flags = np.load(dedisp_mask)\n # check size \n # data[data!=data] = np.nanmean(data) this should be okay now\n with table('{0}.ms'.format(msname), readonly=False) as tb:\n tb.putcol('CORRECTED_DATA', data)\n tb.putcol('FLAG', flags)\n", "id": "382834", "language": "Python", "matching_score": 4.890398979187012, "max_stars_count": 0, "path": "dsaT3/T3imaging.py" }, { "content": "\"\"\"Tests for ms_io.py.\n\"\"\"\nimport datetime\nimport numpy as np\nfrom antpos.utils import get_itrf, get_baselines\nimport casatools as cc\nfrom casacore.tables import table\nfrom dsacalib import constants\nfrom dsacalib import utils\nfrom dsacalib.fringestopping import calc_uvw\nfrom dsacalib import ms_io as msio\nimport astropy.units as u\nfrom astropy.utils import iers\niers.conf.iers_auto_url_mirror = constants.IERS_TABLE\niers.conf.auto_max_age = None\nfrom astropy.time import Time\n\ndef test_simulate_ms(tmpdir):\n \"\"\"Test simulate_ms function.\n \"\"\"\n ntint = 32*32*4\n nfint = 8*8*4\n antennas = np.array([24, 25, 26])\n blen_df = get_baselines(antennas[::-1], autocorrs=True, casa_order=True)\n blen = np.array([blen_df['x_m'], blen_df['y_m'], blen_df['z_m']]).T\n ant_itrf = get_itrf().loc[antennas]\n xx = ant_itrf['dx_m']\n yy = ant_itrf['dy_m']\n zz = ant_itrf['dz_m']\n antenna_names = [str(a) for a in antennas]\n top_of_channel = 1.53*u.GHz + (-250*u.MHz/8192)*1024\n deltaf = -0.030517578125*u.MHz\n nchan = 6144//nfint\n fobs = top_of_channel + deltaf*nfint*(np.arange(nchan)+0.5)\n tstart = Time(datetime.datetime.utcnow())\n source = utils.src(\n name='3C273',\n ra=187.27916667*u.deg,\n dec=2.0625*u.deg\n )\n me = cc.measures()\n msio.simulate_ms(\n ofile='{0}/test.ms'.format(tmpdir),\n tname='OVRO_MMA',\n anum=antenna_names,\n xx=xx,\n yy=yy,\n zz=zz,\n diam=4.5,\n mount='alt-az',\n pos_obs=me.observatory('OVRO_MMA'),\n spwname='L_BAND',\n freq='{0}GHz'.format(fobs[0].to_value(u.GHz)),\n deltafreq='{0}MHz'.format((deltaf*nfint).to_value(u.MHz)),\n freqresolution='{0}MHz'.format(np.abs((deltaf*nfint).to_value(u.MHz))),\n nchannels=6144//nfint,\n integrationtime='{0}s'.format(0.000032768*ntint),\n obstm=tstart.mjd,\n dt=0.000429017462010961+7.275957614183426e-12-7.767375791445374e-07,\n source=source,\n stoptime='{0}s'.format(0.000032768*122880),\n autocorr=True,\n fullpol=True\n )\n with table('{0}/test.ms/POLARIZATION'.format(tmpdir)) as tb:\n # CORR_TYPE integers given by order in\n # https://casa.nrao.edu/active/docs/doxygen/html/classcasa_1_1Stokes.html\n assert np.all(tb.CORR_TYPE[:][0] == np.array([9, 10, 11, 12]))\n assert np.all(\n tb.CORR_PRODUCT[:][0] == np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n )\n assert tb.NUM_CORR[:][0] == 4\n with table('{0}/test.ms'.format(tmpdir)) as tb:\n ant1 = np.array(tb.ANTENNA1[:])\n ant2 = np.array(tb.ANTENNA2[:])\n tobs = np.array(tb.TIME[:])/constants.SECONDS_PER_DAY\n uvw = np.array(tb.UVW[:])\n with table('{0}/test.ms/SPECTRAL_WINDOW'.format(tmpdir)) as tb:\n fobs_out = np.array(tb.CHAN_FREQ[:])\n tobs = tobs.reshape(-1, 6)\n assert np.all(np.abs(tobs[0, :]-tobs[0, 0]) < 1e-15)\n assert np.abs(tobs[0, 0]-(tstart.mjd+0.000032768*ntint/2/constants.SECONDS_PER_DAY)) < 1e-10\n assert tobs.shape[0] == 122880//ntint\n assert fobs_out.shape == (1, 6144//nfint)\n assert np.all(np.abs(np.diff(fobs_out)-(-0.030517578125*nfint*1e6)) < 
1e-15)\n assert np.all(np.abs(fobs_out[0, :]-fobs.to_value(u.Hz)) < 1e-15)\n bu, bv, bw = calc_uvw(\n blen,\n tobs.reshape(-1, 6)[:, 0],\n source.epoch,\n source.ra,\n source.dec\n )\n # Note that antpos gives A* B, casa gives A B*\n # Need to confirm which order we are doing\n assert np.all(np.abs(\n uvw[:, 0].reshape(-1, 6) - bu.T) < 1e-8)\n assert np.all(np.abs(\n uvw[:, 1].reshape(-1, 6) - bv.T) < 1e-8)\n assert np.all(np.abs(\n uvw[:, 2].reshape(-1, 6) - bw.T) < 1e-8)\n", "id": "10984765", "language": "Python", "matching_score": 3.0125889778137207, "max_stars_count": 1, "path": "tests/test_ms_io.py" }, { "content": "import pytest\n\nimport astropy.units as u\nimport numpy as np\nfrom dsacalib import constants\nfrom astropy.utils import iers\niers.conf.iers_auto_url_mirror = constants.IERS_TABLE\niers.conf.auto_max_age = None\nfrom astropy.time import Time\nfrom dsacalib import utils\n\ndef test_siderealtime():\n st = Time.now().sidereal_time('apparent', longitude=constants.OVRO_LON*u.rad).radian\n assert st > 0\n\ndef test_src():\n tol = 1e-8\n ss = utils.src('test', '0d0m0.00s', '3h0m0.00s')\n assert ss.name == 'test'\n assert np.abs(ss.ra.to_value(u.rad) - 0.) < tol\n assert np.abs(ss.dec.to_value(u.rad) - np.pi/4) < tol\n ss = utils.src('test', 0.*u.deg, 45.*u.deg)\n assert ss.name == 'test'\n assert np.abs(ss.ra.to_value(u.rad) - 0.) < tol\n assert np.abs(ss.dec.to_value(u.rad) - np.pi/4) < tol\n\ndef test_todeg():\n tol = 1e-8\n assert np.abs(utils.to_deg('0d0m0.00s').to_value(u.rad) - 0.) < 1e-8\n assert np.abs(utils.to_deg('3h0m0.00s').to_value(u.rad) - np.pi/4) < 1e-8\n", "id": "1081367", "language": "Python", "matching_score": 0.9589105248451233, "max_stars_count": 1, "path": "tests/test_utils.py" }, { "content": "import pytest\nfrom caltools import caltools\n\n\ndef test_querycoord():\n from astropy import coordinates as co, units as u\n coord = co.SkyCoord(ra=180., dec=0., unit=(u.deg, u.deg))\n table = caltools.query_heasarc(coord=coord, mission='NVSS')\n print(table)\n assert table is not None\n\n\ndef test_queryradec():\n table = caltools.query_heasarc(ra=180., dec=0.)\n print(table)\n assert table is not None\n\n", "id": "8914740", "language": "Python", "matching_score": 1.5184651613235474, "max_stars_count": 0, "path": "tests/test_query.py" }, { "content": "# Functions heavily borrowed from rf_meta_query/catalog_utils.py by <NAME>\n# claw, 19oct07\n\nimport numpy as np\nfrom astropy import coordinates, units\nfrom astroquery.heasarc import Heasarc\nimport logging\nlogging.basicConfig(format='%(asctime)s %(message)s')\n\nheasarc = Heasarc()\n\n_beamradius_DSA = 2.\n\n\ndef get_calibrator_lists(ra, dec, fluxratio=0.7, survey='NVSS', radius=_beamradius_DSA):\n \"\"\" Given ra,dec, define list of calibrator sources that includes fluxratio of the total flux in the field.\n Returns array of tuples (ra, dec, flux) to be used as input to models.\n Can optionally define survey and radius for catalog query.\n \"\"\"\n\n table = list_calibrators(ra, dec, surveys=[survey], radius=radius)[survey]\n if table is None:\n return np.empty(0)\n\n table.sort(keys='flux', reverse=True)\n totalflux = table['flux'].sum() # TODO: define as all flux but select on compact sources?\n if fluxratio < 1:\n ind = np.where(np.cumsum(table['flux']) > fluxratio*totalflux)[0][0] + 1\n else:\n ind = len(table)\n\n return np.array(table[:ind]['ra', 'dec', 'flux'])\n\n\ndef list_calibrators(ra, dec, surveys=[\"NVSS\"], radius=_beamradius_DSA):\n \"\"\" Search surveys for sources near (ra, dec)\n Args:\n 
ra, dec: float\n Coordinates in degrees\n surveys: list(str)\n Survey strings used by HEASARC (e.g., FIRST, NVSS)\n radius: float\n Radius of aree to include in degrees\n \"\"\"\n \n tables = {}\n coord = coordinates.SkyCoord(ra=ra, dec=dec, unit=(units.deg, units.deg))\n\n for survey in surveys:\n cat = query_heasarc(coord=coord, mission=survey, radius=radius*units.deg)\n if cat is not None:\n cat = sort_by_separation(clean_heasarc(cat), coord=coord)\n if 'FLUX_20_CM' in cat.columns:\n cat.rename_column(\"FLUX_20_CM\", \"flux\")\n cols_keep = ['NAME', 'ra', 'dec', 'flux', 'separation']\n else:\n cols_keep = ['NAME', 'ra', 'dec', 'separation']\n tables[survey] = cat[cols_keep]\n else:\n tables[survey] = None\n\n # TODO: select based on source size\n return tables\n\n\ndef query_heasarc(coord=None, ra=None, dec=None, mission='NVSS', radius=_beamradius_DSA*units.deg):\n \"\"\"\n Use astroquery to query the HEARSARC database\n\n Args:\n ra, dec: float (degrees)\n coord: astropy.coordinates.sky_coordinate.SkyCoord\n mission: str\n Uses HEASARC notation\n radius: Angle\n\n Returns:\n \"\"\"\n\n if ra is not None and dec is not None:\n coord = coordinates.SkyCoord(ra=ra, dec=dec, unit=(units.deg, units.deg))\n\n assert isinstance(coord, coordinates.sky_coordinate.SkyCoord)\n\n catalog = None\n try:\n catalog = heasarc.query_region(coord, mission=mission, radius=radius)\n except (ValueError, TypeError):\n logging.warn(\"No source found at {0}\".format(coord))\n\n return catalog\n\n\ndef sort_by_separation(catalog, coord, radec=('ra', 'dec'), add_sep=True):\n \"\"\"\n Sort an input catalog by separation from input coordinate\n\n Args:\n catalog: astropy.table.Table\n coord: SkyCoord\n radec: tuple\n Defines catalog columns holding RA, DEC (in deg)\n add_sep: bool, optional\n Add a 'separation' column with units of arcmin\n\n Returns:\n srt_catalog: astropy.table.Table\n Sorted catalog\n\n \"\"\"\n # Check\n for key in radec:\n if key not in catalog.keys():\n print(\"RA/DEC key: {:s} not in your Table\".format(key))\n raise IOError(\"Try again..\")\n # Grab coords\n cat_coords = coordinates.SkyCoord(ra=catalog[radec[0]].data,\n dec=catalog[radec[1]].data, unit='deg')\n\n # Separations\n seps = coord.separation(cat_coords)\n isrt = np.argsort(seps)\n # Add?\n if add_sep:\n catalog['separation'] = seps.to('arcmin').value\n # Sort\n srt_catalog = catalog[isrt]\n # Return\n return srt_catalog\n\n\ndef clean_heasarc(catalog):\n \"\"\" Renames columns\n \"\"\"\n\n catalog.rename_column(\"RA\", \"ra\")\n catalog.rename_column(\"DEC\", \"dec\")\n for key in ['ra', 'dec']:\n catalog[key].unit = units.deg\n\n return catalog\n", "id": "10433890", "language": "Python", "matching_score": 2.9317338466644287, "max_stars_count": 0, "path": "catalogs/caltools.py" }, { "content": "import pytest\nfrom caltools import caltools\n\n\ndef test_nvss():\n table = caltools.list_calibrators(180., 0., surveys=[\"NVSS\"], radius=1.)\n print(table)\n assert table is not None\n\n\ndef test_first():\n table = caltools.list_calibrators(180., 40., surveys=[\"FIRST\"], radius=1.)\n print(table)\n assert table is not None\n\n\ndef test_radius():\n table1 = caltools.list_calibrators(180., 40., surveys=[\"FIRST\"], radius=1.)\n table2 = caltools.list_calibrators(180., 40., surveys=[\"FIRST\"], radius=2.)\n print(table1, table2)\n assert len(table2[\"FIRST\"]) > len(table1[\"FIRST\"])\n \n\ndef test_getlist():\n ll = caltools.get_calibrator_lists(180., 40., fluxratio=0.7, survey='NVSS')\n print(ll)\n assert len(ll)\n\n\ndef 
test_listsize():\n ll1 = caltools.get_calibrator_lists(180., 40., fluxratio=0.7, survey='NVSS')\n ll2 = caltools.get_calibrator_lists(180., 40., fluxratio=0.9, survey='NVSS')\n print(ll1, ll2)\n assert len(ll2) > len(ll1)\n", "id": "11317174", "language": "Python", "matching_score": 0.667550802230835, "max_stars_count": 0, "path": "tests/test_tables.py" }, { "content": "import pytest\nfrom event import labels\n\n\ndef test_list():\n labels.list_cands_labels('data/t2trigger.json')\n\n\ndef test_read():\n dd = labels.readfile('data/t2trigger.json')\n assert len(dd)\n", "id": "7178108", "language": "Python", "matching_score": 0.12922175228595734, "max_stars_count": 0, "path": "tests/test_labels.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Tests for FHD_cal object.\"\"\"\nimport pytest\nimport os\n\nimport numpy as np\n\nfrom pyuvdata import UVCal\nimport pyuvdata.tests as uvtest\nfrom pyuvdata.data import DATA_PATH\n\n# set up FHD file list\ntestdir = os.path.join(DATA_PATH, \"fhd_cal_data/\")\ntestfile_prefix = \"1061316296_\"\nobs_testfile = os.path.join(testdir, testfile_prefix + \"obs.sav\")\ncal_testfile = os.path.join(testdir, testfile_prefix + \"cal.sav\")\nsettings_testfile = os.path.join(testdir, testfile_prefix + \"settings.txt\")\n\n\ndef test_read_fhdcal_raw_write_read_calfits(tmp_path):\n \"\"\"\n FHD cal to calfits loopback test.\n\n Read in FHD cal files, write out as calfits, read back in and check for\n object equality.\n \"\"\"\n fhd_cal = UVCal()\n calfits_cal = UVCal()\n fhd_cal.read_fhd_cal(cal_testfile, obs_testfile, settings_file=settings_testfile)\n\n assert np.max(fhd_cal.gain_array) < 2.0\n\n outfile = str(tmp_path / \"outtest_FHDcal_1061311664.calfits\")\n fhd_cal.write_calfits(outfile, clobber=True)\n calfits_cal.read_calfits(outfile)\n assert fhd_cal == calfits_cal\n\n return\n\n\ndef test_read_fhdcal_fit_write_read_calfits(tmp_path):\n # do it again with fit gains (rather than raw)\n fhd_cal = UVCal()\n calfits_cal = UVCal()\n fhd_cal.read_fhd_cal(\n cal_testfile, obs_testfile, settings_file=settings_testfile, raw=False\n )\n outfile = str(tmp_path / \"outtest_FHDcal_1061311664.calfits\")\n fhd_cal.write_calfits(outfile, clobber=True)\n calfits_cal.read_calfits(outfile)\n assert fhd_cal == calfits_cal\n\n return\n\n\ndef test_extra_history(tmp_path):\n \"\"\"Test that setting the extra_history keyword works.\"\"\"\n fhd_cal = UVCal()\n calfits_cal = UVCal()\n extra_history = \"Some extra history for testing\\n\"\n fhd_cal.read_fhd_cal(\n cal_testfile,\n obs_testfile,\n settings_file=settings_testfile,\n extra_history=extra_history,\n )\n\n outfile = str(tmp_path / \"outtest_FHDcal_1061311664.calfits\")\n fhd_cal.write_calfits(outfile, clobber=True)\n calfits_cal.read_calfits(outfile)\n assert fhd_cal == calfits_cal\n assert extra_history in fhd_cal.history\n\n return\n\n\ndef test_extra_history_strings(tmp_path):\n # try again with a list of history strings\n fhd_cal = UVCal()\n calfits_cal = UVCal()\n extra_history = [\"Some extra history for testing\", \"And some more history as well\"]\n fhd_cal.read_fhd_cal(\n cal_testfile,\n obs_testfile,\n settings_file=settings_testfile,\n extra_history=extra_history,\n )\n\n outfile = str(tmp_path / \"outtest_FHDcal_1061311664.calfits\")\n fhd_cal.write_calfits(outfile, clobber=True)\n calfits_cal.read_calfits(outfile)\n assert fhd_cal == calfits_cal\n for line in extra_history:\n assert line in 
fhd_cal.history\n\n return\n\n\ndef test_flags_galaxy(tmp_path):\n \"\"\"Test files with time, freq and tile flags and galaxy models behave.\"\"\"\n testdir = os.path.join(DATA_PATH, \"fhd_cal_data/flag_set\")\n obs_testfile_flag = os.path.join(testdir, testfile_prefix + \"obs.sav\")\n cal_testfile_flag = os.path.join(testdir, testfile_prefix + \"cal.sav\")\n settings_testfile_flag = os.path.join(testdir, testfile_prefix + \"settings.txt\")\n\n fhd_cal = UVCal()\n calfits_cal = UVCal()\n fhd_cal.read_fhd_cal(\n cal_testfile_flag, obs_testfile_flag, settings_file=settings_testfile_flag\n )\n\n outfile = str(tmp_path / \"outtest_FHDcal_1061311664.calfits\")\n fhd_cal.write_calfits(outfile, clobber=True)\n calfits_cal.read_calfits(outfile)\n assert fhd_cal == calfits_cal\n\n\ndef test_break_read_fhdcal():\n \"\"\"Try various cases of missing files.\"\"\"\n fhd_cal = UVCal()\n pytest.raises(TypeError, fhd_cal.read_fhd_cal, cal_testfile) # Missing obs\n\n with uvtest.check_warnings(UserWarning, \"No settings file\"):\n fhd_cal.read_fhd_cal(cal_testfile, obs_testfile)\n\n # Check only pyuvdata version history with no settings file\n assert fhd_cal.history == \"\\n\" + fhd_cal.pyuvdata_version_str\n\n\ndef test_read_multi(tmp_path):\n \"\"\"Test reading in multiple files.\"\"\"\n testdir2 = os.path.join(DATA_PATH, \"fhd_cal_data/set2\")\n obs_testfile_list = [\n obs_testfile,\n os.path.join(testdir2, testfile_prefix + \"obs.sav\"),\n ]\n cal_testfile_list = [\n cal_testfile,\n os.path.join(testdir2, testfile_prefix + \"cal.sav\"),\n ]\n settings_testfile_list = [\n settings_testfile,\n os.path.join(testdir2, testfile_prefix + \"settings.txt\"),\n ]\n\n fhd_cal = UVCal()\n calfits_cal = UVCal()\n\n with uvtest.check_warnings(UserWarning, \"UVParameter diffuse_model does not match\"):\n fhd_cal.read_fhd_cal(\n cal_testfile_list, obs_testfile_list, settings_file=settings_testfile_list\n )\n\n outfile = str(tmp_path / \"outtest_FHDcal_1061311664.calfits\")\n fhd_cal.write_calfits(outfile, clobber=True)\n calfits_cal.read_calfits(outfile)\n assert fhd_cal == calfits_cal\n\n\ndef test_break_read_multi():\n \"\"\"Test errors for different numbers of files.\"\"\"\n testdir2 = os.path.join(DATA_PATH, \"fhd_cal_data/set2\")\n obs_testfile_list = [\n obs_testfile,\n os.path.join(testdir2, testfile_prefix + \"obs.sav\"),\n ]\n cal_testfile_list = [\n cal_testfile,\n os.path.join(testdir2, testfile_prefix + \"cal.sav\"),\n ]\n settings_testfile_list = [\n settings_testfile,\n os.path.join(testdir2, testfile_prefix + \"settings.txt\"),\n ]\n\n fhd_cal = UVCal()\n pytest.raises(\n ValueError,\n fhd_cal.read_fhd_cal,\n cal_testfile_list,\n obs_testfile_list[0],\n settings_file=settings_testfile_list,\n )\n pytest.raises(\n ValueError,\n fhd_cal.read_fhd_cal,\n cal_testfile_list,\n obs_testfile_list,\n settings_file=settings_testfile_list[0],\n )\n pytest.raises(\n ValueError,\n fhd_cal.read_fhd_cal,\n cal_testfile_list,\n obs_testfile_list + obs_testfile_list,\n settings_file=settings_testfile_list,\n )\n pytest.raises(\n ValueError,\n fhd_cal.read_fhd_cal,\n cal_testfile_list,\n obs_testfile_list,\n settings_file=settings_testfile_list + settings_testfile_list,\n )\n pytest.raises(\n ValueError,\n fhd_cal.read_fhd_cal,\n cal_testfile_list[0],\n obs_testfile_list,\n settings_file=settings_testfile_list[0],\n )\n pytest.raises(\n ValueError,\n fhd_cal.read_fhd_cal,\n cal_testfile_list[0],\n obs_testfile_list[0],\n settings_file=settings_testfile_list,\n )\n", "id": "11667148", "language": "Python", 
"matching_score": 2.7225191593170166, "max_stars_count": 0, "path": "pyuvdata/uvcal/tests/test_fhd_cal.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2019 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Tests for MWACorrFITS object.\"\"\"\n\nimport pytest\nimport os\nimport numpy as np\n\nfrom pyuvdata import UVData\nfrom pyuvdata.data import DATA_PATH\nimport pyuvdata.tests as uvtest\nfrom astropy.io import fits\n\n# set up MWA correlator file list\ntestdir = os.path.join(DATA_PATH, \"mwa_corr_fits_testfiles/\")\n\ntestfiles = [\n \"1131733552.metafits\",\n \"1131733552_20151116182537_mini_gpubox01_00.fits\",\n \"1131733552_20151116182637_mini_gpubox06_01.fits\",\n \"1131733552_mini_01.mwaf\",\n \"1131733552_mini_06.mwaf\",\n \"1131733552_mod.metafits\",\n \"1131733552_mini_cotter.uvfits\",\n \"1131733552_metafits_ppds.fits\",\n]\nfilelist = [testdir + i for i in testfiles]\n\n\[email protected](scope=\"module\")\ndef flag_file_init(tmp_path_factory):\n tmp_path = tmp_path_factory.mktemp(\"pyuvdata_corr_fits\", numbered=True)\n spoof_file1 = str(tmp_path / \"spoof_01_00.fits\")\n spoof_file6 = str(tmp_path / \"spoof_06_00.fits\")\n # spoof box files of the appropriate size\n with fits.open(filelist[1]) as mini1:\n mini1[1].data = np.repeat(mini1[1].data, 8, axis=0)\n extra_dat = np.copy(mini1[1].data)\n for app_ind in range(2):\n mini1.append(fits.ImageHDU(extra_dat))\n mini1[2].header[\"MILLITIM\"] = 500\n mini1[2].header[\"TIME\"] = mini1[1].header[\"TIME\"]\n mini1[3].header[\"MILLITIM\"] = 0\n mini1[3].header[\"TIME\"] = mini1[1].header[\"TIME\"] + 1\n mini1.writeto(spoof_file1)\n\n with fits.open(filelist[2]) as mini6:\n mini6[1].data = np.repeat(mini6[1].data, 8, axis=0)\n extra_dat = np.copy(mini6[1].data)\n for app_ind in range(2):\n mini6.append(fits.ImageHDU(extra_dat))\n mini6[2].header[\"MILLITIM\"] = 500\n mini6[2].header[\"TIME\"] = mini6[1].header[\"TIME\"]\n mini6[3].header[\"MILLITIM\"] = 0\n mini6[3].header[\"TIME\"] = mini6[1].header[\"TIME\"] + 1\n mini6.writeto(spoof_file6)\n\n flag_testfiles = [spoof_file1, spoof_file6, filelist[0]]\n\n yield flag_testfiles\n\n\ndef test_read_mwa_write_uvfits(tmp_path):\n \"\"\"\n MWA correlator fits to uvfits loopback test.\n\n Read in MWA correlator files, write out as uvfits, read back in and check\n for object equality.\n \"\"\"\n mwa_uv = UVData()\n uvfits_uv = UVData()\n messages = [\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv.read_mwa_corr_fits(\n filelist[0:2], correct_cable_len=True, phase_to_pointing_center=True\n )\n\n testfile = str(tmp_path / \"outtest_MWAcorr.uvfits\")\n mwa_uv.write_uvfits(testfile, spoof_nonessential=True)\n uvfits_uv.read_uvfits(testfile)\n assert mwa_uv == uvfits_uv\n\n messages = [\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv.read_mwa_corr_fits(\n filelist[0:2], correct_cable_len=True, phase_to_pointing_center=True\n )\n\n testfile = str(tmp_path / \"outtest_MWAcorr.uvfits\")\n mwa_uv.write_uvfits(testfile, spoof_nonessential=True)\n uvfits_uv.read_uvfits(testfile)\n assert mwa_uv == uvfits_uv\n\n\[email protected](\"ignore:telescope_location is not set. 
\")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_select_on_read():\n mwa_uv = UVData()\n mwa_uv2 = UVData()\n mwa_uv.read_mwa_corr_fits(filelist[0:2], correct_cable_len=True)\n unique_times = np.unique(mwa_uv.time_array)\n select_times = unique_times[\n np.where(\n (unique_times >= np.min(mwa_uv.time_array))\n & (unique_times <= np.mean(mwa_uv.time_array))\n )\n ]\n mwa_uv.select(times=select_times)\n with uvtest.check_warnings(\n UserWarning,\n [\n 'Warning: select on read keyword set, but file_type is \"mwa_corr_fits\"',\n \"telescope_location is not set. Using known values for MWA.\",\n \"some coarse channel files were not submitted\",\n ],\n ):\n mwa_uv2.read(\n filelist[0:2],\n correct_cable_len=True,\n time_range=[np.min(mwa_uv.time_array), np.mean(mwa_uv.time_array)],\n )\n assert mwa_uv == mwa_uv2\n\n\[email protected](\"ignore:telescope_location is not set. \")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_read_mwa_read_cotter():\n \"\"\"\n Pyuvdata and cotter equality test.\n\n Read in MWA correlator files and the corresponding cotter file and check\n for data array equality.\n \"\"\"\n mwa_uv = UVData()\n cotter_uv = UVData()\n # cotter data has cable correction and is unphased\n mwa_uv.read(\n filelist[0:2],\n correct_cable_len=True,\n remove_dig_gains=False,\n remove_coarse_band=False,\n )\n cotter_uv.read(filelist[6])\n # cotter doesn't record the auto xy polarizations\n # due to a possible bug in cotter, the auto yx polarizations are conjugated\n # fix these before testing data_array\n autos = np.isclose(mwa_uv.ant_1_array - mwa_uv.ant_2_array, 0.0)\n cotter_uv.data_array[autos, :, :, 2] = cotter_uv.data_array[autos, :, :, 3]\n cotter_uv.data_array[autos, :, :, 3] = np.conj(cotter_uv.data_array[autos, :, :, 3])\n assert np.allclose(\n mwa_uv.data_array[:, :, :, :],\n cotter_uv.data_array[:, :, :, :],\n atol=1e-4,\n rtol=0,\n )\n\n\ndef test_read_mwa_write_uvfits_meta_mod(tmp_path):\n \"\"\"\n MWA correlator fits to uvfits loopback test with a modified metafits file.\n\n Read in MWA correlator files, write out as uvfits, read back in and check\n for object equality.\n \"\"\"\n # The metafits file has been modified to contain some coarse channels < 129,\n # and to have an uncorrected cable length.\n mwa_uv = UVData()\n uvfits_uv = UVData()\n messages = [\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n ]\n files = [filelist[1], filelist[5]]\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv.read(files, correct_cable_len=True, phase_to_pointing_center=True)\n testfile = str(tmp_path / \"outtest_MWAcorr.uvfits\")\n mwa_uv.write_uvfits(testfile, spoof_nonessential=True)\n uvfits_uv.read_uvfits(testfile)\n assert mwa_uv == uvfits_uv\n\n\[email protected](\"ignore:telescope_location is not set. 
\")\[email protected](\"ignore:some coarse channel files were not submitted\")\[email protected](\"ignore:Combined frequencies are not contiguous\")\ndef test_read_mwa_multi():\n \"\"\"Test reading in two sets of files.\"\"\"\n set1 = filelist[0:2]\n set2 = [filelist[0], filelist[2]]\n mwa_uv = UVData()\n\n mwa_uv.read([set1, set2])\n\n mwa_uv2 = UVData()\n messages = [\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n \"Combined frequencies are not contiguous\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv2.read([set1, set2], file_type=\"mwa_corr_fits\")\n\n assert mwa_uv == mwa_uv2\n\n\[email protected](\"ignore:telescope_location is not set. \")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_read_mwa_multi_concat(tmp_path):\n \"\"\"Test reading in two sets of files with fast concatenation.\"\"\"\n # modify file so that time arrays are matching\n mod_mini_6 = str(tmp_path / \"mini_gpubox06_01.fits\")\n with fits.open(filelist[2]) as mini6:\n mini6[1].header[\"time\"] = 1447698337\n mini6.writeto(mod_mini_6)\n set1 = filelist[0:2]\n set2 = [filelist[0], mod_mini_6]\n mwa_uv = UVData()\n mwa_uv.read([set1, set2], axis=\"freq\")\n\n mwa_uv2 = UVData()\n messages = [\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv2.read([set1, set2], axis=\"freq\", file_type=\"mwa_corr_fits\")\n\n assert mwa_uv == mwa_uv2\n os.remove(mod_mini_6)\n\n\[email protected](\"ignore:telescope_location is not set. \")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_read_mwa_flags():\n \"\"\"Test handling of flag files.\"\"\"\n mwa_uv = UVData()\n subfiles = [filelist[0], filelist[1], filelist[3], filelist[4]]\n messages = [\n \"mwaf files submitted with use_cotter_flags=False\",\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv.read(subfiles, use_cotter_flags=False)\n\n del mwa_uv\n\n mwa_uv = UVData()\n with pytest.raises(ValueError) as cm:\n mwa_uv.read(subfiles[0:2], use_cotter_flags=True)\n assert str(cm.value).startswith(\"no flag files submitted\")\n del mwa_uv\n\n\ndef test_multiple_coarse():\n \"\"\"\n Test two coarse channel files.\n\n Read in MWA correlator files with two different orderings of the files\n and check for object equality.\n \"\"\"\n order1 = [filelist[0:3]]\n order2 = [filelist[0], filelist[2], filelist[1]]\n mwa_uv1 = UVData()\n mwa_uv2 = UVData()\n messages = [\n \"telescope_location is not set\",\n \"coarse channels are not contiguous for this observation\",\n \"some coarse channel files were not submitted\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv1.read(order1)\n with uvtest.check_warnings(UserWarning, messages):\n mwa_uv2.read(order2)\n\n assert mwa_uv1 == mwa_uv2\n\n\[email protected](\"ignore:telescope_location is not set. 
\")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_ppds(tmp_path):\n \"\"\"Test handling of ppds files\"\"\"\n # turnaround test with just ppds file given\n mwa_uv = UVData()\n mwa_uv.read_mwa_corr_fits([filelist[1], filelist[7]], phase_to_pointing_center=True)\n testfile = str(tmp_path / \"outtest_MWAcorr.uvfits\")\n mwa_uv.write_uvfits(testfile, spoof_nonessential=True)\n uvfits_uv = UVData()\n uvfits_uv.read_uvfits(testfile)\n assert mwa_uv == uvfits_uv\n\n del mwa_uv\n del uvfits_uv\n\n # check that extra keywords are added when both ppds file and metafits file given\n mwa_uv = UVData()\n mwa_uv.read_mwa_corr_fits([filelist[0], filelist[1], filelist[7]])\n assert \"MWAVER\" in mwa_uv.extra_keywords and \"MWADATE\" in mwa_uv.extra_keywords\n\n\ndef test_fine_channels(tmp_path):\n \"\"\"\n Break read_mwa_corr_fits by submitting files with different fine channels.\n\n Test that error is raised if files with different numbers of fine channels\n are submitted.\n \"\"\"\n mwa_uv = UVData()\n bad_fine = str(tmp_path / \"bad_gpubox06_01.fits\")\n with fits.open(filelist[2]) as mini6:\n mini6[1].data = np.concatenate((mini6[1].data, mini6[1].data))\n mini6.writeto(bad_fine)\n with pytest.raises(ValueError) as cm:\n mwa_uv.read([bad_fine, filelist[1]])\n assert str(cm.value).startswith(\"files submitted have different fine\")\n del mwa_uv\n\n\[email protected](\n \"files,err_msg\",\n [\n ([filelist[0]], \"no data files submitted\"),\n ([filelist[1]], \"no metafits file submitted\"),\n (\n [filelist[0], filelist[1], filelist[5]],\n \"multiple metafits files in filelist\",\n ),\n ],\n)\ndef test_break_read_mwacorrfits(files, err_msg):\n \"\"\"Break read_mwa_corr_fits by submitting files incorrectly.\"\"\"\n mwa_uv = UVData()\n with pytest.raises(ValueError) as cm:\n mwa_uv.read(files)\n assert str(cm.value).startswith(err_msg)\n del mwa_uv\n\n\ndef test_file_extension(tmp_path):\n \"\"\"\n Break read_mwa_corr_fits by submitting file with the wrong extension.\n\n Test that error is raised if a file with an extension that is not fits,\n metafits, or mwaf is submitted.\n \"\"\"\n mwa_uv = UVData()\n bad_ext = str(tmp_path / \"1131733552.meta\")\n with fits.open(filelist[0]) as meta:\n meta.writeto(bad_ext)\n with pytest.raises(ValueError) as cm:\n mwa_uv.read(bad_ext, file_type=\"mwa_corr_fits\")\n assert str(cm.value).startswith(\"only fits, metafits, and mwaf files supported\")\n del mwa_uv\n\n\ndef test_diff_obs(tmp_path):\n \"\"\"\n Break read_mwa_corr_fits by submitting files from different observations.\n\n Test that error is raised if files from different observations are\n submitted in the same file list.\n \"\"\"\n mwa_uv = UVData()\n bad_obs = str(tmp_path / \"bad2_gpubox06_01.fits\")\n with fits.open(filelist[2]) as mini6:\n mini6[0].header[\"OBSID\"] = \"1131733555\"\n mini6.writeto(bad_obs)\n with pytest.raises(ValueError) as cm:\n mwa_uv.read([bad_obs, filelist[0], filelist[1]])\n assert str(cm.value).startswith(\"files from different observations\")\n del mwa_uv\n\n\ndef test_misaligned_times(tmp_path):\n \"\"\"\n Break read_mwa_corr_fits by submitting files with misaligned times.\n\n Test that error is raised if file start times are different by an amount\n that is not an integer multiiple of the integration time.\n \"\"\"\n mwa_uv = UVData()\n bad_obs = str(tmp_path / \"bad3_gpubox06_01.fits\")\n with fits.open(filelist[2]) as mini6:\n mini6[1].header[\"MILLITIM\"] = 250\n mini6.writeto(bad_obs)\n with pytest.raises(ValueError) as cm:\n 
mwa_uv.read([bad_obs, filelist[0], filelist[1]])\n assert str(cm.value).startswith(\"coarse channel start times are misaligned\")\n del mwa_uv\n\n\[email protected](\"ignore:telescope_location is not set. \")\[email protected](\n \"ignore:coarse channels are not contiguous for this observation\"\n)\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_flag_nsample_basic():\n \"\"\"\n Test that the flag(without flag_int) and nsample arrays correctly reflect data.\n \"\"\"\n uv = UVData()\n uv.read_mwa_corr_fits(filelist[0:3], flag_init=False)\n # check that only bad antennas are flagged for all times, freqs, pols\n bad_ants = [59, 114]\n good_ants = list(range(128))\n for j in bad_ants:\n good_ants.remove(j)\n bad = uv.select(antenna_nums=bad_ants, inplace=False)\n good = uv.select(antenna_nums=good_ants, inplace=False)\n assert np.all(bad.flag_array)\n # TODO: Spw axis to be collapsed in future release\n good.flag_array = good.flag_array.reshape(\n (good.Ntimes, good.Nbls, 1, good.Nfreqs, good.Npols)\n )\n # good ants should be flagged except for the first time and second freq,\n # and for the second time and first freq\n assert np.all(good.flag_array[1:-1, :, :, :, :])\n assert np.all(good.flag_array[0, :, :, 1, :] == 0)\n assert np.all(good.flag_array[-1, :, :, 0, :] == 0)\n assert np.all(good.flag_array[0, :, :, 0, :])\n assert np.all(good.flag_array[-1, :, :, 1, :])\n # check that nsample array is filled properly\n # TODO: Spw axis to be collapsed in future release\n uv.nsample_array = uv.nsample_array.reshape(\n (uv.Ntimes, uv.Nbls, 1, uv.Nfreqs, uv.Npols)\n )\n assert np.all(uv.nsample_array[1:-1, :, :, :, :] == 0.0)\n assert np.all(uv.nsample_array[0, :, :, 1, :] == 1.0)\n assert np.all(uv.nsample_array[-1, :, :, 0, :] == 1.0)\n assert np.all(uv.nsample_array[0, :, :, 0, :] == 0.0)\n assert np.all(uv.nsample_array[-1, :, :, 1, :] == 0.0)\n\n\[email protected](\"ignore:telescope_location is not set. \")\[email protected](\n \"ignore:coarse channels are not contiguous for this observation\"\n)\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_flag_init(flag_file_init):\n \"\"\"\n Test that routine MWA flagging works as intended.\n \"\"\"\n uv = UVData()\n uv.read(flag_file_init, flag_init=True, start_flag=0, end_flag=0)\n\n freq_inds = [0, 1, 4, 6, 7, 8, 9, 12, 14, 15]\n freq_inds_complement = [ind for ind in range(16) if ind not in freq_inds]\n\n assert np.all(\n uv.flag_array[:, :, freq_inds, :]\n ), \"Not all of edge and center channels are flagged!\"\n assert not np.any(\n np.all(uv.flag_array[:, :, freq_inds_complement, :], axis=(0, 1, -1))\n ), \"Some non-edge/center channels are entirely flagged!\"\n\n\[email protected](\"ignore:telescope_location is not set. 
\")\[email protected](\n \"ignore:coarse channels are not contiguous for this observation\"\n)\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_flag_start_flag(flag_file_init):\n uv = UVData()\n uv.read(\n flag_file_init,\n flag_init=True,\n start_flag=1.0,\n end_flag=1.0,\n edge_width=0,\n flag_dc_offset=False,\n )\n\n # TODO: Spw axis to be collapsed in future release\n reshape = [uv.Ntimes, uv.Nbls, 1, uv.Nfreqs, uv.Npols]\n time_inds = [0, 1, -1, -2]\n assert np.all(\n uv.flag_array.reshape(reshape)[time_inds, :, :, :, :]\n ), \"Not all of start and end times are flagged.\"\n # Check that it didn't just flag everything\n # Should have unflagged data for time inds [2, -3]\n assert not np.any(\n np.all(uv.flag_array.reshape(reshape)[[2, -3], :, :, :, :], axis=(1, 2, 3, 4))\n ), \"All the data is flagged for some intermediate times!\"\n\n\[email protected](\n \"err_type,read_kwargs,err_msg\",\n [\n (\n ValueError,\n {\"flag_init\": True, \"start_flag\": 0, \"end_flag\": 0, \"edge_width\": 90e3},\n \"The edge_width must be an integer multiple of the channel_width\",\n ),\n (\n ValueError,\n {\"flag_init\": True, \"start_flag\": 1.2, \"end_flag\": 0},\n \"The start_flag must be an integer multiple of the integration_time\",\n ),\n (\n ValueError,\n {\"flag_init\": True, \"start_flag\": 0, \"end_flag\": 1.2},\n \"The end_flag must be an integer multiple of the integration_time\",\n ),\n ],\n)\[email protected](\"ignore:telescope_location is not set. \")\[email protected](\n \"ignore:coarse channels are not contiguous for this observation\"\n)\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_flag_init_errors(flag_file_init, err_type, read_kwargs, err_msg):\n uv = UVData()\n # give noninteger multiple inputs\n with pytest.raises(err_type) as cm:\n uv.read(flag_file_init, **read_kwargs)\n assert str(cm.value).startswith(err_msg)\n\n\ndef test_read_metadata_only(tmp_path):\n \"\"\"Test reading an MWA corr fits file as metadata only.\"\"\"\n uvd = UVData()\n messages = [\n \"telescope_location is not set\",\n \"some coarse channel files were not submitted\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n uvd.read_mwa_corr_fits(\n filelist[0:2],\n correct_cable_len=True,\n phase_to_pointing_center=True,\n read_data=False,\n )\n\n assert uvd.metadata_only\n\n\[email protected](\"ignore:telescope_location is not set.\")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_data_array_precision():\n uv = UVData()\n uv2 = UVData()\n # read in data array as single precision\n uv.read(filelist[0:2], data_array_dtype=np.complex64)\n # now read as double precision\n uv2.read(filelist[0:2], data_array_dtype=np.complex128)\n\n assert uv == uv2\n assert uv.data_array.dtype.type is np.complex64\n assert uv2.data_array.dtype.type is np.complex128\n\n return\n\n\[email protected](\"ignore:telescope_location is not set.\")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_nsample_array_precision():\n uv = UVData()\n uv2 = UVData()\n uv3 = UVData()\n # read in nsample array at different precisions\n uv.read(filelist[0:2], nsample_array_dtype=np.float32)\n uv2.read(filelist[0:2], nsample_array_dtype=np.float64)\n uv3.read(filelist[0:2], nsample_array_dtype=np.float16)\n\n assert uv == uv2\n assert uv == uv3\n assert uv.nsample_array.dtype.type is np.float32\n assert uv2.nsample_array.dtype.type is np.float64\n assert uv3.nsample_array.dtype.type is np.float16\n\n 
return\n\n\ndef test_invalid_precision_errors():\n uv = UVData()\n\n # raise errors by passing bogus precision values\n with pytest.raises(ValueError, match=\"data_array_dtype must be np.complex64\"):\n uv.read_mwa_corr_fits(filelist[0:2], data_array_dtype=np.float64)\n\n with pytest.raises(\n ValueError, match=\"nsample_array_dtype must be one of: np.float64\"\n ):\n uv.read_mwa_corr_fits(filelist[0:2], nsample_array_dtype=np.complex128)\n\n return\n\n\[email protected](\"ignore:telescope_location is not set.\")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_remove_dig_gains():\n \"\"\"Test digital gain removal.\"\"\"\n uv1 = UVData()\n uv1.read(filelist[0:2])\n\n uv2 = UVData()\n uv2.read(filelist[0:2], remove_dig_gains=False)\n\n with fits.open(filelist[0]) as meta:\n meta_tbl = meta[1].data\n antenna_numbers = meta_tbl[\"Antenna\"][1::2]\n dig_gains = meta_tbl[\"Gains\"][1::2, :].astype(np.float64) / 64\n reordered_inds = antenna_numbers.argsort()\n dig_gains = dig_gains[reordered_inds, :]\n dig_gains = dig_gains[:, np.array([23])]\n dig_gains = np.repeat(dig_gains, 1, axis=1)\n dig_gains1 = dig_gains[uv2.ant_1_array, :]\n dig_gains2 = dig_gains[uv2.ant_2_array, :]\n dig_gains1 = dig_gains1[:, :, np.newaxis, np.newaxis]\n dig_gains2 = dig_gains2[:, :, np.newaxis, np.newaxis]\n uv2.data_array = uv2.data_array / (dig_gains1 * dig_gains2)\n\n assert uv1 == uv2\n\n\[email protected](\"ignore:telescope_location is not set.\")\[email protected](\"ignore:some coarse channel files were not submitted\")\ndef test_remove_coarse_band():\n \"\"\"Test coarse band removal.\"\"\"\n uv1 = UVData()\n uv1.read(filelist[0:2])\n\n uv2 = UVData()\n uv2.read(filelist[0:2], remove_coarse_band=False)\n\n with open(DATA_PATH + \"/mwa_config_data/MWA_rev_cb_10khz_doubles.txt\", \"r\") as f:\n cb = f.read().splitlines()\n cb_array = np.array(cb).astype(np.float64)\n cb_array = cb_array.reshape(32, 4)\n cb_array = np.average(cb_array, axis=1)\n cb_array = cb_array[0]\n\n uv2.data_array = uv2.data_array.swapaxes(2, 3)\n uv2.data_array = uv2.data_array / cb_array\n uv2.data_array = uv2.data_array.swapaxes(2, 3)\n\n assert uv1 == uv2\n\n\ndef test_cotter_flags():\n \"\"\"Test using cotter flags\"\"\"\n uv = UVData()\n files = filelist[0:2]\n files.append(filelist[3])\n messages = [\n \"telescope_location is not set.\",\n \"some coarse channel files were not submitted\",\n \"coarse channel, start time, and end time flagging will default\",\n ]\n with uvtest.check_warnings(UserWarning, messages):\n uv.read_mwa_corr_fits(files, flag_init=False)\n\n with fits.open(filelist[3]) as aoflags:\n flags = aoflags[1].data.field(\"FLAGS\")\n flags = flags[:, np.newaxis, :, np.newaxis]\n flags = np.repeat(flags, 4, axis=3)\n\n assert np.all(uv.flag_array == flags)\n\n\[email protected](\"ignore:telescope_location is not set.\")\[email protected](\n \"ignore:coarse channels are not contiguous for this observation\"\n)\[email protected](\"ignore:some coarse channel files were not submitted\")\[email protected](\"ignore:coarse channel, start time, and end time flagging\")\ndef test_cotter_flags_multiple(tmp_path):\n \"\"\"Test cotter flags with multiple coarse bands\"\"\"\n mod_mini_6 = str(tmp_path / \"mini_gpubox06_01.fits\")\n with fits.open(filelist[2]) as mini6:\n mini6[1].header[\"time\"] = 1447698337\n mini6.writeto(mod_mini_6)\n files = filelist[0:2] + filelist[3:5]\n files.append(mod_mini_6)\n\n uv = UVData()\n uv.read_mwa_corr_fits(files, flag_init=False)\n\n with 
fits.open(filelist[3]) as aoflags:\n flags1 = aoflags[1].data.field(\"FLAGS\")\n with fits.open(filelist[4]) as aoflags:\n flags2 = aoflags[1].data.field(\"FLAGS\")\n flags = np.array([flags2[:, 0], flags1[:, 0]])\n flags = np.transpose(flags)\n flags = flags[:, np.newaxis, :, np.newaxis]\n flags = np.repeat(flags, 4, axis=3)\n\n assert np.all(uv.flag_array == flags)\n\n\[email protected](\"ignore:telescope_location is not set.\")\[email protected](\"ignore:some coarse channel files were not submitted\")\[email protected](\"ignore:coarse channel, start time, and end time flagging\")\ndef test_mismatch_flags():\n \"\"\"Break by submitting flag and gpubox files from different coarse bands.\"\"\"\n uv = UVData()\n files = filelist[0:2]\n files.append(filelist[4])\n with pytest.raises(ValueError) as cm:\n uv.read(files)\n assert str(cm.value).startswith(\"flag file coarse bands do not match\")\n", "id": "3622605", "language": "Python", "matching_score": 4.1487579345703125, "max_stars_count": 0, "path": "pyuvdata/uvdata/tests/test_mwa_corr_fits.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Tests for UVFITS object.\n\n\"\"\"\nimport os\n\nimport pytest\nimport numpy as np\nimport astropy\nfrom astropy.io import fits\n\nfrom pyuvdata import UVData\nimport pyuvdata.utils as uvutils\nimport pyuvdata.tests as uvtest\nfrom pyuvdata.data import DATA_PATH\n\ncasa_tutorial_uvfits = os.path.join(\n DATA_PATH, \"day2_TDEM0003_10s_norx_1src_1spw.uvfits\"\n)\n\npaper_uvfits = os.path.join(DATA_PATH, \"zen.2456865.60537.xy.uvcRREAAM.uvfits\")\n\n\[email protected](scope=\"session\")\ndef uvfits_nospw_main():\n uv_in = UVData()\n # This file has a crazy epoch (2291.34057617) which breaks the uvw_antpos check\n # Since it's a PAPER file, I think this is a bug in the file, not in the check.\n uv_in.read(paper_uvfits, run_check_acceptability=False)\n\n return uv_in\n\n\[email protected](scope=\"function\")\ndef uvfits_nospw(uvfits_nospw_main):\n return uvfits_nospw_main.copy()\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_read_nrao(casa_uvfits):\n \"\"\"Test reading in a CASA tutorial uvfits file.\"\"\"\n uvobj = casa_uvfits\n expected_extra_keywords = [\"OBSERVER\", \"SORTORD\", \"SPECSYS\", \"RESTFREQ\", \"ORIGIN\"]\n assert expected_extra_keywords.sort() == list(uvobj.extra_keywords.keys()).sort()\n\n # test reading metadata only\n uvobj2 = UVData()\n uvobj2.read(casa_tutorial_uvfits, read_data=False)\n\n assert expected_extra_keywords.sort() == list(uvobj2.extra_keywords.keys()).sort()\n assert uvobj2.check()\n\n uvobj3 = uvobj.copy(metadata_only=True)\n assert uvobj2 == uvobj3\n\n\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_break_read_uvfits():\n \"\"\"Test errors on reading in a uvfits file with subarrays and other problems.\"\"\"\n uvobj = UVData()\n multi_subarray_file = os.path.join(DATA_PATH, \"multi_subarray.uvfits\")\n with pytest.raises(ValueError, match=\"This file appears to have multiple subarray\"):\n uvobj.read(multi_subarray_file)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_source_group_params(casa_uvfits, tmp_path):\n # make a file with a single source to test that it works\n uv_in = casa_uvfits\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n write_file2 
= str(tmp_path / \"outtest_casa2.uvfits\")\n uv_in.write_uvfits(write_file)\n\n with fits.open(write_file, memmap=True) as hdu_list:\n hdunames = uvutils._fits_indexhdus(hdu_list)\n vis_hdu = hdu_list[0]\n vis_hdr = vis_hdu.header.copy()\n raw_data_array = vis_hdu.data.data\n\n par_names = vis_hdu.data.parnames\n group_parameter_list = []\n\n lst_ind = 0\n for index, name in enumerate(par_names):\n par_value = vis_hdu.data.par(name)\n # lst_array needs to be split in 2 parts to get high enough accuracy\n if name.lower() == \"lst\":\n if lst_ind == 0:\n # first lst entry, par_value has full lst value\n # (astropy adds the 2 values)\n lst_array_1 = np.float32(par_value)\n lst_array_2 = np.float32(par_value - np.float64(lst_array_1))\n par_value = lst_array_1\n lst_ind = 1\n else:\n par_value = lst_array_2\n\n # need to account for PZERO values\n group_parameter_list.append(par_value - vis_hdr[\"PZERO\" + str(index + 1)])\n\n par_names.append(\"SOURCE\")\n source_array = np.ones_like(vis_hdu.data.par(\"BASELINE\"))\n group_parameter_list.append(source_array)\n\n vis_hdu = fits.GroupData(\n raw_data_array, parnames=par_names, pardata=group_parameter_list, bitpix=-32\n )\n vis_hdu = fits.GroupsHDU(vis_hdu)\n vis_hdu.header = vis_hdr\n ant_hdu = hdu_list[hdunames[\"AIPS AN\"]]\n\n hdulist = fits.HDUList(hdus=[vis_hdu, ant_hdu])\n hdulist.writeto(write_file2, overwrite=True)\n hdulist.close()\n\n uv_out = UVData()\n uv_out.read(write_file2)\n assert uv_in == uv_out\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_multisource_error(casa_uvfits, tmp_path):\n # make a file with multiple sources to test error condition\n uv_in = casa_uvfits\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n write_file2 = str(tmp_path / \"outtest_casa2.uvfits\")\n uv_in.write_uvfits(write_file)\n\n with fits.open(write_file, memmap=True) as hdu_list:\n hdunames = uvutils._fits_indexhdus(hdu_list)\n vis_hdu = hdu_list[0]\n vis_hdr = vis_hdu.header.copy()\n raw_data_array = vis_hdu.data.data\n\n par_names = vis_hdu.data.parnames\n group_parameter_list = []\n\n lst_ind = 0\n for index, name in enumerate(par_names):\n par_value = vis_hdu.data.par(name)\n # lst_array needs to be split in 2 parts to get high enough accuracy\n if name.lower() == \"lst\":\n if lst_ind == 0:\n # first lst entry, par_value has full lst value\n # (astropy adds the 2 values)\n lst_array_1 = np.float32(par_value)\n lst_array_2 = np.float32(par_value - np.float64(lst_array_1))\n par_value = lst_array_1\n lst_ind = 1\n else:\n par_value = lst_array_2\n\n # need to account for PZERO values\n group_parameter_list.append(par_value - vis_hdr[\"PZERO\" + str(index + 1)])\n\n par_names.append(\"SOURCE\")\n source_array = np.ones_like(vis_hdu.data.par(\"BASELINE\"))\n mid_index = source_array.shape[0] // 2\n source_array[mid_index:] = source_array[mid_index:] * 2\n group_parameter_list.append(source_array)\n\n vis_hdu = fits.GroupData(\n raw_data_array, parnames=par_names, pardata=group_parameter_list, bitpix=-32\n )\n vis_hdu = fits.GroupsHDU(vis_hdu)\n vis_hdu.header = vis_hdr\n ant_hdu = hdu_list[hdunames[\"AIPS AN\"]]\n\n hdulist = fits.HDUList(hdus=[vis_hdu, ant_hdu])\n hdulist.writeto(write_file2, overwrite=True)\n hdulist.close()\n\n with pytest.raises(ValueError) as cm:\n uv_in.read(write_file2)\n assert str(cm.value).startswith(\"This file has multiple sources\")\n\n\[email protected](\"ignore:The uvw_array does not match the expected 
values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_spwsupported():\n \"\"\"Test reading in a uvfits file with multiple spws.\"\"\"\n uvobj = UVData()\n testfile = os.path.join(DATA_PATH, \"day2_TDEM0003_10s_norx_1scan.uvfits\")\n uvobj.read(testfile)\n\n # We know this file has two spws\n assert uvobj.Nspws == 2\n\n # Verify that the data array has the right shape\n assert np.size(uvobj.data_array, axis=1) == 1\n assert np.size(uvobj.data_array, axis=2) == uvobj.Nfreqs\n\n # Verify that the freq array has the right shape\n assert np.size(uvobj.freq_array, axis=0) == 1\n assert np.size(uvobj.freq_array, axis=1) == uvobj.Nfreqs\n\n # Verift thaat the spw_array is the right length\n assert len(uvobj.spw_array) == uvobj.Nspws\n\n\ndef test_casa_nonascii_bytes_antenna_names():\n \"\"\"Test that nonascii bytes in antenna names are handled properly.\"\"\"\n uv1 = UVData()\n testfile = os.path.join(DATA_PATH, \"corrected2_zen.2458106.28114.ant012.HH.uvfits\")\n # this file has issues with the telescope location so turn checking off\n with uvtest.check_warnings(\n UserWarning, \"Telescope mock-HERA is not in known_telescopes.\"\n ):\n uv1.read(testfile, run_check=False)\n # fmt: off\n expected_ant_names = [\n 'HH0', 'HH1', 'HH2', 'H2', 'H2', 'H2', 'H2', 'H2', 'H2', 'H2',\n 'H2', 'HH11', 'HH12', 'HH13', 'HH14', 'H14', 'H14', 'H14', 'H14',\n 'H14', 'H14', 'H14', 'H14', 'HH23', 'HH24', 'HH25', 'HH26', 'HH27',\n 'H27', 'H27', 'H27', 'H27', 'H27', 'H27', 'H27', 'H27', 'HH36',\n 'HH37', 'HH38', 'HH39', 'HH40', 'HH41', 'H41', 'H41', 'H41', 'H41',\n 'H41', 'H41', 'H41', 'H41', 'HH50', 'HH51', 'HH52', 'HH53', 'HH54',\n 'HH55', 'H55', 'H55', 'H55', 'H55', 'H55', 'H55', 'H55', 'H55',\n 'H55', 'HH65', 'HH66', 'HH67', 'HH68', 'HH69', 'HH70', 'HH71',\n 'H71', 'H71', 'H71', 'H71', 'H71', 'H71', 'H71', 'H71', 'H71',\n 'H71', 'HH82', 'HH83', 'HH84', 'HH85', 'HH86', 'HH87', 'HH88',\n 'H88', 'H88', 'H88', 'H88', 'H88', 'H88', 'H88', 'H88', 'H88',\n 'HH98', 'H98', 'H98', 'H98', 'H98', 'H98', 'H98', 'H98', 'H98',\n 'H98', 'H98', 'H98', 'H98', 'H98', 'H98', 'H98', 'H98', 'H98',\n 'H98', 'H98', 'H98', 'H98', 'HH120', 'HH121', 'HH122', 'HH123',\n 'HH124', 'H124', 'H124', 'H124', 'H124', 'H124', 'H124', 'H124',\n 'H124', 'H124', 'H124', 'H124', 'HH136', 'HH137', 'HH138', 'HH139',\n 'HH140', 'HH141', 'HH142', 'HH143']\n # fmt: on\n assert uv1.antenna_names == expected_ant_names\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread(tmp_path, casa_uvfits):\n \"\"\"\n CASA tutorial uvfits loopback test.\n\n Read in uvfits file, write out new uvfits file, read back in and check for\n object equality.\n \"\"\"\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n uv_in.write_uvfits(write_file)\n uv_out.read(write_file)\n assert uv_in == uv_out\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread_no_lst(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n # test that it works with write_lst = False\n uv_in.write_uvfits(write_file, write_lst=False)\n uv_out.read(write_file)\n assert uv_in == uv_out\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef 
test_readwriteread_x_orientation(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n # check that if x_orientation is set, it's read back out properly\n uv_in.x_orientation = \"east\"\n uv_in.write_uvfits(write_file)\n uv_out.read(write_file)\n assert uv_in == uv_out\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread_antenna_diameters(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n # check that if antenna_diameters is set, it's read back out properly\n uv_in.antenna_diameters = np.zeros((uv_in.Nants_telescope,), dtype=np.float) + 14.0\n uv_in.write_uvfits(write_file)\n uv_out.read(write_file)\n assert uv_in == uv_out\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread_large_antnums(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n # check that if antenna_numbers are > 256 everything works\n uv_in.antenna_numbers = uv_in.antenna_numbers + 256\n uv_in.ant_1_array = uv_in.ant_1_array + 256\n uv_in.ant_2_array = uv_in.ant_2_array + 256\n uv_in.baseline_array = uv_in.antnums_to_baseline(\n uv_in.ant_1_array, uv_in.ant_2_array\n )\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions\",\n \"antnums_to_baseline: found > 256 antennas, using 2048 baseline\",\n ],\n ):\n uv_in.write_uvfits(write_file)\n uv_out.read(write_file)\n assert uv_in == uv_out\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread_missing_info(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n write_file2 = str(tmp_path / \"outtest_casa2.uvfits\")\n\n # check missing telescope_name, timesys vs timsys spelling, xyz_telescope_frame=????\n uv_in.write_uvfits(write_file)\n with fits.open(write_file, memmap=True) as hdu_list:\n hdunames = uvutils._fits_indexhdus(hdu_list)\n vis_hdu = hdu_list[0]\n vis_hdr = vis_hdu.header.copy()\n\n vis_hdr.pop(\"TELESCOP\")\n\n vis_hdu.header = vis_hdr\n\n ant_hdu = hdu_list[hdunames[\"AIPS AN\"]]\n ant_hdr = ant_hdu.header.copy()\n\n time_sys = ant_hdr.pop(\"TIMSYS\")\n ant_hdr[\"TIMESYS\"] = time_sys\n ant_hdr[\"FRAME\"] = \"????\"\n\n ant_hdu.header = ant_hdr\n\n hdulist = fits.HDUList(hdus=[vis_hdu, ant_hdu])\n hdulist.writeto(write_file2, overwrite=True)\n\n uv_out.read(write_file2)\n assert uv_out.telescope_name == \"EVLA\"\n assert uv_out.timesys == time_sys\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread_error_timesys(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n # check error if timesys is 'IAT'\n uv_in.timesys = \"IAT\"\n with pytest.raises(ValueError) as cm:\n uv_in.write_uvfits(write_file)\n assert str(cm.value).startswith(\n \"This file has a time system IAT. 
\" 'Only \"UTC\" time system files are supported'\n )\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread_error_single_time(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n write_file2 = str(tmp_path / \"outtest_casa2.uvfits\")\n\n # check error if one time & no inttime specified\n uv_singlet = uv_in.select(times=uv_in.time_array[0], inplace=False)\n uv_singlet.write_uvfits(write_file)\n\n with fits.open(write_file, memmap=True) as hdu_list:\n hdunames = uvutils._fits_indexhdus(hdu_list)\n vis_hdu = hdu_list[0]\n vis_hdr = vis_hdu.header.copy()\n raw_data_array = vis_hdu.data.data\n\n par_names = np.array(vis_hdu.data.parnames)\n pars_use = np.where(par_names != \"INTTIM\")[0]\n par_names = par_names[pars_use].tolist()\n\n group_parameter_list = [vis_hdu.data.par(name) for name in par_names]\n\n vis_hdu = fits.GroupData(\n raw_data_array, parnames=par_names, pardata=group_parameter_list, bitpix=-32\n )\n vis_hdu = fits.GroupsHDU(vis_hdu)\n vis_hdu.header = vis_hdr\n\n ant_hdu = hdu_list[hdunames[\"AIPS AN\"]]\n\n hdulist = fits.HDUList(hdus=[vis_hdu, ant_hdu])\n hdulist.writeto(write_file2, overwrite=True)\n\n with pytest.raises(ValueError) as cm:\n with uvtest.check_warnings(\n [\n UserWarning,\n astropy._erfa.core.ErfaWarning,\n astropy._erfa.core.ErfaWarning,\n UserWarning,\n ],\n [\n \"Telescope EVLA is not\",\n 'ERFA function \"utcut1\" yielded 1 of \"dubious year (Note 3)\"',\n 'ERFA function \"utctai\" yielded 1 of \"dubious year (Note 3)\"',\n \"LST values stored in this file are not self-consistent\",\n ],\n ):\n uv_out.read(write_file2),\n\n assert str(cm.value).startswith(\n \"integration time not specified and only one time present\"\n )\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_readwriteread_unflagged_data_warnings(tmp_path, casa_uvfits):\n uv_in = casa_uvfits\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n # check that unflagged data with nsample = 0 will cause warnings\n uv_in.nsample_array[list(range(11, 22))] = 0\n uv_in.flag_array[list(range(11, 22))] = False\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions\",\n \"Some unflagged data has nsample = 0\",\n ],\n ):\n uv_in.write_uvfits(write_file)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\n \"kwd_name,kwd_value,warnstr,errstr\",\n (\n [\n \"testdict\",\n {\"testkey\": 23},\n \"testdict in extra_keywords is a list, array or dict\",\n \"Extra keyword testdict is of <class 'dict'>\",\n ],\n [\n \"testlist\",\n [12, 14, 90],\n \"testlist in extra_keywords is a list, array or dict\",\n \"Extra keyword testlist is of <class 'list'>\",\n ],\n [\n \"testarr\",\n np.array([12, 14, 90]),\n \"testarr in extra_keywords is a list, array or dict\",\n \"Extra keyword testarr is of <class 'numpy.ndarray'>\",\n ],\n [\n \"test_long_key\",\n True,\n \"key test_long_key in extra_keywords is longer than 8 characters\",\n None,\n ],\n ),\n)\ndef test_extra_keywords_errors(\n casa_uvfits, tmp_path, kwd_name, kwd_value, warnstr, errstr\n):\n uv_in = casa_uvfits\n testfile = str(tmp_path / \"outtest_casa.uvfits\")\n\n uvw_warn_str 
= \"The uvw_array does not match the expected values\"\n # check for warnings & errors with extra_keywords that are dicts, lists or arrays\n uv_in.extra_keywords[kwd_name] = kwd_value\n if warnstr is None:\n warnstr_list = [uvw_warn_str]\n else:\n warnstr_list = [warnstr, uvw_warn_str]\n\n with uvtest.check_warnings(UserWarning, match=warnstr_list):\n uv_in.check()\n\n if errstr is not None:\n with pytest.raises(TypeError, match=errstr):\n uv_in.write_uvfits(testfile, run_check=False)\n else:\n with uvtest.check_warnings(UserWarning, match=warnstr):\n uv_in.write_uvfits(testfile, run_check=False)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\n \"kwd_names,kwd_values\",\n (\n [[\"bool\", \"bool2\"], [True, False]],\n [[\"int1\", \"int2\"], [np.int(5), 7]],\n [[\"float1\", \"float2\"], [np.int64(5.3), 6.9]],\n [[\"complex1\", \"complex2\"], [np.complex64(5.3 + 1.2j), 6.9 + 4.6j]],\n [\n [\"str\", \"comment\"],\n [\n \"hello\",\n \"this is a very long comment that will be broken into several \"\n \"lines\\nif everything works properly.\",\n ],\n ],\n ),\n)\ndef test_extra_keywords(casa_uvfits, tmp_path, kwd_names, kwd_values):\n uv_in = casa_uvfits\n uv_out = UVData()\n testfile = str(tmp_path / \"outtest_casa.uvfits\")\n\n for name, value in zip(kwd_names, kwd_values):\n uv_in.extra_keywords[name] = value\n uv_in.write_uvfits(testfile)\n uv_out.read(testfile)\n\n assert uv_in == uv_out\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_roundtrip_blt_order(casa_uvfits, tmp_path):\n uv_in = casa_uvfits\n uv_out = UVData()\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n\n uv_in.reorder_blts()\n\n uv_in.write_uvfits(write_file)\n uv_out.read(write_file)\n assert uv_in == uv_out\n\n # test with bda as well (single entry in tuple)\n uv_in.reorder_blts(order=\"bda\")\n\n uv_in.write_uvfits(write_file)\n uv_out.read(write_file)\n assert uv_in == uv_out\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"select_kwargs\",\n [\n {\"antenna_nums\": np.array([0, 19, 11, 24, 3, 23, 1, 20, 21])},\n {\"freq_chans\": np.arange(12, 22)},\n {\"freq_chans\": [0]},\n {\"polarizations\": [-1, -2]},\n {\"time_inds\": np.array([0, 1])},\n {\n \"antenna_nums\": np.array([0, 19, 11, 24, 3, 23, 1, 20, 21]),\n \"freq_chans\": np.arange(12, 22),\n \"polarizations\": [-1, -2],\n },\n {\n \"antenna_nums\": np.array([0, 1]),\n \"freq_chans\": np.arange(12, 22),\n \"polarizations\": [-1, -2],\n },\n {\n \"antenna_nums\": np.array([0, 1, 2, 3, 6, 7, 8, 11, 14, 18, 19, 20, 21, 22]),\n \"freq_chans\": np.arange(12, 64),\n \"polarizations\": [-1, -2],\n },\n ],\n)\ndef test_select_read(casa_uvfits, tmp_path, select_kwargs):\n uvfits_uv = UVData()\n uvfits_uv2 = UVData()\n\n uvfits_uv2 = casa_uvfits\n if \"time_inds\" in select_kwargs.keys():\n time_inds = select_kwargs.pop(\"time_inds\")\n unique_times = np.unique(uvfits_uv2.time_array)\n select_kwargs[\"time_range\"] = unique_times[time_inds]\n\n uvfits_uv.read(casa_tutorial_uvfits, **select_kwargs)\n uvfits_uv2.select(**select_kwargs)\n assert uvfits_uv == uvfits_uv2\n\n testfile = str(tmp_path / \"outtest_casa.uvfits\")\n uvfits_uv.write_uvfits(testfile)\n uvfits_uv2.read(testfile)\n assert uvfits_uv == uvfits_uv2\n\n\[email protected](\"ignore:Required Antenna 
frame keyword\")\[email protected](\"ignore:telescope_location is not set\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"select_kwargs\",\n [{\"antenna_nums\": np.array([2, 4, 5])}, {\"freq_chans\": np.arange(4, 8)}],\n)\ndef test_select_read_nospw(uvfits_nospw, tmp_path, select_kwargs):\n uvfits_uv2 = uvfits_nospw\n\n uvfits_uv = UVData()\n # This file has a crazy epoch (2291.34057617) which breaks the uvw_antpos check\n # Since it's a PAPER file, I think this is a bug in the file, not in the check.\n uvfits_uv.read(paper_uvfits, run_check_acceptability=False, **select_kwargs)\n\n uvfits_uv2.select(run_check_acceptability=False, **select_kwargs)\n assert uvfits_uv == uvfits_uv2\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_read_nospw_pol(casa_uvfits, tmp_path):\n # this requires writing a new file because the no spw file we have has only 1 pol\n\n with fits.open(casa_tutorial_uvfits, memmap=True) as hdu_list:\n hdunames = uvutils._fits_indexhdus(hdu_list)\n vis_hdu = hdu_list[0]\n vis_hdr = vis_hdu.header.copy()\n raw_data_array = vis_hdu.data.data\n raw_data_array = raw_data_array[:, :, :, 0, :, :, :]\n\n vis_hdr[\"NAXIS\"] = 6\n\n vis_hdr[\"NAXIS5\"] = vis_hdr[\"NAXIS6\"]\n vis_hdr[\"CTYPE5\"] = vis_hdr[\"CTYPE6\"]\n vis_hdr[\"CRVAL5\"] = vis_hdr[\"CRVAL6\"]\n vis_hdr[\"CDELT5\"] = vis_hdr[\"CDELT6\"]\n vis_hdr[\"CRPIX5\"] = vis_hdr[\"CRPIX6\"]\n vis_hdr[\"CROTA5\"] = vis_hdr[\"CROTA6\"]\n\n vis_hdr[\"NAXIS6\"] = vis_hdr[\"NAXIS7\"]\n vis_hdr[\"CTYPE6\"] = vis_hdr[\"CTYPE7\"]\n vis_hdr[\"CRVAL6\"] = vis_hdr[\"CRVAL7\"]\n vis_hdr[\"CDELT6\"] = vis_hdr[\"CDELT7\"]\n vis_hdr[\"CRPIX6\"] = vis_hdr[\"CRPIX7\"]\n vis_hdr[\"CROTA6\"] = vis_hdr[\"CROTA7\"]\n\n vis_hdr.pop(\"NAXIS7\")\n vis_hdr.pop(\"CTYPE7\")\n vis_hdr.pop(\"CRVAL7\")\n vis_hdr.pop(\"CDELT7\")\n vis_hdr.pop(\"CRPIX7\")\n vis_hdr.pop(\"CROTA7\")\n\n par_names = vis_hdu.data.parnames\n\n group_parameter_list = [vis_hdu.data.par(ind) for ind in range(len(par_names))]\n\n vis_hdu = fits.GroupData(\n raw_data_array, parnames=par_names, pardata=group_parameter_list, bitpix=-32\n )\n vis_hdu = fits.GroupsHDU(vis_hdu)\n vis_hdu.header = vis_hdr\n\n ant_hdu = hdu_list[hdunames[\"AIPS AN\"]]\n\n write_file = str(tmp_path / \"outtest_casa.uvfits\")\n hdulist = fits.HDUList(hdus=[vis_hdu, ant_hdu])\n hdulist.writeto(write_file, overwrite=True)\n\n pols_to_keep = [-1, -2]\n uvfits_uv = UVData()\n uvfits_uv.read(write_file, polarizations=pols_to_keep)\n uvfits_uv2 = casa_uvfits\n uvfits_uv2.select(polarizations=pols_to_keep)\n assert uvfits_uv == uvfits_uv2\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_read_uvfits_write_miriad(casa_uvfits, tmp_path):\n \"\"\"\n read uvfits, write miriad test.\n Read in uvfits file, write out as miriad, read back in and check for\n object equality.\n \"\"\"\n pytest.importorskip(\"pyuvdata._miriad\")\n uvfits_uv = casa_uvfits\n miriad_uv = UVData()\n testfile = str(tmp_path / \"outtest_miriad\")\n uvfits_uv.write_miriad(testfile, clobber=True)\n miriad_uv.read_miriad(testfile)\n\n assert miriad_uv == uvfits_uv\n\n # check that setting the phase_type keyword also works\n miriad_uv.read_miriad(testfile, phase_type=\"phased\")\n\n # check that setting the phase_type to drift raises an error\n with pytest.raises(\n ValueError, match='phase_type is 
\"drift\" but the RA values are constant.'\n ):\n miriad_uv.read_miriad(testfile, phase_type=\"drift\")\n\n # check that setting it works after selecting a single time\n uvfits_uv.select(times=uvfits_uv.time_array[0])\n uvfits_uv.write_miriad(testfile, clobber=True)\n miriad_uv.read_miriad(testfile)\n\n assert miriad_uv == uvfits_uv\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_multi_files(casa_uvfits, tmp_path):\n \"\"\"\n Reading multiple files at once.\n \"\"\"\n uv_full = casa_uvfits\n testfile1 = str(tmp_path / \"uv1.uvfits\")\n testfile2 = str(tmp_path / \"uv2.uvfits\")\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv1.write_uvfits(testfile1)\n uv2.write_uvfits(testfile2)\n uv1.read(np.array([testfile1, testfile2]), file_type=\"uvfits\")\n\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. \"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # again, setting axis\n uv1.read([testfile1, testfile2], axis=\"freq\")\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. \"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # check with metadata_only\n uv_full = uv_full.copy(metadata_only=True)\n uv1 = UVData()\n uv1.read([testfile1, testfile2], read_data=False)\n\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. 
\"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\ndef test_multi_unphase_on_read(casa_uvfits, tmp_path):\n uv_full = casa_uvfits\n uv_full2 = UVData()\n testfile1 = str(tmp_path / \"uv1.uvfits\")\n testfile2 = str(tmp_path / \"uv2.uvfits\")\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv1.write_uvfits(testfile1)\n uv2.write_uvfits(testfile2)\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Telescope EVLA is not\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"Telescope EVLA is not\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"Unphasing this UVData object to drift\",\n \"Unphasing other UVData object to drift\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n ],\n ):\n uv1.read(np.array([testfile1, testfile2]), unphase_to_drift=True)\n\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. \"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv_full.unphase_to_drift()\n\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # check unphasing when reading only one file\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Telescope EVLA is not\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"Unphasing this UVData object to drift\",\n ],\n ):\n uv_full2.read(casa_tutorial_uvfits, unphase_to_drift=True)\n assert uv_full2 == uv_full\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\ndef test_multi_phase_on_read(casa_uvfits, tmp_path):\n uv_full = casa_uvfits\n uv_full2 = UVData()\n testfile1 = str(tmp_path / \"uv1.uvfits\")\n testfile2 = str(tmp_path / \"uv2.uvfits\")\n phase_center_radec = [\n uv_full.phase_center_ra + 0.01,\n uv_full.phase_center_dec + 0.01,\n ]\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv1.write_uvfits(testfile1)\n uv2.write_uvfits(testfile2)\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Telescope EVLA is not\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"Telescope EVLA is not\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"Phasing this UVData object to phase_center_radec\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"Phasing this UVData object to phase_center_radec\",\n \"The uvw_array does not match 
the expected values given the \"\n \"antenna positions.\",\n ],\n ):\n uv1.read(\n np.array([testfile1, testfile2]), phase_center_radec=phase_center_radec\n )\n\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. \"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv_full.phase(*phase_center_radec)\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # check phasing when reading only one file\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Telescope EVLA is not\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Phasing this UVData object to phase_center_radec\",\n ],\n ):\n uv_full2.read(casa_tutorial_uvfits, phase_center_radec=phase_center_radec)\n assert uv_full2 == uv_full\n\n with pytest.raises(ValueError) as cm:\n uv_full2.read(casa_tutorial_uvfits, phase_center_radec=phase_center_radec[0])\n assert str(cm.value).startswith(\"phase_center_radec should have length 2.\")\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"ignore:Telescope EVLA is not\")\ndef test_read_ms_write_uvfits_casa_history(tmp_path):\n \"\"\"\n read in .ms file.\n Write to a uvfits file, read back in and check for casa_history parameter\n \"\"\"\n pytest.importorskip(\"casacore\")\n ms_uv = UVData()\n uvfits_uv = UVData()\n ms_file = os.path.join(DATA_PATH, \"day2_TDEM0003_10s_norx_1src_1spw.ms\")\n testfile = str(tmp_path / \"outtest.uvfits\")\n ms_uv.read_ms(ms_file)\n ms_uv.write_uvfits(testfile, spoof_nonessential=True)\n uvfits_uv.read(testfile)\n assert ms_uv == uvfits_uv\n", "id": "2648234", "language": "Python", "matching_score": 4.675361156463623, "max_stars_count": 0, "path": "pyuvdata/uvdata/tests/test_uvfits.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Tests for uvdata object.\"\"\"\nimport pytest\nimport os\nimport copy\nimport itertools\nimport h5py\n\nimport numpy as np\nfrom astropy.time import Time\nfrom astropy.coordinates import Angle\nfrom astropy.utils import iers\n\nfrom pyuvdata import UVData, UVCal\nimport pyuvdata.utils as uvutils\nimport pyuvdata.tests as uvtest\nfrom pyuvdata.data import DATA_PATH\n\n# needed for multifile read error test\nfrom pyuvdata.uvdata.tests.test_mwa_corr_fits import filelist as mwa_corr_files\nfrom pyuvdata.uvdata.tests.test_fhd import testfiles as fhd_files\n\n\nfrom collections import Counter\n\n\[email protected](scope=\"function\")\ndef uvdata_props():\n required_parameters = [\n \"_data_array\",\n \"_nsample_array\",\n \"_flag_array\",\n \"_Ntimes\",\n \"_Nbls\",\n \"_Nblts\",\n \"_Nfreqs\",\n \"_Npols\",\n \"_Nspws\",\n \"_uvw_array\",\n \"_time_array\",\n \"_ant_1_array\",\n \"_ant_2_array\",\n \"_lst_array\",\n \"_baseline_array\",\n \"_freq_array\",\n \"_polarization_array\",\n \"_spw_array\",\n \"_integration_time\",\n \"_channel_width\",\n \"_object_name\",\n \"_telescope_name\",\n \"_instrument\",\n \"_telescope_location\",\n \"_history\",\n \"_vis_units\",\n \"_Nants_data\",\n \"_Nants_telescope\",\n \"_antenna_names\",\n \"_antenna_numbers\",\n \"_antenna_positions\",\n \"_phase_type\",\n \"_flex_spw\",\n ]\n\n required_properties = [\n \"data_array\",\n \"nsample_array\",\n \"flag_array\",\n \"Ntimes\",\n \"Nbls\",\n \"Nblts\",\n 
\"Nfreqs\",\n \"Npols\",\n \"Nspws\",\n \"uvw_array\",\n \"time_array\",\n \"ant_1_array\",\n \"ant_2_array\",\n \"lst_array\",\n \"baseline_array\",\n \"freq_array\",\n \"polarization_array\",\n \"spw_array\",\n \"integration_time\",\n \"channel_width\",\n \"object_name\",\n \"telescope_name\",\n \"instrument\",\n \"telescope_location\",\n \"history\",\n \"vis_units\",\n \"Nants_data\",\n \"Nants_telescope\",\n \"antenna_names\",\n \"antenna_numbers\",\n \"antenna_positions\",\n \"phase_type\",\n \"flex_spw\",\n ]\n\n extra_parameters = [\n \"_extra_keywords\",\n \"_x_orientation\",\n \"_antenna_diameters\",\n \"_blt_order\",\n \"_gst0\",\n \"_rdate\",\n \"_earth_omega\",\n \"_dut1\",\n \"_timesys\",\n \"_uvplane_reference_time\",\n \"_phase_center_ra\",\n \"_phase_center_dec\",\n \"_phase_center_epoch\",\n \"_phase_center_frame\",\n \"_eq_coeffs\",\n \"_eq_coeffs_convention\",\n \"_flex_spw_id_array\",\n ]\n\n extra_properties = [\n \"extra_keywords\",\n \"x_orientation\",\n \"antenna_diameters\",\n \"blt_order\",\n \"gst0\",\n \"rdate\",\n \"earth_omega\",\n \"dut1\",\n \"timesys\",\n \"uvplane_reference_time\",\n \"phase_center_ra\",\n \"phase_center_dec\",\n \"phase_center_epoch\",\n \"phase_center_frame\",\n \"eq_coeffs\",\n \"eq_coeffs_convention\",\n \"flex_spw_id_array\",\n ]\n\n other_properties = [\n \"telescope_location_lat_lon_alt\",\n \"telescope_location_lat_lon_alt_degrees\",\n \"phase_center_ra_degrees\",\n \"phase_center_dec_degrees\",\n \"pyuvdata_version_str\",\n ]\n\n uv_object = UVData()\n\n class DataHolder:\n def __init__(\n self,\n uv_object,\n required_parameters,\n required_properties,\n extra_parameters,\n extra_properties,\n other_properties,\n ):\n self.uv_object = uv_object\n self.required_parameters = required_parameters\n self.required_properties = required_properties\n self.extra_parameters = extra_parameters\n self.extra_properties = extra_properties\n self.other_properties = other_properties\n\n uvdata_props = DataHolder(\n uv_object,\n required_parameters,\n required_properties,\n extra_parameters,\n extra_properties,\n other_properties,\n )\n # yields the data we need but will continue to the del call after tests\n yield uvdata_props\n\n # some post-test object cleanup\n del uvdata_props\n\n return\n\n\[email protected](scope=\"session\")\ndef hera_uvh5_main():\n # read in test file for the resampling in time functions\n uv_object = UVData()\n testfile = os.path.join(DATA_PATH, \"zen.2458661.23480.HH.uvh5\")\n uv_object.read(testfile)\n\n yield uv_object\n\n # cleanup\n del uv_object\n\n return\n\n\[email protected](scope=\"function\")\ndef hera_uvh5(hera_uvh5_main):\n # read in test file for the resampling in time functions\n uv_object = hera_uvh5_main.copy()\n\n yield uv_object\n\n # cleanup\n del uv_object\n\n return\n\n\[email protected](scope=\"session\")\ndef paper_uvh5_main():\n # read in test file for the resampling in time functions\n uv_object = UVData()\n uvh5_file = os.path.join(DATA_PATH, \"zen.2456865.60537.xy.uvcRREAA.uvh5\")\n uv_object.read_uvh5(uvh5_file)\n\n yield uv_object\n\n # cleanup\n del uv_object\n\n return\n\n\[email protected](scope=\"function\")\ndef paper_uvh5(paper_uvh5_main):\n # read in test file for the resampling in time functions\n uv_object = paper_uvh5_main.copy()\n\n yield uv_object\n\n # cleanup\n del uv_object\n\n return\n\n\[email protected](scope=\"session\")\ndef bda_test_file_main():\n # read in test file for BDA-like data\n uv_object = UVData()\n testfile = os.path.join(DATA_PATH, 
\"simulated_bda_file.uvh5\")\n uv_object.read(testfile)\n\n yield uv_object\n\n # cleanup\n del uv_object\n\n return\n\n\[email protected](scope=\"function\")\ndef bda_test_file(bda_test_file_main):\n # read in test file for BDA-like data\n uv_object = bda_test_file_main.copy()\n\n yield uv_object\n\n # cleanup\n del uv_object\n\n return\n\n\[email protected](scope=\"function\")\ndef uvdata_data(casa_uvfits):\n uv_object = casa_uvfits\n\n class DataHolder:\n def __init__(self, uv_object):\n self.uv_object = uv_object\n self.uv_object2 = uv_object.copy()\n\n uvdata_data = DataHolder(uv_object)\n # yields the data we need but will continue to the del call after tests\n yield uvdata_data\n\n # some post-test object cleanup\n del uvdata_data\n\n return\n\n\[email protected](scope=\"function\")\ndef uvdata_baseline():\n uv_object = UVData()\n uv_object.Nants_telescope = 128\n uv_object2 = UVData()\n uv_object2.Nants_telescope = 2049\n\n class DataHolder:\n def __init__(self, uv_object, uv_object2):\n self.uv_object = uv_object\n self.uv_object2 = uv_object2\n\n uvdata_baseline = DataHolder(uv_object, uv_object2)\n\n # yields the data we need but will continue to the del call after tests\n yield uvdata_baseline\n\n # Post test clean-up\n del uvdata_baseline\n return\n\n\[email protected](scope=\"session\")\ndef set_uvws_main(hera_uvh5_main):\n uv1 = hera_uvh5_main.copy()\n # uvws in the file are wrong. reset them.\n uv1.set_uvws_from_antenna_positions()\n\n yield uv1\n\n del uv1\n\n return\n\n\[email protected]\ndef uv1_2_set_uvws(set_uvws_main):\n uv1 = set_uvws_main.copy()\n uv2 = set_uvws_main.copy()\n\n yield uv1, uv2\n\n del uv1, uv2\n\n return\n\n\[email protected]()\ndef uv_phase_time_split(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n\n uv_phase.reorder_blts(order=\"time\", minor_order=\"baseline\")\n uv_raw.reorder_blts(order=\"time\", minor_order=\"baseline\")\n\n uv_phase.phase(ra=0, dec=0, epoch=\"J2000\", use_ant_pos=True)\n times = np.unique(uv_phase.time_array)\n time_set_1, time_set_2 = times[::2], times[1::2]\n\n uv_phase_1 = uv_phase.select(times=time_set_1, inplace=False)\n uv_phase_2 = uv_phase.select(times=time_set_2, inplace=False)\n\n uv_raw_1 = uv_raw.select(times=time_set_1, inplace=False)\n uv_raw_2 = uv_raw.select(times=time_set_2, inplace=False)\n\n yield uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw\n\n del uv_phase_1, uv_phase_2, uv_raw_1, uv_raw_2, uv_phase, uv_raw\n\n\ndef test_parameter_iter(uvdata_props):\n \"\"\"Test expected parameters.\"\"\"\n all_params = []\n for prop in uvdata_props.uv_object:\n all_params.append(prop)\n for a in uvdata_props.required_parameters + uvdata_props.extra_parameters:\n assert a in all_params, (\n \"expected attribute \" + a + \" not returned in object iterator\"\n )\n\n\ndef test_required_parameter_iter(uvdata_props):\n \"\"\"Test expected required parameters.\"\"\"\n # at first it's a metadata_only object, so need to modify required_parameters\n required = []\n for prop in uvdata_props.uv_object.required():\n required.append(prop)\n expected_required = copy.copy(uvdata_props.required_parameters)\n expected_required.remove(\"_data_array\")\n expected_required.remove(\"_nsample_array\")\n expected_required.remove(\"_flag_array\")\n for a in expected_required:\n assert a in required, (\n \"expected attribute \" + a + \" not returned in required iterator\"\n )\n\n uvdata_props.uv_object.data_array = 1\n uvdata_props.uv_object.nsample_array = 1\n uvdata_props.uv_object.flag_array = 1\n required = []\n for 
prop in uvdata_props.uv_object.required():\n required.append(prop)\n for a in uvdata_props.required_parameters:\n assert a in required, (\n \"expected attribute \" + a + \" not returned in required iterator\"\n )\n\n\ndef test_extra_parameter_iter(uvdata_props):\n \"\"\"Test expected optional parameters.\"\"\"\n extra = []\n for prop in uvdata_props.uv_object.extra():\n extra.append(prop)\n for a in uvdata_props.extra_parameters:\n assert a in extra, \"expected attribute \" + a + \" not returned in extra iterator\"\n\n\ndef test_unexpected_parameters(uvdata_props):\n \"\"\"Test for extra parameters.\"\"\"\n expected_parameters = (\n uvdata_props.required_parameters + uvdata_props.extra_parameters\n )\n attributes = [i for i in uvdata_props.uv_object.__dict__.keys() if i[0] == \"_\"]\n for a in attributes:\n assert a in expected_parameters, (\n \"unexpected parameter \" + a + \" found in UVData\"\n )\n\n\ndef test_unexpected_attributes(uvdata_props):\n \"\"\"Test for extra attributes.\"\"\"\n expected_attributes = (\n uvdata_props.required_properties\n + uvdata_props.extra_properties\n + uvdata_props.other_properties\n )\n attributes = [i for i in uvdata_props.uv_object.__dict__.keys() if i[0] != \"_\"]\n for a in attributes:\n assert a in expected_attributes, (\n \"unexpected attribute \" + a + \" found in UVData\"\n )\n\n\ndef test_properties(uvdata_props):\n \"\"\"Test that properties can be get and set properly.\"\"\"\n prop_dict = dict(\n list(\n zip(\n uvdata_props.required_properties + uvdata_props.extra_properties,\n uvdata_props.required_parameters + uvdata_props.extra_parameters,\n )\n )\n )\n for k, v in prop_dict.items():\n rand_num = np.random.rand()\n setattr(uvdata_props.uv_object, k, rand_num)\n this_param = getattr(uvdata_props.uv_object, v)\n try:\n assert rand_num == this_param.value\n except AssertionError:\n print(\"setting {prop_name} to a random number failed\".format(prop_name=k))\n raise\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_metadata_only_property(uvdata_data):\n uvdata_data.uv_object.data_array = None\n assert uvdata_data.uv_object.metadata_only is False\n pytest.raises(ValueError, uvdata_data.uv_object.check)\n uvdata_data.uv_object.flag_array = None\n assert uvdata_data.uv_object.metadata_only is False\n pytest.raises(ValueError, uvdata_data.uv_object.check)\n uvdata_data.uv_object.nsample_array = None\n assert uvdata_data.uv_object.metadata_only is True\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_equality(uvdata_data):\n \"\"\"Basic equality test.\"\"\"\n assert uvdata_data.uv_object == uvdata_data.uv_object\n\n\[email protected](\"ignore:Telescope location derived from obs\")\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_check(uvdata_data):\n \"\"\"Test simple check function.\"\"\"\n assert uvdata_data.uv_object.check()\n # Check variety of special cases\n uvdata_data.uv_object.Nants_data += 1\n with pytest.raises(\n ValueError,\n match=(\n \"Nants_data must be equal to the number of unique values in \"\n \"ant_1_array and ant_2_array\"\n ),\n ):\n uvdata_data.uv_object.check()\n uvdata_data.uv_object.Nants_data -= 1\n uvdata_data.uv_object.Nbls += 1\n with pytest.raises(\n ValueError,\n match=(\n \"Nbls must be equal to the number of unique baselines in the 
data_array\"\n ),\n ):\n uvdata_data.uv_object.check()\n uvdata_data.uv_object.Nbls -= 1\n uvdata_data.uv_object.Ntimes += 1\n with pytest.raises(\n ValueError,\n match=(\"Ntimes must be equal to the number of unique times in the time_array\"),\n ):\n uvdata_data.uv_object.check()\n uvdata_data.uv_object.Ntimes -= 1\n\n with pytest.raises(\n ValueError,\n match=(\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\"\n ),\n ):\n uvdata_data.uv_object.check(strict_uvw_antpos_check=True)\n\n # Check case where all data is autocorrelations\n # Currently only test files that have autos are fhd files\n testdir = os.path.join(DATA_PATH, \"fhd_vis_data/\")\n file_list = [\n testdir + \"1061316296_flags.sav\",\n testdir + \"1061316296_vis_XX.sav\",\n testdir + \"1061316296_params.sav\",\n testdir + \"1061316296_layout.sav\",\n testdir + \"1061316296_settings.txt\",\n ]\n\n uvdata_data.uv_object.read_fhd(file_list)\n\n uvdata_data.uv_object.select(\n blt_inds=np.where(\n uvdata_data.uv_object.ant_1_array == uvdata_data.uv_object.ant_2_array\n )[0]\n )\n assert uvdata_data.uv_object.check()\n\n # test auto and cross corr uvw_array\n uvd = UVData()\n uvd.read_uvh5(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA.uvh5\"))\n autos = np.isclose(uvd.ant_1_array - uvd.ant_2_array, 0.0)\n auto_inds = np.where(autos)[0]\n cross_inds = np.where(~autos)[0]\n\n # make auto have non-zero uvw coords, assert ValueError\n uvd.uvw_array[auto_inds[0], 0] = 0.1\n with pytest.raises(\n ValueError,\n match=(\"Some auto-correlations have non-zero uvw_array coordinates.\"),\n ):\n uvd.check()\n\n # make cross have |uvw| zero, assert ValueError\n uvd.read_uvh5(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA.uvh5\"))\n uvd.uvw_array[cross_inds[0]][:] = 0.0\n with pytest.raises(\n ValueError,\n match=(\"Some cross-correlations have near-zero uvw_array magnitudes.\"),\n ):\n uvd.check()\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_nants_data_telescope_larger(uvdata_data):\n # make sure it's okay for Nants_telescope to be strictly greater than Nants_data\n uvdata_data.uv_object.Nants_telescope += 1\n # add dummy information for \"new antenna\" to pass object check\n uvdata_data.uv_object.antenna_names = np.concatenate(\n (uvdata_data.uv_object.antenna_names, [\"dummy_ant\"])\n )\n uvdata_data.uv_object.antenna_numbers = np.concatenate(\n (uvdata_data.uv_object.antenna_numbers, [20])\n )\n uvdata_data.uv_object.antenna_positions = np.concatenate(\n (uvdata_data.uv_object.antenna_positions, np.zeros((1, 3))), axis=0\n )\n assert uvdata_data.uv_object.check()\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_ant1_array_not_in_antnums(uvdata_data):\n # make sure an error is raised if antennas in ant_1_array not in antenna_numbers\n # remove antennas from antenna_names & antenna_numbers by hand\n uvdata_data.uv_object.antenna_names = uvdata_data.uv_object.antenna_names[1:]\n uvdata_data.uv_object.antenna_numbers = uvdata_data.uv_object.antenna_numbers[1:]\n uvdata_data.uv_object.antenna_positions = uvdata_data.uv_object.antenna_positions[\n 1:, :\n ]\n uvdata_data.uv_object.Nants_telescope = uvdata_data.uv_object.antenna_numbers.size\n with pytest.raises(ValueError) as cm:\n uvdata_data.uv_object.check()\n assert str(cm.value).startswith(\n \"All antennas in ant_1_array must be in 
antenna_numbers\"\n )\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_ant2_array_not_in_antnums(uvdata_data):\n # make sure an error is raised if antennas in ant_2_array not in antenna_numbers\n # remove antennas from antenna_names & antenna_numbers by hand\n uvobj = uvdata_data.uv_object\n uvobj.antenna_names = uvobj.antenna_names[:-1]\n uvobj.antenna_numbers = uvobj.antenna_numbers[:-1]\n uvobj.antenna_positions = uvobj.antenna_positions[:-1]\n uvobj.Nants_telescope = uvobj.antenna_numbers.size\n with pytest.raises(ValueError) as cm:\n uvobj.check()\n assert str(cm.value).startswith(\n \"All antennas in ant_2_array must be in antenna_numbers\"\n )\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_converttofiletype(uvdata_data):\n fhd_obj = uvdata_data.uv_object._convert_to_filetype(\"fhd\")\n uvdata_data.uv_object._convert_from_filetype(fhd_obj)\n assert uvdata_data.uv_object == uvdata_data.uv_object2\n\n with pytest.raises(ValueError) as cm:\n uvdata_data.uv_object._convert_to_filetype(\"foo\")\n assert str(cm.value).startswith(\n \"filetype must be uvfits, mir, miriad, fhd, or uvh5\"\n )\n\n\ndef test_baseline_to_antnums(uvdata_baseline):\n \"\"\"Test baseline to antnum conversion for 256 & larger conventions.\"\"\"\n assert uvdata_baseline.uv_object.baseline_to_antnums(67585) == (0, 0)\n with pytest.raises(Exception) as cm:\n uvdata_baseline.uv_object2.baseline_to_antnums(67585)\n assert str(cm.value).startswith(\n \"error Nants={Nants}>2048\"\n \" not supported\".format(Nants=uvdata_baseline.uv_object2.Nants_telescope)\n )\n\n ant_pairs = [(10, 20), (280, 310)]\n for pair in ant_pairs:\n if np.max(np.array(pair)) < 255:\n bl = uvdata_baseline.uv_object.antnums_to_baseline(\n pair[0], pair[1], attempt256=True\n )\n ant_pair_out = uvdata_baseline.uv_object.baseline_to_antnums(bl)\n assert pair == ant_pair_out\n\n bl = uvdata_baseline.uv_object.antnums_to_baseline(\n pair[0], pair[1], attempt256=False\n )\n ant_pair_out = uvdata_baseline.uv_object.baseline_to_antnums(bl)\n assert pair == ant_pair_out\n\n\ndef test_baseline_to_antnums_vectorized(uvdata_baseline):\n \"\"\"Test vectorized antnum to baseline conversion.\"\"\"\n ant_1 = [10, 280]\n ant_2 = [20, 310]\n baseline_array = uvdata_baseline.uv_object.antnums_to_baseline(ant_1, ant_2)\n assert np.array_equal(baseline_array, [88085, 641335])\n ant_1_out, ant_2_out = uvdata_baseline.uv_object.baseline_to_antnums(\n baseline_array.tolist()\n )\n assert np.array_equal(ant_1, ant_1_out)\n assert np.array_equal(ant_2, ant_2_out)\n\n\ndef test_antnums_to_baselines(uvdata_baseline):\n \"\"\"Test antums to baseline conversion for 256 & larger conventions.\"\"\"\n assert uvdata_baseline.uv_object.antnums_to_baseline(0, 0) == 67585\n assert uvdata_baseline.uv_object.antnums_to_baseline(257, 256) == 594177\n assert uvdata_baseline.uv_object.baseline_to_antnums(594177) == (257, 256)\n # Check attempt256\n assert uvdata_baseline.uv_object.antnums_to_baseline(0, 0, attempt256=True) == 257\n assert uvdata_baseline.uv_object.antnums_to_baseline(257, 256) == 594177\n with uvtest.check_warnings(UserWarning, \"found > 256 antennas\"):\n uvdata_baseline.uv_object.antnums_to_baseline(257, 256, attempt256=True)\n pytest.raises(Exception, uvdata_baseline.uv_object2.antnums_to_baseline, 0, 0)\n # check a len-1 array returns as an array\n ant1 = np.array([1])\n ant2 = 
np.array([2])\n assert isinstance(\n uvdata_baseline.uv_object.antnums_to_baseline(ant1, ant2), np.ndarray\n )\n\n\ndef test_known_telescopes():\n \"\"\"Test known_telescopes method returns expected results.\"\"\"\n uv_object = UVData()\n known_telescopes = [\"PAPER\", \"HERA\", \"MWA\", \"SMA\"]\n # calling np.sort().tolist() because [].sort() acts inplace and returns None\n # Before test had None == None\n assert (\n np.sort(known_telescopes).tolist()\n == np.sort(uv_object.known_telescopes()).tolist()\n )\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_hera_diameters(paper_uvh5):\n uv_in = paper_uvh5\n\n uv_in.telescope_name = \"HERA\"\n with uvtest.check_warnings(\n UserWarning, \"antenna_diameters is not set. Using known values for HERA.\"\n ):\n uv_in.set_telescope_params()\n\n assert uv_in.telescope_name == \"HERA\"\n assert uv_in.antenna_diameters is not None\n\n uv_in.check()\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_generic_read():\n uv_in = UVData()\n uvfits_file = os.path.join(DATA_PATH, \"day2_TDEM0003_10s_norx_1src_1spw.uvfits\")\n uv_in.read(uvfits_file, read_data=False)\n unique_times = np.unique(uv_in.time_array)\n\n pytest.raises(\n ValueError,\n uv_in.read,\n uvfits_file,\n times=unique_times[0:2],\n time_range=[unique_times[0], unique_times[1]],\n )\n\n pytest.raises(\n ValueError,\n uv_in.read,\n uvfits_file,\n antenna_nums=uv_in.antenna_numbers[0],\n antenna_names=uv_in.antenna_names[1],\n )\n\n pytest.raises(ValueError, uv_in.read, \"foo\")\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"phase_kwargs\",\n [\n {\"ra\": 0.0, \"dec\": 0.0, \"epoch\": \"J2000\"},\n {\"ra\": Angle(\"5d\").rad, \"dec\": Angle(\"30d\").rad, \"phase_frame\": \"gcrs\"},\n {\n \"ra\": Angle(\"180d\").rad,\n \"dec\": Angle(\"90d\"),\n \"epoch\": Time(\"2010-01-01T00:00:00\", format=\"isot\", scale=\"utc\"),\n },\n ],\n)\ndef test_phase_unphase_hera(uv1_2_set_uvws, phase_kwargs):\n \"\"\"\n Read in drift data, phase to an RA/DEC, unphase and check for object equality.\n \"\"\"\n uv1, uv_raw = uv1_2_set_uvws\n uv1.phase(**phase_kwargs)\n uv1.unphase_to_drift()\n # check that phase + unphase gets back to raw\n assert uv_raw == uv1\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_phase_unphase_hera_one_bl(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n # check that phase + unphase work with one baseline\n uv_raw_small = uv_raw.select(blt_inds=[0], inplace=False)\n uv_phase_small = uv_raw_small.copy()\n uv_phase_small.phase(Angle(\"23h\").rad, Angle(\"15d\").rad)\n uv_phase_small.unphase_to_drift()\n assert uv_raw_small == uv_phase_small\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_phase_unphase_hera_antpos(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n # check that they match if you phase & unphase using antenna locations\n # first replace the uvws with the right values\n antenna_enu = uvutils.ENU_from_ECEF(\n (uv_raw.antenna_positions + uv_raw.telescope_location),\n *uv_raw.telescope_location_lat_lon_alt,\n )\n uvw_calc = np.zeros_like(uv_raw.uvw_array)\n unique_times, unique_inds = np.unique(uv_raw.time_array, return_index=True)\n for ind, jd in enumerate(unique_times):\n inds = np.where(uv_raw.time_array == jd)[0]\n for bl_ind in inds:\n wh_ant1 = np.where(uv_raw.antenna_numbers == 
uv_raw.ant_1_array[bl_ind])\n ant1_index = wh_ant1[0][0]\n wh_ant2 = np.where(uv_raw.antenna_numbers == uv_raw.ant_2_array[bl_ind])\n ant2_index = wh_ant2[0][0]\n uvw_calc[bl_ind, :] = (\n antenna_enu[ant2_index, :] - antenna_enu[ant1_index, :]\n )\n\n uv_raw_new = uv_raw.copy()\n uv_raw_new.uvw_array = uvw_calc\n uv_phase.phase(0.0, 0.0, epoch=\"J2000\", use_ant_pos=True)\n uv_phase2 = uv_raw_new.copy()\n uv_phase2.phase(0.0, 0.0, epoch=\"J2000\")\n\n # The uvw's only agree to ~1mm. should they be better?\n assert np.allclose(uv_phase2.uvw_array, uv_phase.uvw_array, atol=1e-3)\n # the data array are just multiplied by the w's for phasing, so a difference\n # at the 1e-3 level makes the data array different at that level too.\n # -> change the tolerance on data_array for this test\n uv_phase2._data_array.tols = (0, 1e-3 * np.amax(np.abs(uv_phase2.data_array)))\n assert uv_phase2 == uv_phase\n\n # check that phase + unphase gets back to raw using antpos\n uv_phase.unphase_to_drift(use_ant_pos=True)\n assert uv_raw_new == uv_phase\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_phase_hera_zenith_timestamp_minimal_changes(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n # check that phasing to zenith with one timestamp has small changes\n # (it won't be identical because of precession/nutation changing the\n # coordinate axes)\n # use gcrs rather than icrs to reduce differences (don't include abberation)\n uv_raw_small = uv_raw.select(times=uv_raw.time_array[0], inplace=False)\n uv_phase_simple_small = uv_raw_small.copy()\n uv_phase_simple_small.phase_to_time(\n time=Time(uv_raw.time_array[0], format=\"jd\"), phase_frame=\"gcrs\"\n )\n\n # it's unclear to me how close this should be...\n assert np.allclose(\n uv_phase_simple_small.uvw_array, uv_raw_small.uvw_array, atol=1e-1\n )\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_phase_to_time_jd_input(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n uv_phase.phase_to_time(uv_raw.time_array[0])\n uv_phase.unphase_to_drift()\n assert uv_phase == uv_raw\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_phase_to_time_error(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n # check error if not passing a Time object to phase_to_time\n with pytest.raises(TypeError) as cm:\n uv_phase.phase_to_time(\"foo\")\n assert str(cm.value).startswith(\"time must be an astropy.time.Time object\")\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_unphase_drift_data_error(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n # check error if not passing a Time object to phase_to_time\n with pytest.raises(ValueError) as cm:\n uv_phase.unphase_to_drift()\n assert str(cm.value).startswith(\"The data is already drift scanning;\")\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"phase_func,phase_kwargs,err_msg\",\n [\n (\n \"unphase_to_drift\",\n {},\n \"The phasing type of the data is unknown. Set the phase_type\",\n ),\n (\n \"phase\",\n {\"ra\": 0, \"dec\": 0, \"epoch\": \"J2000\", \"allow_rephase\": False},\n \"The phasing type of the data is unknown. Set the phase_type\",\n ),\n (\n \"phase_to_time\",\n {\"time\": 0, \"allow_rephase\": False},\n \"The phasing type of the data is unknown. 
Set the phase_type\",\n ),\n ],\n)\ndef test_unknown_phase_unphase_hera_errors(\n uv1_2_set_uvws, phase_func, phase_kwargs, err_msg\n):\n uv_phase, uv_raw = uv1_2_set_uvws\n\n # Set phase type to unkown on some tests, ignore on others.\n uv_phase._set_unknown_phase_type()\n # if this is phase_to_time, use this index set in the dictionary and\n # assign the value of the time_array associated with that index\n # this is a little hacky, but we cannot acces uv_phase.time_array in the\n # parametrize\n if phase_func == \"phase_to_time\":\n phase_kwargs[\"time\"] = uv_phase.time_array[phase_kwargs[\"time\"]]\n\n with pytest.raises(ValueError) as cm:\n getattr(uv_phase, phase_func)(**phase_kwargs)\n assert str(cm.value).startswith(err_msg)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"phase_func,phase_kwargs,err_msg\",\n [\n (\n \"phase\",\n {\"ra\": 0, \"dec\": 0, \"epoch\": \"J2000\", \"allow_rephase\": False},\n \"The data is already phased;\",\n ),\n (\n \"phase_to_time\",\n {\"time\": 0, \"allow_rephase\": False},\n \"The data is already phased;\",\n ),\n ],\n)\ndef test_phase_rephase_hera_errors(uv1_2_set_uvws, phase_func, phase_kwargs, err_msg):\n uv_phase, uv_raw = uv1_2_set_uvws\n\n uv_phase.phase(0.0, 0.0, epoch=\"J2000\")\n # if this is phase_to_time, use this index set in the dictionary and\n # assign the value of the time_array associated with that index\n # this is a little hacky, but we cannot acces uv_phase.time_array in the\n # parametrize\n if phase_func == \"phase_to_time\":\n phase_kwargs[\"time\"] = uv_phase.time_array[int(phase_kwargs[\"time\"])]\n\n with pytest.raises(ValueError) as cm:\n getattr(uv_phase, phase_func)(**phase_kwargs)\n assert str(cm.value).startswith(err_msg)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_phase_unphase_hera_bad_frame(uv1_2_set_uvws):\n uv_phase, uv_raw = uv1_2_set_uvws\n # check errors when trying to phase to an unsupported frame\n with pytest.raises(ValueError) as cm:\n uv_phase.phase(0.0, 0.0, epoch=\"J2000\", phase_frame=\"cirs\")\n assert str(cm.value).startswith(\"phase_frame can only be set to icrs or gcrs.\")\n\n\ndef test_phasing():\n \"\"\"Use MWA files phased to 2 different places to test phasing.\"\"\"\n file1 = os.path.join(DATA_PATH, \"1133866760.uvfits\")\n file2 = os.path.join(DATA_PATH, \"1133866760_rephase.uvfits\")\n uvd1 = UVData()\n uvd2 = UVData()\n uvd1.read_uvfits(file1)\n uvd2.read_uvfits(file2)\n\n uvd1_drift = uvd1.copy()\n uvd1_drift.unphase_to_drift(phase_frame=\"gcrs\")\n uvd1_drift_antpos = uvd1.copy()\n uvd1_drift_antpos.unphase_to_drift(phase_frame=\"gcrs\", use_ant_pos=True)\n\n uvd2_drift = uvd2.copy()\n uvd2_drift.unphase_to_drift(phase_frame=\"gcrs\")\n uvd2_drift_antpos = uvd2.copy()\n uvd2_drift_antpos.unphase_to_drift(phase_frame=\"gcrs\", use_ant_pos=True)\n\n # the tolerances here are empirical -- based on what was seen in the\n # external phasing test. 
See the phasing memo in docs/references for\n # details.\n assert np.allclose(uvd1_drift.uvw_array, uvd2_drift.uvw_array, atol=2e-2)\n assert np.allclose(uvd1_drift_antpos.uvw_array, uvd2_drift_antpos.uvw_array)\n\n uvd2_rephase = uvd2.copy()\n uvd2_rephase.phase(\n uvd1.phase_center_ra,\n uvd1.phase_center_dec,\n uvd1.phase_center_epoch,\n orig_phase_frame=\"gcrs\",\n phase_frame=\"gcrs\",\n )\n uvd2_rephase_antpos = uvd2.copy()\n uvd2_rephase_antpos.phase(\n uvd1.phase_center_ra,\n uvd1.phase_center_dec,\n uvd1.phase_center_epoch,\n orig_phase_frame=\"gcrs\",\n phase_frame=\"gcrs\",\n use_ant_pos=True,\n )\n\n # the tolerances here are empirical -- based on what was seen in the\n # external phasing test. See the phasing memo in docs/references for\n # details.\n assert np.allclose(uvd1.uvw_array, uvd2_rephase.uvw_array, atol=2e-2)\n assert np.allclose(uvd1.uvw_array, uvd2_rephase_antpos.uvw_array, atol=5e-3)\n\n # rephase the drift objects to the original pointing and verify that they\n # match\n uvd1_drift.phase(\n uvd1.phase_center_ra,\n uvd1.phase_center_dec,\n uvd1.phase_center_epoch,\n phase_frame=\"gcrs\",\n )\n uvd1_drift_antpos.phase(\n uvd1.phase_center_ra,\n uvd1.phase_center_dec,\n uvd1.phase_center_epoch,\n phase_frame=\"gcrs\",\n use_ant_pos=True,\n )\n\n # the tolerances here are empirical -- caused by one unphase/phase cycle.\n # the antpos-based phasing differences are based on what was seen in the\n # external phasing test. See the phasing memo in docs/references for\n # details.\n assert np.allclose(uvd1.uvw_array, uvd1_drift.uvw_array, atol=1e-4)\n assert np.allclose(uvd1.uvw_array, uvd1_drift_antpos.uvw_array, atol=5e-3)\n\n uvd2_drift.phase(\n uvd2.phase_center_ra,\n uvd2.phase_center_dec,\n uvd2.phase_center_epoch,\n phase_frame=\"gcrs\",\n )\n uvd2_drift_antpos.phase(\n uvd2.phase_center_ra,\n uvd2.phase_center_dec,\n uvd2.phase_center_epoch,\n phase_frame=\"gcrs\",\n use_ant_pos=True,\n )\n\n # the tolerances here are empirical -- caused by one unphase/phase cycle.\n # the antpos-based phasing differences are based on what was seen in the\n # external phasing test. 
See the phasing memo in docs/references for\n # details.\n assert np.allclose(uvd2.uvw_array, uvd2_drift.uvw_array, atol=1e-4)\n assert np.allclose(uvd2.uvw_array, uvd2_drift_antpos.uvw_array, atol=2e-2)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_set_phase_unknown(casa_uvfits):\n uv_object = casa_uvfits\n\n uv_object._set_unknown_phase_type()\n assert uv_object.phase_type == \"unknown\"\n assert not uv_object._phase_center_epoch.required\n assert not uv_object._phase_center_ra.required\n assert not uv_object._phase_center_dec.required\n assert uv_object.check()\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_blts(paper_uvh5):\n uv_object = paper_uvh5\n old_history = uv_object.history\n # fmt: off\n blt_inds = np.array([172, 182, 132, 227, 144, 44, 16, 104, 385, 134, 326, 140, 116,\n 218, 178, 391, 111, 276, 274, 308, 38, 64, 317, 76, 239, 246,\n 34, 39, 83, 184, 208, 60, 374, 295, 118, 337, 261, 21, 375,\n 396, 355, 187, 95, 122, 186, 113, 260, 264, 156, 13, 228, 291,\n 302, 72, 137, 216, 299, 341, 207, 256, 223, 250, 268, 147, 73,\n 32, 142, 383, 221, 203, 258, 286, 324, 265, 170, 236, 8, 275,\n 304, 117, 29, 167, 15, 388, 171, 82, 322, 248, 160, 85, 66,\n 46, 272, 328, 323, 152, 200, 119, 359, 23, 363, 56, 219, 257,\n 11, 307, 336, 289, 136, 98, 37, 163, 158, 80, 125, 40, 298,\n 75, 320, 74, 57, 346, 121, 129, 332, 238, 93, 18, 330, 339,\n 381, 234, 176, 22, 379, 199, 266, 100, 90, 292, 205, 58, 222,\n 350, 109, 273, 191, 368, 88, 101, 65, 155, 2, 296, 306, 398,\n 369, 378, 254, 67, 249, 102, 348, 392, 20, 28, 169, 262, 269,\n 287, 86, 300, 143, 177, 42, 290, 284, 123, 189, 175, 97, 340,\n 242, 342, 331, 282, 235, 344, 63, 115, 78, 30, 226, 157, 133,\n 71, 35, 212, 333])\n # fmt: on\n selected_data = uv_object.data_array[np.sort(blt_inds), :, :, :]\n\n uv_object2 = uv_object.copy()\n uv_object2.select(blt_inds=blt_inds)\n assert len(blt_inds) == uv_object2.Nblts\n\n # verify that histories are different\n assert not uvutils._check_histories(old_history, uv_object2.history)\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific baseline-times using pyuvdata.\",\n uv_object2.history,\n )\n\n assert np.all(selected_data == uv_object2.data_array)\n\n # check that it also works with higher dimension array\n uv_object2 = uv_object.copy()\n uv_object2.select(blt_inds=blt_inds[np.newaxis, :])\n assert len(blt_inds) == uv_object2.Nblts\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific baseline-times using pyuvdata.\",\n uv_object2.history,\n )\n assert np.all(selected_data == uv_object2.data_array)\n\n # check that just doing the metadata works properly\n uv_object3 = uv_object.copy()\n uv_object3.data_array = None\n uv_object3.flag_array = None\n uv_object3.nsample_array = None\n assert uv_object3.metadata_only is True\n uv_object4 = uv_object3.select(blt_inds=blt_inds, inplace=False)\n for param in uv_object4:\n param_name = getattr(uv_object4, param).name\n if param_name not in [\"data_array\", \"flag_array\", \"nsample_array\"]:\n assert getattr(uv_object4, param) == getattr(uv_object2, param)\n else:\n assert getattr(uv_object4, param_name) is None\n\n # also check with inplace=True\n uv_object3.select(blt_inds=blt_inds)\n assert uv_object3 == uv_object4\n\n # check for errors associated with out of bounds indices\n pytest.raises(ValueError, uv_object.select, 
blt_inds=np.arange(-10, -5))\n    pytest.raises(\n        ValueError,\n        uv_object.select,\n        blt_inds=np.arange(uv_object.Nblts + 1, uv_object.Nblts + 10),\n    )\n\n\n@pytest.mark.filterwarnings(\"ignore:Telescope EVLA is not\")\n@pytest.mark.filterwarnings(\"ignore:The uvw_array does not match the expected values\")\ndef test_select_antennas(casa_uvfits):\n    uv_object = casa_uvfits\n    old_history = uv_object.history\n    unique_ants = np.unique(\n        uv_object.ant_1_array.tolist() + uv_object.ant_2_array.tolist()\n    )\n    ants_to_keep = np.array([0, 19, 11, 24, 3, 23, 1, 20, 21])\n\n    blts_select = [\n        (a1 in ants_to_keep) & (a2 in ants_to_keep)\n        for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)\n    ]\n    Nblts_selected = np.sum(blts_select)\n\n    uv_object2 = uv_object.copy()\n    uv_object2.select(antenna_nums=ants_to_keep)\n\n    assert len(ants_to_keep) == uv_object2.Nants_data\n    assert Nblts_selected == uv_object2.Nblts\n    for ant in ants_to_keep:\n        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array\n    for ant in np.unique(\n        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()\n    ):\n        assert ant in ants_to_keep\n\n    assert uvutils._check_histories(\n        old_history + \" Downselected to \" \"specific antennas using pyuvdata.\",\n        uv_object2.history,\n    )\n\n    # check that it also works with higher dimension array\n    uv_object2 = uv_object.copy()\n    uv_object2.select(antenna_nums=ants_to_keep[np.newaxis, :])\n\n    assert len(ants_to_keep) == uv_object2.Nants_data\n    assert Nblts_selected == uv_object2.Nblts\n    for ant in ants_to_keep:\n        assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array\n    for ant in np.unique(\n        uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()\n    ):\n        assert ant in ants_to_keep\n\n    assert uvutils._check_histories(\n        old_history + \" Downselected to \" \"specific antennas using pyuvdata.\",\n        uv_object2.history,\n    )\n\n    # now test using antenna_names to specify antennas to keep\n    uv_object3 = uv_object.copy()\n    ants_to_keep = np.array(sorted(ants_to_keep))\n    ant_names = []\n    for a in ants_to_keep:\n        ind = np.where(uv_object3.antenna_numbers == a)[0][0]\n        ant_names.append(uv_object3.antenna_names[ind])\n\n    uv_object3.select(antenna_names=ant_names)\n\n    assert uv_object2 == uv_object3\n\n    # check that it also works with higher dimension array\n    uv_object3 = uv_object.copy()\n    ants_to_keep = np.array(sorted(ants_to_keep))\n    ant_names = []\n    for a in ants_to_keep:\n        ind = np.where(uv_object3.antenna_numbers == a)[0][0]\n        ant_names.append(uv_object3.antenna_names[ind])\n\n    uv_object3.select(antenna_names=[ant_names])\n\n    assert uv_object2 == uv_object3\n\n    # test removing metadata associated with antennas that are no longer present\n    # also add (different) antenna_diameters to test downselection\n    uv_object.antenna_diameters = 1.0 * np.ones(\n        (uv_object.Nants_telescope,), dtype=float\n    )\n    for i in range(uv_object.Nants_telescope):\n        uv_object.antenna_diameters += i\n    uv_object4 = uv_object.copy()\n    uv_object4.select(antenna_nums=ants_to_keep, keep_all_metadata=False)\n    assert uv_object4.Nants_telescope == 9\n    assert set(uv_object4.antenna_numbers) == set(ants_to_keep)\n    for a in ants_to_keep:\n        idx1 = uv_object.antenna_numbers.tolist().index(a)\n        idx2 = uv_object4.antenna_numbers.tolist().index(a)\n        assert uv_object.antenna_names[idx1] == uv_object4.antenna_names[idx2]\n        assert np.allclose(\n            uv_object.antenna_positions[idx1, :], uv_object4.antenna_positions[idx2, :]\n        )\n        assert uv_object.antenna_diameters[idx1] == uv_object4.antenna_diameters[idx2]\n\n    # 
remove antenna_diameters from object\n uv_object.antenna_diameters = None\n\n # check for errors associated with antennas not included in data, bad names\n # or providing numbers and names\n pytest.raises(\n ValueError, uv_object.select, antenna_nums=np.max(unique_ants) + np.arange(1, 3)\n )\n pytest.raises(ValueError, uv_object.select, antenna_names=\"test1\")\n pytest.raises(\n ValueError, uv_object.select, antenna_nums=ants_to_keep, antenna_names=ant_names\n )\n\n\ndef sort_bl(p):\n \"\"\"Sort a tuple that starts with a pair of antennas, and may have stuff after.\"\"\"\n if p[1] >= p[0]:\n return p\n return (p[1], p[0]) + p[2:]\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_bls(casa_uvfits):\n uv_object = casa_uvfits\n old_history = uv_object.history\n first_ants = [6, 2, 7, 2, 21, 27, 8]\n second_ants = [0, 20, 8, 1, 2, 3, 22]\n new_unique_ants = np.unique(first_ants + second_ants)\n ant_pairs_to_keep = list(zip(first_ants, second_ants))\n sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]\n\n blts_select = [\n sort_bl((a1, a2)) in sorted_pairs_to_keep\n for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)\n ]\n Nblts_selected = np.sum(blts_select)\n\n uv_object2 = uv_object.copy()\n uv_object2.select(bls=ant_pairs_to_keep)\n sorted_pairs_object2 = [\n sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)\n ]\n\n assert len(new_unique_ants) == uv_object2.Nants_data\n assert Nblts_selected == uv_object2.Nblts\n for ant in new_unique_ants:\n assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array\n for ant in np.unique(\n uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()\n ):\n assert ant in new_unique_ants\n for pair in sorted_pairs_to_keep:\n assert pair in sorted_pairs_object2\n for pair in sorted_pairs_object2:\n assert pair in sorted_pairs_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific baselines using pyuvdata.\",\n uv_object2.history,\n )\n\n # check using baseline number parameter\n uv_object3 = uv_object.copy()\n bls_nums_to_keep = [\n uv_object.antnums_to_baseline(ant1, ant2) for ant1, ant2 in sorted_pairs_to_keep\n ]\n\n uv_object3.select(bls=bls_nums_to_keep)\n sorted_pairs_object3 = [\n sort_bl(p) for p in zip(uv_object3.ant_1_array, uv_object3.ant_2_array)\n ]\n\n assert len(new_unique_ants) == uv_object3.Nants_data\n assert Nblts_selected == uv_object3.Nblts\n for ant in new_unique_ants:\n assert ant in uv_object3.ant_1_array or ant in uv_object3.ant_2_array\n for ant in np.unique(\n uv_object3.ant_1_array.tolist() + uv_object3.ant_2_array.tolist()\n ):\n assert ant in new_unique_ants\n for pair in sorted_pairs_to_keep:\n assert pair in sorted_pairs_object3\n for pair in sorted_pairs_object3:\n assert pair in sorted_pairs_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific baselines using pyuvdata.\",\n uv_object3.history,\n )\n\n # check select with polarizations\n first_ants = [6, 2, 7, 2, 21, 27, 8]\n second_ants = [0, 20, 8, 1, 2, 3, 22]\n pols = [\"RR\", \"RR\", \"RR\", \"RR\", \"RR\", \"RR\", \"RR\"]\n new_unique_ants = np.unique(first_ants + second_ants)\n bls_to_keep = list(zip(first_ants, second_ants, pols))\n sorted_bls_to_keep = [sort_bl(p) for p in bls_to_keep]\n\n blts_select = [\n sort_bl((a1, a2, \"RR\")) in sorted_bls_to_keep\n for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)\n 
]\n Nblts_selected = np.sum(blts_select)\n\n uv_object2 = uv_object.copy()\n uv_object2.select(bls=bls_to_keep)\n sorted_pairs_object2 = [\n sort_bl(p) + (\"RR\",)\n for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)\n ]\n\n assert len(new_unique_ants) == uv_object2.Nants_data\n assert Nblts_selected == uv_object2.Nblts\n for ant in new_unique_ants:\n assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array\n for ant in np.unique(\n uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()\n ):\n assert ant in new_unique_ants\n for bl in sorted_bls_to_keep:\n assert bl in sorted_pairs_object2\n for bl in sorted_pairs_object2:\n assert bl in sorted_bls_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \"\n \"specific baselines, polarizations using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that you can use numpy integers with out errors:\n first_ants = list(map(np.int32, [6, 2, 7, 2, 21, 27, 8]))\n second_ants = list(map(np.int32, [0, 20, 8, 1, 2, 3, 22]))\n ant_pairs_to_keep = list(zip(first_ants, second_ants))\n\n uv_object2 = uv_object.select(bls=ant_pairs_to_keep, inplace=False)\n sorted_pairs_object2 = [\n sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)\n ]\n\n assert len(new_unique_ants) == uv_object2.Nants_data\n assert Nblts_selected == uv_object2.Nblts\n for ant in new_unique_ants:\n assert ant in uv_object2.ant_1_array or ant in uv_object2.ant_2_array\n for ant in np.unique(\n uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()\n ):\n assert ant in new_unique_ants\n for pair in sorted_pairs_to_keep:\n assert pair in sorted_pairs_object2\n for pair in sorted_pairs_object2:\n assert pair in sorted_pairs_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific baselines using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that you can specify a single pair without errors\n uv_object2.select(bls=(0, 6))\n sorted_pairs_object2 = [\n sort_bl(p) for p in zip(uv_object2.ant_1_array, uv_object2.ant_2_array)\n ]\n assert list(set(sorted_pairs_object2)) == [(0, 6)]\n\n # check for errors associated with antenna pairs not included in data and bad inputs\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=list(zip(first_ants, second_ants)) + [0, 6])\n assert str(cm.value).startswith(\"bls must be a list of tuples of antenna numbers\")\n\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=[(uv_object.antenna_names[0], uv_object.antenna_names[1])])\n assert str(cm.value).startswith(\"bls must be a list of tuples of antenna numbers\")\n\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=(5, 1))\n assert str(cm.value).startswith(\n \"Antenna number 5 is not present in the \" \"ant_1_array or ant_2_array\"\n )\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=(0, 5))\n assert str(cm.value).startswith(\n \"Antenna number 5 is not present in the \" \"ant_1_array or ant_2_array\"\n )\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=(27, 27))\n assert str(cm.value).startswith(\"Antenna pair (27, 27) does not have any data\")\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=(6, 0, \"RR\"), polarizations=\"RR\")\n assert str(cm.value).startswith(\n \"Cannot provide length-3 tuples and also \" \"specify polarizations.\"\n )\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=(6, 0, 8))\n assert str(cm.value).startswith(\n \"The third element in each bl must be a \" 
\"polarization string\"\n )\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=[])\n assert str(cm.value).startswith(\"bls must be a list of tuples of antenna numbers\")\n with pytest.raises(ValueError) as cm:\n uv_object.select(bls=[100])\n assert str(cm.value).startswith(\"Baseline number 100 is not present in the\")\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_times(casa_uvfits):\n uv_object = casa_uvfits\n old_history = uv_object.history\n unique_times = np.unique(uv_object.time_array)\n times_to_keep = unique_times[[0, 3, 5, 6, 7, 10, 14]]\n\n Nblts_selected = np.sum([t in times_to_keep for t in uv_object.time_array])\n\n uv_object2 = uv_object.copy()\n uv_object2.select(times=times_to_keep)\n\n assert len(times_to_keep) == uv_object2.Ntimes\n assert Nblts_selected == uv_object2.Nblts\n for t in times_to_keep:\n assert t in uv_object2.time_array\n for t in np.unique(uv_object2.time_array):\n assert t in times_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific times using pyuvdata.\",\n uv_object2.history,\n )\n # check that it also works with higher dimension array\n uv_object2 = uv_object.copy()\n uv_object2.select(times=times_to_keep[np.newaxis, :])\n\n assert len(times_to_keep) == uv_object2.Ntimes\n assert Nblts_selected == uv_object2.Nblts\n for t in times_to_keep:\n assert t in uv_object2.time_array\n for t in np.unique(uv_object2.time_array):\n assert t in times_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific times using pyuvdata.\",\n uv_object2.history,\n )\n\n # check for errors associated with times not included in data\n pytest.raises(\n ValueError,\n uv_object.select,\n times=[np.min(unique_times) - uv_object.integration_time[0]],\n )\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_time_range(casa_uvfits):\n uv_object = casa_uvfits\n old_history = uv_object.history\n unique_times = np.unique(uv_object.time_array)\n mean_time = np.mean(unique_times)\n time_range = [np.min(unique_times), mean_time]\n times_to_keep = unique_times[\n np.nonzero((unique_times <= time_range[1]) & (unique_times >= time_range[0]))\n ]\n\n Nblts_selected = np.nonzero(\n (uv_object.time_array <= time_range[1])\n & (uv_object.time_array >= time_range[0])\n )[0].size\n\n uv_object2 = uv_object.copy()\n uv_object2.select(time_range=time_range)\n\n assert times_to_keep.size == uv_object2.Ntimes\n assert Nblts_selected == uv_object2.Nblts\n for t in times_to_keep:\n assert t in uv_object2.time_array\n for t in np.unique(uv_object2.time_array):\n assert t in times_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific times using pyuvdata.\",\n uv_object2.history,\n )\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_time_range_no_data(casa_uvfits):\n \"\"\"Check for error associated with times not included in data.\"\"\"\n uv_object = casa_uvfits\n unique_times = np.unique(uv_object.time_array)\n with pytest.raises(ValueError) as cm:\n uv_object.select(\n time_range=[\n np.min(unique_times) - uv_object.integration_time[0] * 2,\n np.min(unique_times) - uv_object.integration_time[0],\n ]\n )\n assert str(cm.value).startswith(\"No elements in time 
range\")\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_time_and_time_range(casa_uvfits):\n \"\"\"Check for error setting times and time_range.\"\"\"\n uv_object = casa_uvfits\n unique_times = np.unique(uv_object.time_array)\n mean_time = np.mean(unique_times)\n time_range = [np.min(unique_times), mean_time]\n times_to_keep = unique_times[[0, 3, 5, 6, 7, 10, 14]]\n with pytest.raises(ValueError) as cm:\n uv_object.select(time_range=time_range, times=times_to_keep)\n assert str(cm.value).startswith('Only one of \"times\" and \"time_range\" can be set')\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_time_range_one_elem(casa_uvfits):\n \"\"\"Check for error if time_range not length 2.\"\"\"\n uv_object = casa_uvfits\n unique_times = np.unique(uv_object.time_array)\n mean_time = np.mean(unique_times)\n time_range = [np.min(unique_times), mean_time]\n with pytest.raises(ValueError) as cm:\n uv_object.select(time_range=time_range[0])\n assert str(cm.value).startswith(\"time_range must be length 2\")\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_frequencies_uvfits(casa_uvfits, tmp_path):\n uv_object = casa_uvfits\n old_history = uv_object.history\n freqs_to_keep = uv_object.freq_array[0, np.arange(12, 22)]\n\n uv_object2 = uv_object.copy()\n uv_object2.select(frequencies=freqs_to_keep)\n\n assert len(freqs_to_keep) == uv_object2.Nfreqs\n for f in freqs_to_keep:\n assert f in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in freqs_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that it also works with higher dimension array\n uv_object2 = uv_object.copy()\n uv_object2.select(frequencies=freqs_to_keep[np.newaxis, :])\n\n assert len(freqs_to_keep) == uv_object2.Nfreqs\n for f in freqs_to_keep:\n assert f in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in freqs_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that selecting one frequency works\n uv_object2 = uv_object.copy()\n uv_object2.select(frequencies=freqs_to_keep[0])\n assert 1 == uv_object2.Nfreqs\n assert freqs_to_keep[0] in uv_object2.freq_array\n for f in uv_object2.freq_array:\n assert f in [freqs_to_keep[0]]\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # check for errors associated with frequencies not included in data\n pytest.raises(\n ValueError,\n uv_object.select,\n frequencies=[np.max(uv_object.freq_array) + uv_object.channel_width],\n )\n\n # check for warnings and errors associated with unevenly spaced or\n # non-contiguous frequencies\n uv_object2 = uv_object.copy()\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Selected frequencies are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv_object2.select(frequencies=uv_object2.freq_array[0, [0, 5, 6]])\n write_file_uvfits = str(tmp_path / \"select_test.uvfits\")\n pytest.raises(ValueError, 
uv_object2.write_uvfits, write_file_uvfits)\n\n uv_object2 = uv_object.copy()\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Selected frequencies are not contiguous\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv_object2.select(frequencies=uv_object2.freq_array[0, [0, 2, 4]])\n\n pytest.raises(ValueError, uv_object2.write_uvfits, write_file_uvfits)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_frequencies_miriad(casa_uvfits, tmp_path):\n pytest.importorskip(\"pyuvdata._miriad\")\n uv_object = casa_uvfits\n old_history = uv_object.history\n freqs_to_keep = uv_object.freq_array[0, np.arange(12, 22)]\n\n uv_object2 = uv_object.copy()\n uv_object2.select(frequencies=freqs_to_keep)\n\n assert len(freqs_to_keep) == uv_object2.Nfreqs\n for f in freqs_to_keep:\n assert f in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in freqs_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that it also works with higher dimension array\n uv_object2 = uv_object.copy()\n uv_object2.select(frequencies=freqs_to_keep[np.newaxis, :])\n\n assert len(freqs_to_keep) == uv_object2.Nfreqs\n for f in freqs_to_keep:\n assert f in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in freqs_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that selecting one frequency works\n uv_object2 = uv_object.copy()\n uv_object2.select(frequencies=freqs_to_keep[0])\n assert 1 == uv_object2.Nfreqs\n assert freqs_to_keep[0] in uv_object2.freq_array\n for f in uv_object2.freq_array:\n assert f in [freqs_to_keep[0]]\n\n assert uvutils._check_histories(\n old_history + \" Downselected to specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # check for errors associated with frequencies not included in data\n pytest.raises(\n ValueError,\n uv_object.select,\n frequencies=[np.max(uv_object.freq_array) + uv_object.channel_width],\n )\n\n # check for warnings and errors associated with unevenly spaced or\n # non-contiguous frequencies\n uv_object2 = uv_object.copy()\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Selected frequencies are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv_object2.select(frequencies=uv_object2.freq_array[0, [0, 5, 6]])\n write_file_miriad = str(tmp_path / \"select_test.uvfits\")\n pytest.raises(ValueError, uv_object2.write_miriad, write_file_miriad)\n\n uv_object2 = uv_object.copy()\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Selected frequencies are not contiguous\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv_object2.select(frequencies=uv_object2.freq_array[0, [0, 2, 4]])\n pytest.raises(ValueError, uv_object2.write_miriad, write_file_miriad)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_freq_chans(casa_uvfits):\n uv_object = casa_uvfits\n old_history = uv_object.history\n chans_to_keep = np.arange(12, 22)\n\n uv_object2 = uv_object.copy()\n uv_object2.select(freq_chans=chans_to_keep)\n\n 
assert len(chans_to_keep) == uv_object2.Nfreqs\n for chan in chans_to_keep:\n assert uv_object.freq_array[0, chan] in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in uv_object.freq_array[0, chans_to_keep]\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that it also works with higher dimension array\n uv_object2 = uv_object.copy()\n uv_object2.select(freq_chans=chans_to_keep[np.newaxis, :])\n\n assert len(chans_to_keep) == uv_object2.Nfreqs\n for chan in chans_to_keep:\n assert uv_object.freq_array[0, chan] in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in uv_object.freq_array[0, chans_to_keep]\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific frequencies using pyuvdata.\",\n uv_object2.history,\n )\n\n # Test selecting both channels and frequencies\n freqs_to_keep = uv_object.freq_array[0, np.arange(20, 30)] # Overlaps with chans\n all_chans_to_keep = np.arange(12, 30)\n\n uv_object2 = uv_object.copy()\n uv_object2.select(frequencies=freqs_to_keep, freq_chans=chans_to_keep)\n\n assert len(all_chans_to_keep) == uv_object2.Nfreqs\n for chan in all_chans_to_keep:\n assert uv_object.freq_array[0, chan] in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in uv_object.freq_array[0, all_chans_to_keep]\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_polarizations(casa_uvfits, tmp_path):\n uv_object = casa_uvfits\n old_history = uv_object.history\n pols_to_keep = [-1, -2]\n\n uv_object2 = uv_object.copy()\n uv_object2.select(polarizations=pols_to_keep)\n\n assert len(pols_to_keep) == uv_object2.Npols\n for p in pols_to_keep:\n assert p in uv_object2.polarization_array\n for p in np.unique(uv_object2.polarization_array):\n assert p in pols_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific polarizations using pyuvdata.\",\n uv_object2.history,\n )\n\n # check that it also works with higher dimension array\n uv_object2 = uv_object.copy()\n uv_object2.select(polarizations=[pols_to_keep])\n\n assert len(pols_to_keep) == uv_object2.Npols\n for p in pols_to_keep:\n assert p in uv_object2.polarization_array\n for p in np.unique(uv_object2.polarization_array):\n assert p in pols_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \" \"specific polarizations using pyuvdata.\",\n uv_object2.history,\n )\n\n # check for errors associated with polarizations not included in data\n pytest.raises(ValueError, uv_object2.select, polarizations=[-3, -4])\n\n # check for warnings and errors associated with unevenly spaced polarizations\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Selected polarization values are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv_object.select(polarizations=uv_object.polarization_array[[0, 1, 3]])\n write_file_uvfits = str(tmp_path / \"select_test.uvfits\")\n pytest.raises(ValueError, uv_object.write_uvfits, write_file_uvfits)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select(casa_uvfits):\n # now test selecting along all axes at once\n uv_object = casa_uvfits\n old_history = uv_object.history\n 
# fmt: off\n blt_inds = np.array([1057, 461, 1090, 354, 528, 654, 882, 775, 369, 906, 748,\n 875, 296, 773, 554, 395, 1003, 476, 762, 976, 1285, 874,\n 717, 383, 1281, 924, 264, 1163, 297, 857, 1258, 1000, 180,\n 1303, 1139, 393, 42, 135, 789, 713, 527, 1218, 576, 100,\n 1311, 4, 653, 724, 591, 889, 36, 1033, 113, 479, 322,\n 118, 898, 1263, 477, 96, 935, 238, 195, 531, 124, 198,\n 992, 1131, 305, 154, 961, 6, 1175, 76, 663, 82, 637,\n 288, 1152, 845, 1290, 379, 1225, 1240, 733, 1172, 937, 1325,\n 817, 416, 261, 1316, 957, 723, 215, 237, 270, 1309, 208,\n 17, 1028, 895, 574, 166, 784, 834, 732, 1022, 1068, 1207,\n 356, 474, 313, 137, 172, 181, 925, 201, 190, 1277, 1044,\n 1242, 702, 567, 557, 1032, 1352, 504, 545, 422, 179, 780,\n 280, 890, 774, 884])\n # fmt: on\n ants_to_keep = np.array([11, 6, 20, 26, 2, 27, 7, 14])\n\n ant_pairs_to_keep = [(2, 11), (20, 26), (6, 7), (3, 27), (14, 6)]\n sorted_pairs_to_keep = [sort_bl(p) for p in ant_pairs_to_keep]\n\n freqs_to_keep = uv_object.freq_array[0, np.arange(31, 39)]\n\n unique_times = np.unique(uv_object.time_array)\n times_to_keep = unique_times[[0, 2, 6, 8, 10, 13, 14]]\n\n pols_to_keep = [-1, -3]\n\n # Independently count blts that should be selected\n blts_blt_select = [i in blt_inds for i in np.arange(uv_object.Nblts)]\n blts_ant_select = [\n (a1 in ants_to_keep) & (a2 in ants_to_keep)\n for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)\n ]\n blts_pair_select = [\n sort_bl((a1, a2)) in sorted_pairs_to_keep\n for (a1, a2) in zip(uv_object.ant_1_array, uv_object.ant_2_array)\n ]\n blts_time_select = [t in times_to_keep for t in uv_object.time_array]\n Nblts_select = np.sum(\n [\n bi & (ai & pi) & ti\n for (bi, ai, pi, ti) in zip(\n blts_blt_select, blts_ant_select, blts_pair_select, blts_time_select\n )\n ]\n )\n\n uv_object2 = uv_object.copy()\n uv_object2.select(\n blt_inds=blt_inds,\n antenna_nums=ants_to_keep,\n bls=ant_pairs_to_keep,\n frequencies=freqs_to_keep,\n times=times_to_keep,\n polarizations=pols_to_keep,\n )\n\n assert Nblts_select == uv_object2.Nblts\n for ant in np.unique(\n uv_object2.ant_1_array.tolist() + uv_object2.ant_2_array.tolist()\n ):\n assert ant in ants_to_keep\n\n assert len(freqs_to_keep) == uv_object2.Nfreqs\n for f in freqs_to_keep:\n assert f in uv_object2.freq_array\n for f in np.unique(uv_object2.freq_array):\n assert f in freqs_to_keep\n\n for t in np.unique(uv_object2.time_array):\n assert t in times_to_keep\n\n assert len(pols_to_keep) == uv_object2.Npols\n for p in pols_to_keep:\n assert p in uv_object2.polarization_array\n for p in np.unique(uv_object2.polarization_array):\n assert p in pols_to_keep\n\n assert uvutils._check_histories(\n old_history + \" Downselected to \"\n \"specific baseline-times, antennas, \"\n \"baselines, times, frequencies, \"\n \"polarizations using pyuvdata.\",\n uv_object2.history,\n )\n\n # test that a ValueError is raised if the selection eliminates all blts\n pytest.raises(ValueError, uv_object.select, times=unique_times[0], antenna_nums=1)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_not_inplace(casa_uvfits):\n # Test non-inplace select\n uv_object = casa_uvfits\n old_history = uv_object.history\n uv1 = uv_object.select(freq_chans=np.arange(32), inplace=False)\n uv1 += uv_object.select(freq_chans=np.arange(32, 64), inplace=False)\n assert uvutils._check_histories(\n old_history + \" Downselected to \"\n \"specific frequencies using pyuvdata. 
\"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv1.history = old_history\n assert uv1 == uv_object\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"metadata_only\", [True, False])\ndef test_conjugate_bls(casa_uvfits, metadata_only):\n testfile = os.path.join(DATA_PATH, \"day2_TDEM0003_10s_norx_1src_1spw.uvfits\")\n\n if not metadata_only:\n uv1 = casa_uvfits\n else:\n uv1 = UVData()\n uv1.read_uvfits(testfile, read_data=False)\n if metadata_only:\n assert uv1.metadata_only\n # file comes in with ant1<ant2\n assert np.min(uv1.ant_2_array - uv1.ant_1_array) >= 0\n\n # check everything swapped & conjugated when go to ant2<ant1\n uv2 = uv1.copy()\n uv2.conjugate_bls(convention=\"ant2<ant1\")\n assert np.min(uv2.ant_1_array - uv2.ant_2_array) >= 0\n\n assert np.allclose(uv1.ant_1_array, uv2.ant_2_array)\n assert np.allclose(uv1.ant_2_array, uv2.ant_1_array)\n assert np.allclose(\n uv1.uvw_array,\n -1 * uv2.uvw_array,\n rtol=uv1._uvw_array.tols[0],\n atol=uv1._uvw_array.tols[1],\n )\n\n if not metadata_only:\n # complicated because of the polarization swaps\n # polarization_array = [-1 -2 -3 -4]\n assert np.allclose(\n uv1.data_array[:, :, :, :2],\n np.conj(uv2.data_array[:, :, :, :2]),\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n\n assert np.allclose(\n uv1.data_array[:, :, :, 2],\n np.conj(uv2.data_array[:, :, :, 3]),\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n\n assert np.allclose(\n uv1.data_array[:, :, :, 3],\n np.conj(uv2.data_array[:, :, :, 2]),\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n\n # check everything returned to original values with original convention\n uv2.conjugate_bls(convention=\"ant1<ant2\")\n assert uv1 == uv2\n\n # conjugate a particular set of blts\n blts_to_conjugate = np.arange(uv2.Nblts // 2)\n blts_not_conjugated = np.arange(uv2.Nblts // 2, uv2.Nblts)\n uv2.conjugate_bls(convention=blts_to_conjugate)\n\n assert np.allclose(\n uv1.ant_1_array[blts_to_conjugate], uv2.ant_2_array[blts_to_conjugate]\n )\n assert np.allclose(\n uv1.ant_2_array[blts_to_conjugate], uv2.ant_1_array[blts_to_conjugate]\n )\n assert np.allclose(\n uv1.ant_1_array[blts_not_conjugated], uv2.ant_1_array[blts_not_conjugated]\n )\n assert np.allclose(\n uv1.ant_2_array[blts_not_conjugated], uv2.ant_2_array[blts_not_conjugated]\n )\n\n assert np.allclose(\n uv1.uvw_array[blts_to_conjugate],\n -1 * uv2.uvw_array[blts_to_conjugate],\n rtol=uv1._uvw_array.tols[0],\n atol=uv1._uvw_array.tols[1],\n )\n assert np.allclose(\n uv1.uvw_array[blts_not_conjugated],\n uv2.uvw_array[blts_not_conjugated],\n rtol=uv1._uvw_array.tols[0],\n atol=uv1._uvw_array.tols[1],\n )\n if not metadata_only:\n # complicated because of the polarization swaps\n # polarization_array = [-1 -2 -3 -4]\n assert np.allclose(\n uv1.data_array[blts_to_conjugate, :, :, :2],\n np.conj(uv2.data_array[blts_to_conjugate, :, :, :2]),\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n assert np.allclose(\n uv1.data_array[blts_not_conjugated, :, :, :2],\n uv2.data_array[blts_not_conjugated, :, :, :2],\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n\n assert np.allclose(\n uv1.data_array[blts_to_conjugate, :, :, 2],\n np.conj(uv2.data_array[blts_to_conjugate, :, :, 3]),\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n assert np.allclose(\n 
uv1.data_array[blts_not_conjugated, :, :, 2],\n uv2.data_array[blts_not_conjugated, :, :, 2],\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n\n assert np.allclose(\n uv1.data_array[blts_to_conjugate, :, :, 3],\n np.conj(uv2.data_array[blts_to_conjugate, :, :, 2]),\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n assert np.allclose(\n uv1.data_array[blts_not_conjugated, :, :, 3],\n uv2.data_array[blts_not_conjugated, :, :, 3],\n rtol=uv1._data_array.tols[0],\n atol=uv1._data_array.tols[1],\n )\n\n # check uv half plane conventions\n uv2.conjugate_bls(convention=\"u<0\", use_enu=False)\n assert np.max(uv2.uvw_array[:, 0]) <= 0\n\n uv2.conjugate_bls(convention=\"u>0\", use_enu=False)\n assert np.min(uv2.uvw_array[:, 0]) >= 0\n\n uv2.conjugate_bls(convention=\"v<0\", use_enu=False)\n assert np.max(uv2.uvw_array[:, 1]) <= 0\n\n uv2.conjugate_bls(convention=\"v>0\", use_enu=False)\n assert np.min(uv2.uvw_array[:, 1]) >= 0\n\n # unphase to drift to test using ENU positions\n uv2.unphase_to_drift(use_ant_pos=True)\n uv2.conjugate_bls(convention=\"u<0\")\n assert np.max(uv2.uvw_array[:, 0]) <= 0\n\n uv2.conjugate_bls(convention=\"u>0\")\n assert np.min(uv2.uvw_array[:, 0]) >= 0\n\n uv2.conjugate_bls(convention=\"v<0\")\n assert np.max(uv2.uvw_array[:, 1]) <= 0\n\n uv2.conjugate_bls(convention=\"v>0\")\n assert np.min(uv2.uvw_array[:, 1]) >= 0\n\n # test errors\n with pytest.raises(ValueError) as cm:\n uv2.conjugate_bls(convention=\"foo\")\n assert str(cm.value).startswith(\"convention must be one of\")\n\n with pytest.raises(ValueError) as cm:\n uv2.conjugate_bls(convention=np.arange(5) - 1)\n assert str(cm.value).startswith(\"If convention is an index array\")\n\n with pytest.raises(ValueError) as cm:\n uv2.conjugate_bls(convention=[uv2.Nblts])\n\n assert str(cm.value).startswith(\"If convention is an index array\")\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_reorder_pols(casa_uvfits):\n # Test function to fix polarization order\n uv1 = casa_uvfits\n uv2 = uv1.copy()\n uv3 = uv1.copy()\n # reorder uv2 manually\n order = [1, 3, 2, 0]\n uv2.polarization_array = uv2.polarization_array[order]\n uv2.data_array = uv2.data_array[:, :, :, order]\n uv2.nsample_array = uv2.nsample_array[:, :, :, order]\n uv2.flag_array = uv2.flag_array[:, :, :, order]\n uv1.reorder_pols(order=order)\n assert uv1 == uv2\n\n # Restore original order\n uv1 = uv3.copy()\n uv2.reorder_pols()\n assert uv1 == uv2\n\n uv1.reorder_pols(order=\"AIPS\")\n # check that we have aips ordering\n aips_pols = np.array([-1, -2, -3, -4]).astype(int)\n assert np.all(uv1.polarization_array == aips_pols)\n\n uv2 = uv1.copy()\n uv2.reorder_pols(order=\"CASA\")\n # check that we have casa ordering\n casa_pols = np.array([-1, -3, -4, -2]).astype(int)\n assert np.all(uv2.polarization_array == casa_pols)\n order = np.array([0, 2, 3, 1])\n assert np.all(uv2.data_array == uv1.data_array[:, :, :, order])\n assert np.all(uv2.flag_array == uv1.flag_array[:, :, :, order])\n\n uv2.reorder_pols(order=\"AIPS\")\n # check that we have aips ordering again\n assert uv1 == uv2\n\n # check error on unknown order\n pytest.raises(ValueError, uv2.reorder_pols, {\"order\": \"foo\"})\n\n # check error if order is an array of the wrong length\n with pytest.raises(ValueError) as cm:\n uv2.reorder_pols(order=[3, 2, 1])\n assert str(cm.value).startswith(\"If order is an index array, it must\")\n\n\[email 
protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_reorder_blts(casa_uvfits):\n uv1 = casa_uvfits\n\n # test default reordering in detail\n uv2 = uv1.copy()\n uv2.reorder_blts()\n assert uv2.blt_order == (\"time\", \"baseline\")\n assert np.min(np.diff(uv2.time_array)) >= 0\n for this_time in np.unique(uv2.time_array):\n bls_2 = uv2.baseline_array[np.where(uv2.time_array == this_time)]\n bls_1 = uv1.baseline_array[np.where(uv2.time_array == this_time)]\n assert bls_1.shape == bls_2.shape\n assert np.min(np.diff(bls_2)) >= 0\n bl_inds = [np.where(bls_1 == bl)[0][0] for bl in bls_2]\n assert np.allclose(bls_1[bl_inds], bls_2)\n\n uvw_1 = uv1.uvw_array[np.where(uv2.time_array == this_time)[0], :]\n uvw_2 = uv2.uvw_array[np.where(uv2.time_array == this_time)[0], :]\n assert uvw_1.shape == uvw_2.shape\n assert np.allclose(uvw_1[bl_inds, :], uvw_2)\n\n data_1 = uv1.data_array[np.where(uv2.time_array == this_time)[0], :, :, :]\n data_2 = uv2.data_array[np.where(uv2.time_array == this_time)[0], :, :, :]\n assert data_1.shape == data_2.shape\n assert np.allclose(data_1[bl_inds, :, :, :], data_2)\n\n # check that ordering by time, ant1 is identical to time, baseline\n uv3 = uv1.copy()\n uv3.reorder_blts(order=\"time\", minor_order=\"ant1\")\n assert uv3.blt_order == (\"time\", \"ant1\")\n assert np.min(np.diff(uv3.time_array)) >= 0\n uv3.blt_order = uv2.blt_order\n assert uv2 == uv3\n\n uv3.reorder_blts(order=\"time\", minor_order=\"ant2\")\n assert uv3.blt_order == (\"time\", \"ant2\")\n assert np.min(np.diff(uv3.time_array)) >= 0\n\n # check that loopback works\n uv3.reorder_blts()\n assert uv2 == uv3\n\n # sort with a specified index array\n new_order = np.lexsort((uv3.baseline_array, uv3.time_array))\n uv3.reorder_blts(order=new_order)\n assert uv3.blt_order is None\n assert np.min(np.diff(uv3.time_array)) >= 0\n uv3.blt_order = (\"time\", \"baseline\")\n assert uv2 == uv3\n\n # test sensible defaulting if minor order = major order\n uv3.reorder_blts(order=\"time\", minor_order=\"time\")\n assert uv2 == uv3\n\n # test all combinations of major, minor order\n uv3.reorder_blts(order=\"baseline\")\n assert uv3.blt_order == (\"baseline\", \"time\")\n assert np.min(np.diff(uv3.baseline_array)) >= 0\n\n uv3.reorder_blts(order=\"ant1\")\n assert uv3.blt_order == (\"ant1\", \"ant2\")\n assert np.min(np.diff(uv3.ant_1_array)) >= 0\n\n uv3.reorder_blts(order=\"ant1\", minor_order=\"time\")\n assert uv3.blt_order == (\"ant1\", \"time\")\n assert np.min(np.diff(uv3.ant_1_array)) >= 0\n\n uv3.reorder_blts(order=\"ant1\", minor_order=\"baseline\")\n assert uv3.blt_order == (\"ant1\", \"baseline\")\n assert np.min(np.diff(uv3.ant_1_array)) >= 0\n\n uv3.reorder_blts(order=\"ant2\")\n assert uv3.blt_order == (\"ant2\", \"ant1\")\n assert np.min(np.diff(uv3.ant_2_array)) >= 0\n\n uv3.reorder_blts(order=\"ant2\", minor_order=\"time\")\n assert uv3.blt_order == (\"ant2\", \"time\")\n assert np.min(np.diff(uv3.ant_2_array)) >= 0\n\n uv3.reorder_blts(order=\"ant2\", minor_order=\"baseline\")\n assert uv3.blt_order == (\"ant2\", \"baseline\")\n assert np.min(np.diff(uv3.ant_2_array)) >= 0\n\n uv3.reorder_blts(order=\"bda\")\n assert uv3.blt_order == (\"bda\",)\n assert np.min(np.diff(uv3.integration_time)) >= 0\n assert np.min(np.diff(uv3.baseline_array)) >= 0\n\n # test doing conjugation along with a reorder\n # the file is already conjugated this way, so should be equal\n uv3.reorder_blts(order=\"time\", 
conj_convention=\"ant1<ant2\")\n assert uv2 == uv3\n\n # test errors\n with pytest.raises(ValueError) as cm:\n uv3.reorder_blts(order=\"foo\")\n assert str(cm.value).startswith(\"order must be one of\")\n\n with pytest.raises(ValueError) as cm:\n uv3.reorder_blts(order=np.arange(5))\n assert str(cm.value).startswith(\"If order is an index array, it must\")\n\n with pytest.raises(ValueError) as cm:\n uv3.reorder_blts(order=np.arange(5, dtype=np.float))\n assert str(cm.value).startswith(\"If order is an index array, it must\")\n\n with pytest.raises(ValueError) as cm:\n uv3.reorder_blts(order=np.arange(uv3.Nblts), minor_order=\"time\")\n assert str(cm.value).startswith(\n \"Minor order cannot be set if order is an index array\"\n )\n\n with pytest.raises(ValueError) as cm:\n uv3.reorder_blts(order=\"bda\", minor_order=\"time\")\n assert str(cm.value).startswith(\"minor_order cannot be specified if order is\")\n\n with pytest.raises(ValueError) as cm:\n uv3.reorder_blts(order=\"baseline\", minor_order=\"ant1\")\n assert str(cm.value).startswith(\"minor_order conflicts with order\")\n\n with pytest.raises(ValueError) as cm:\n uv3.reorder_blts(order=\"time\", minor_order=\"foo\")\n assert str(cm.value).startswith(\"minor_order can only be one of\")\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_sum_vis(casa_uvfits):\n # check sum_vis\n uv_full = casa_uvfits\n\n uv_half = uv_full.copy()\n uv_half.data_array = uv_full.data_array / 2\n uv_summed = uv_half.sum_vis(uv_half)\n\n assert np.array_equal(uv_summed.data_array, uv_full.data_array)\n assert uvutils._check_histories(\n uv_half.history + \" Visibilities summed \" \"using pyuvdata.\", uv_summed.history\n )\n\n # check diff_vis\n uv_diffed = uv_full.diff_vis(uv_half)\n\n assert np.array_equal(uv_diffed.data_array, uv_half.data_array)\n assert uvutils._check_histories(\n uv_full.history + \" Visibilities \" \"differenced using pyuvdata.\",\n uv_diffed.history,\n )\n\n # check in place\n uv_summed.diff_vis(uv_half, inplace=True)\n assert np.array_equal(uv_summed.data_array, uv_half.data_array)\n\n # check object_name merge\n uv_zenith = uv_full.copy()\n uv_zenith.object_name = \"zenith\"\n uv_merged = uv_zenith.sum_vis(uv_full)\n assert uv_merged.object_name == \"zenith-J1008+0730\"\n\n # check extra_keywords handling\n uv_keys = uv_full.copy()\n uv_keys.extra_keywords[\"test_key\"] = \"test_value\"\n uv_keys.extra_keywords[\"SPECSYS\"] = \"altered_value\"\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Keyword SPECSYS in _extra_keywords is different in the two objects. 
\"\n \"Taking the first object's entry.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv_merged_keys = uv_keys.sum_vis(uv_full)\n assert uv_merged_keys.extra_keywords[\"test_key\"] == \"test_value\"\n assert uv_merged_keys.extra_keywords[\"SPECSYS\"] == \"altered_value\"\n\n # check override_params\n uv_overrides = uv_full.copy()\n uv_overrides.instrument = \"test_telescope\"\n uv_overrides.telescope_location = [\n -1601183.15377712,\n -5042003.74810822,\n 3554841.17192104,\n ]\n uv_overrides_2 = uv_overrides.sum_vis(\n uv_full, override_params=[\"instrument\", \"telescope_location\"]\n )\n\n assert uv_overrides_2.instrument == \"test_telescope\"\n assert uv_overrides_2.telescope_location == [\n -1601183.15377712,\n -5042003.74810822,\n 3554841.17192104,\n ]\n\n # check error messages\n with pytest.raises(ValueError) as cm:\n uv_overrides = uv_overrides.sum_vis(uv_full, override_params=[\"fake\"])\n assert str(cm.value).startswith(\"Provided parameter fake is not a recognizable\")\n\n with pytest.raises(ValueError) as cm:\n uv_full.sum_vis(\"foo\")\n assert str(cm.value).startswith(\"Only UVData (or subclass) objects can be\")\n\n uv_full.instrument = \"foo\"\n with pytest.raises(ValueError) as cm:\n uv_full.sum_vis(uv_half, inplace=True)\n assert str(cm.value).startswith(\"UVParameter instrument \" \"does not match\")\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_add(casa_uvfits):\n uv_full = casa_uvfits\n\n # Add frequencies\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv1 += uv2\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. \"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add frequencies - out of order\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv2 += uv1\n uv2.history = uv_full.history\n assert uv2 == uv_full\n\n # Add polarizations\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific polarizations using pyuvdata. \"\n \"Combined data along polarization axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add polarizations - out of order\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n uv2 += uv1\n uv2.history = uv_full.history\n assert uv2 == uv_full\n\n # Add times\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2])\n uv2.select(times=times[len(times) // 2 :])\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times using pyuvdata. 
\"\n \"Combined data along baseline-time axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add baselines\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n ant_list = list(range(15)) # Roughly half the antennas in the data\n # All blts where ant_1 is in list\n ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]\n ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]\n uv1.select(blt_inds=ind1)\n uv2.select(blt_inds=ind2)\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific baseline-times using pyuvdata. \"\n \"Combined data along baseline-time axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add baselines - out of order\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv3 = uv_full.copy()\n ants = uv_full.get_ants()\n ants1 = ants[0:6]\n ants2 = ants[6:12]\n ants3 = ants[12:]\n\n # All blts where ant_1 is in list\n ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ants1]\n ind2 = [i for i in range(uv2.Nblts) if uv2.ant_1_array[i] in ants2]\n ind3 = [i for i in range(uv3.Nblts) if uv3.ant_1_array[i] in ants3]\n uv1.select(blt_inds=ind1)\n uv2.select(blt_inds=ind2)\n uv3.select(blt_inds=ind3)\n uv3.data_array = uv3.data_array[-1::-1, :, :, :]\n uv3.nsample_array = uv3.nsample_array[-1::-1, :, :, :]\n uv3.flag_array = uv3.flag_array[-1::-1, :, :, :]\n uv3.uvw_array = uv3.uvw_array[-1::-1, :]\n uv3.time_array = uv3.time_array[-1::-1]\n uv3.lst_array = uv3.lst_array[-1::-1]\n uv3.ant_1_array = uv3.ant_1_array[-1::-1]\n uv3.ant_2_array = uv3.ant_2_array[-1::-1]\n uv3.baseline_array = uv3.baseline_array[-1::-1]\n uv1 += uv3\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific baseline-times using pyuvdata. \"\n \"Combined data along baseline-time axis \"\n \"using pyuvdata. Combined data along \"\n \"baseline-time axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add multiple axes\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv_ref = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(\n times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]\n )\n uv2.select(\n times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]\n )\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times, polarizations using \"\n \"pyuvdata. 
Combined data along \"\n \"baseline-time, polarization axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n blt_ind1 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[0 : len(times) // 2]\n ]\n )\n blt_ind2 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[len(times) // 2 :]\n ]\n )\n # Zero out missing data in reference object\n uv_ref.data_array[blt_ind1, :, :, 2:] = 0.0\n uv_ref.nsample_array[blt_ind1, :, :, 2:] = 0.0\n uv_ref.flag_array[blt_ind1, :, :, 2:] = True\n uv_ref.data_array[blt_ind2, :, :, 0:2] = 0.0\n uv_ref.nsample_array[blt_ind2, :, :, 0:2] = 0.0\n uv_ref.flag_array[blt_ind2, :, :, 0:2] = True\n uv1.history = uv_full.history\n assert uv1 == uv_ref\n\n # Another combo\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv_ref = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))\n uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times, frequencies using \"\n \"pyuvdata. Combined data along \"\n \"baseline-time, frequency axis using \"\n \"pyuvdata.\",\n uv1.history,\n )\n blt_ind1 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[0 : len(times) // 2]\n ]\n )\n blt_ind2 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[len(times) // 2 :]\n ]\n )\n # Zero out missing data in reference object\n uv_ref.data_array[blt_ind1, :, 32:, :] = 0.0\n uv_ref.nsample_array[blt_ind1, :, 32:, :] = 0.0\n uv_ref.flag_array[blt_ind1, :, 32:, :] = True\n uv_ref.data_array[blt_ind2, :, 0:32, :] = 0.0\n uv_ref.nsample_array[blt_ind2, :, 0:32, :] = 0.0\n uv_ref.flag_array[blt_ind2, :, 0:32, :] = True\n uv1.history = uv_full.history\n assert uv1 == uv_ref\n\n # Add without inplace\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2])\n uv2.select(times=times[len(times) // 2 :])\n uv1 = uv1 + uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times using pyuvdata. 
\"\n \"Combined data along baseline-time \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Check warnings\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(33, 64))\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined frequencies are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.__add__(uv2)\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=[0])\n uv2.select(freq_chans=[3])\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined frequencies are not contiguous\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.__iadd__(uv2)\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=[0])\n uv2.select(freq_chans=[1])\n uv2.freq_array += uv2._channel_width.tols[1] / 2.0\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.__iadd__(uv2)\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[3])\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined polarizations are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.__iadd__(uv2)\n\n # Combining histories\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n uv2.history += \" testing the history. AIPS WTSCAL = 1.0\"\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific polarizations using pyuvdata. \"\n \"Combined data along polarization \"\n \"axis using pyuvdata. 
testing the history.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # test add of autocorr-only and crosscorr-only objects\n uv_full = UVData()\n uv_full.read_uvh5(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA.uvh5\"))\n bls = uv_full.get_antpairs()\n autos = [bl for bl in bls if bl[0] == bl[1]]\n cross = sorted(set(bls) - set(autos))\n uv_auto = uv_full.select(bls=autos, inplace=False)\n uv_cross = uv_full.select(bls=cross, inplace=False)\n uv1 = uv_auto + uv_cross\n assert uv1.Nbls == uv_auto.Nbls + uv_cross.Nbls\n uv2 = uv_cross + uv_auto\n assert uv2.Nbls == uv_auto.Nbls + uv_cross.Nbls\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_add_drift(casa_uvfits):\n uv_full = casa_uvfits\n uv_full.unphase_to_drift()\n\n # Add frequencies\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv1 += uv2\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. \"\n \"Combined data along frequency \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add polarizations\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific polarizations using pyuvdata. \"\n \"Combined data along polarization \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add times\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2])\n uv2.select(times=times[len(times) // 2 :])\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times using pyuvdata. \"\n \"Combined data along baseline-time \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add baselines\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n ant_list = list(range(15)) # Roughly half the antennas in the data\n # All blts where ant_1 is in list\n ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]\n ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]\n uv1.select(blt_inds=ind1)\n uv2.select(blt_inds=ind2)\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific baseline-times using pyuvdata. \"\n \"Combined data along baseline-time \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add multiple axes\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv_ref = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(\n times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]\n )\n uv2.select(\n times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]\n )\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times, polarizations using \"\n \"pyuvdata. 
Combined data along \"\n \"baseline-time, polarization \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n blt_ind1 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[0 : len(times) // 2]\n ]\n )\n blt_ind2 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[len(times) // 2 :]\n ]\n )\n # Zero out missing data in reference object\n uv_ref.data_array[blt_ind1, :, :, 2:] = 0.0\n uv_ref.nsample_array[blt_ind1, :, :, 2:] = 0.0\n uv_ref.flag_array[blt_ind1, :, :, 2:] = True\n uv_ref.data_array[blt_ind2, :, :, 0:2] = 0.0\n uv_ref.nsample_array[blt_ind2, :, :, 0:2] = 0.0\n uv_ref.flag_array[blt_ind2, :, :, 0:2] = True\n uv1.history = uv_full.history\n assert uv1 == uv_ref\n\n # Another combo\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv_ref = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))\n uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times, frequencies using \"\n \"pyuvdata. Combined data along \"\n \"baseline-time, frequency \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n blt_ind1 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[0 : len(times) // 2]\n ]\n )\n blt_ind2 = np.array(\n [\n ind\n for ind in range(uv_full.Nblts)\n if uv_full.time_array[ind] in times[len(times) // 2 :]\n ]\n )\n # Zero out missing data in reference object\n uv_ref.data_array[blt_ind1, :, 32:, :] = 0.0\n uv_ref.nsample_array[blt_ind1, :, 32:, :] = 0.0\n uv_ref.flag_array[blt_ind1, :, 32:, :] = True\n uv_ref.data_array[blt_ind2, :, 0:32, :] = 0.0\n uv_ref.nsample_array[blt_ind2, :, 0:32, :] = 0.0\n uv_ref.flag_array[blt_ind2, :, 0:32, :] = True\n uv1.history = uv_full.history\n assert uv1 == uv_ref\n\n # Add without inplace\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2])\n uv2.select(times=times[len(times) // 2 :])\n uv1 = uv1 + uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times using pyuvdata. 
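# Illustrative sketch, not part of the original tests: the reference-object
# masking above, factored into a hypothetical helper. Given the two halves of
# the unique times, it finds the baseline-time rows from each half and blanks
# the polarization slots that half never contributed, matching the 4-dim
# (blt, spw, freq, pol) array layout used in these tests.
import numpy as np


def _mask_missing_pols(uv_ref, times_first, times_second, npols_first=2):
    blt_first = np.nonzero(np.isin(uv_ref.time_array, times_first))[0]
    blt_second = np.nonzero(np.isin(uv_ref.time_array, times_second))[0]
    # Rows from the first half only carried the leading polarizations.
    uv_ref.data_array[blt_first, :, :, npols_first:] = 0.0
    uv_ref.nsample_array[blt_first, :, :, npols_first:] = 0.0
    uv_ref.flag_array[blt_first, :, :, npols_first:] = True
    # Rows from the second half only carried the trailing polarizations.
    uv_ref.data_array[blt_second, :, :, :npols_first] = 0.0
    uv_ref.nsample_array[blt_second, :, :, :npols_first] = 0.0
    uv_ref.flag_array[blt_second, :, :, :npols_first] = True
    return uv_ref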
\"\n \"Combined data along baseline-time \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Check warnings\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(33, 64))\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined frequencies are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.__add__(uv2)\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=[0])\n uv2.select(freq_chans=[3])\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined frequencies are not contiguous\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.__iadd__(uv2)\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[3])\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined polarizations are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.__iadd__(uv2)\n\n # Combining histories\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n uv2.history += \" testing the history. AIPS WTSCAL = 1.0\"\n uv1 += uv2\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific polarizations using pyuvdata. \"\n \"Combined data along polarization \"\n \"axis using pyuvdata. 
testing the history.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_break_add(casa_uvfits):\n # Test failure modes of add function\n uv_full = casa_uvfits\n\n # Wrong class\n uv1 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n pytest.raises(ValueError, uv1.__iadd__, np.zeros(5))\n\n # One phased, one not\n uv2 = uv_full.copy()\n uv2.unphase_to_drift()\n\n pytest.raises(ValueError, uv1.__iadd__, uv2)\n\n # Different units\n uv2 = uv_full.copy()\n uv2.select(freq_chans=np.arange(32, 64))\n uv2.vis_units = \"Jy\"\n pytest.raises(ValueError, uv1.__iadd__, uv2)\n\n # Overlapping data\n uv2 = uv_full.copy()\n pytest.raises(ValueError, uv1.__iadd__, uv2)\n\n # Different integration_time\n uv2 = uv_full.copy()\n uv2.select(freq_chans=np.arange(32, 64))\n uv2.integration_time *= 2\n pytest.raises(ValueError, uv1.__iadd__, uv2)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"test_func,extra_kwargs\", [(\"__add__\", {}), (\"fast_concat\", {\"axis\": \"blt\"})]\n)\ndef test_add_error_drift_and_rephase(casa_uvfits, test_func, extra_kwargs):\n uv_full = casa_uvfits\n\n with pytest.raises(ValueError) as cm:\n getattr(uv_full, test_func)(\n uv_full, phase_center_radec=(0, 45), unphase_to_drift=True, **extra_kwargs\n )\n assert str(cm.value).startswith(\n \"phase_center_radec cannot be set if \" \"unphase_to_drift is True.\"\n )\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"test_func,extra_kwargs\", [(\"__add__\", {}), (\"fast_concat\", {\"axis\": \"blt\"})]\n)\ndef test_add_this_phased_unphase_to_drift(uv_phase_time_split, test_func, extra_kwargs):\n (uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split\n\n func_kwargs = {\n \"unphase_to_drift\": True,\n \"inplace\": False,\n }\n func_kwargs.update(extra_kwargs)\n with uvtest.check_warnings(UserWarning, \"Unphasing this UVData object to drift\"):\n uv_out = getattr(uv_phase_1, test_func)(uv_raw_2, **func_kwargs)\n # the histories will be different here\n # but everything else should match.\n uv_out.history = copy.deepcopy(uv_raw.history)\n # ensure baseline time order is the same\n # because fast_concat will not order for us\n uv_out.reorder_blts(order=\"time\", minor_order=\"baseline\")\n assert uv_out.phase_type == \"drift\"\n assert uv_out == uv_raw\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"test_func,extra_kwargs\", [(\"__add__\", {}), (\"fast_concat\", {\"axis\": \"blt\"})]\n)\ndef test_add_other_phased_unphase_to_drift(\n uv_phase_time_split, test_func, extra_kwargs\n):\n (uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split\n\n func_kwargs = {\n \"unphase_to_drift\": True,\n \"inplace\": False,\n }\n func_kwargs.update(extra_kwargs)\n with uvtest.check_warnings(UserWarning, \"Unphasing other UVData object to drift\"):\n uv_out = getattr(uv_raw_1, test_func)(uv_phase_2, **func_kwargs)\n\n # the histories will be different here\n # but everything else should match.\n uv_out.history = copy.deepcopy(uv_raw.history)\n # ensure baseline time order is the same\n # because fast_concat will not order for us\n uv_out.reorder_blts(order=\"time\", minor_order=\"baseline\")\n assert 
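# Illustrative sketch, not part of the original tests: combining a phased and
# a drift-scan object by letting the combine step unphase on the fly, as the
# parametrized tests above do for both __add__ and fast_concat. uv_phased and
# uv_drift are hypothetical UVData objects covering disjoint baseline-times.
def _combine_mixed_phasing(uv_phased, uv_drift):
    combined = uv_phased.__add__(uv_drift, unphase_to_drift=True, inplace=False)
    # fast_concat takes the same keyword but needs an explicit axis and does
    # not reorder for us, so sort baseline-times before any comparison.
    concatenated = uv_phased.fast_concat(
        uv_drift, "blt", unphase_to_drift=True, inplace=False
    )
    concatenated.reorder_blts(order="time", minor_order="baseline")
    return combined, concatenated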
uv_out.phase_type == \"drift\"\n assert uv_out == uv_raw\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"test_func,extra_kwargs\", [(\"__add__\", {}), (\"fast_concat\", {\"axis\": \"blt\"})]\n)\ndef test_add_this_rephase_new_phase_center(\n uv_phase_time_split, test_func, extra_kwargs\n):\n (uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split\n\n phase_center_radec = (Angle(\"0d\").rad, Angle(\"-30d\").rad)\n\n # phase each half to different spots\n uv_raw_1.phase(\n ra=0, dec=0, use_ant_pos=True,\n )\n uv_raw_2.phase(\n ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True\n )\n # phase original to phase_center_radec\n uv_raw.phase(ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True)\n\n func_kwargs = {\n \"inplace\": False,\n \"phase_center_radec\": phase_center_radec,\n \"use_ant_pos\": True,\n }\n func_kwargs.update(extra_kwargs)\n with uvtest.check_warnings(\n UserWarning, \"Phasing this UVData object to phase_center_radec\"\n ):\n uv_out = getattr(uv_raw_1, test_func)(uv_raw_2, **func_kwargs)\n\n # the histories will be different here\n # but everything else should match.\n uv_out.history = copy.deepcopy(uv_raw.history)\n # ensure baseline time order is the same\n # because fast_concat will not order for us\n uv_out.reorder_blts(order=\"time\", minor_order=\"baseline\")\n assert (uv_out.phase_center_ra, uv_out.phase_center_dec) == phase_center_radec\n assert uv_out == uv_raw\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"test_func,extra_kwargs\", [(\"__add__\", {}), (\"fast_concat\", {\"axis\": \"blt\"})]\n)\ndef test_add_other_rephase_new_phase_center(\n uv_phase_time_split, test_func, extra_kwargs\n):\n (uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split\n\n phase_center_radec = (Angle(\"0d\").rad, Angle(\"-30d\").rad)\n\n # phase each half to different spots\n uv_raw_1.phase(\n ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True,\n )\n uv_raw_2.phase(\n ra=0, dec=0, use_ant_pos=True,\n )\n # phase original to phase_center_radec\n uv_raw.phase(\n ra=phase_center_radec[0], dec=phase_center_radec[1], use_ant_pos=True,\n )\n\n func_kwargs = {\n \"inplace\": False,\n \"phase_center_radec\": phase_center_radec,\n \"use_ant_pos\": True,\n }\n func_kwargs.update(extra_kwargs)\n with uvtest.check_warnings(\n UserWarning, \"Phasing other UVData object to phase_center_radec\"\n ):\n uv_out = getattr(uv_raw_1, test_func)(uv_raw_2, **func_kwargs)\n\n # the histories will be different here\n # but everything else should match.\n uv_out.history = copy.deepcopy(uv_raw.history)\n\n # ensure baseline time order is the same\n # because fast_concat will not order for us\n uv_out.reorder_blts(order=\"time\", minor_order=\"baseline\")\n assert uv_out.phase_type == \"phased\"\n assert (uv_out.phase_center_ra, uv_out.phase_center_dec) == phase_center_radec\n assert uv_out == uv_raw\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"test_func,extra_kwargs\", [(\"__add__\", {}), (\"fast_concat\", {\"axis\": \"blt\"})]\n)\ndef test_add_error_too_long_phase_center(uv_phase_time_split, test_func, extra_kwargs):\n (uv_phase_1, uv_phase_2, uv_phase, uv_raw_1, uv_raw_2, uv_raw) = uv_phase_time_split\n phase_center_radec = (Angle(\"0d\").rad, Angle(\"-30d\").rad, 7)\n func_kwargs = {\n \"inplace\": False,\n 
\"phase_center_radec\": phase_center_radec,\n }\n func_kwargs.update(extra_kwargs)\n with pytest.raises(ValueError) as cm:\n getattr(uv_phase_1, test_func)(uv_phase_2, **func_kwargs)\n assert str(cm.value).startswith(\"phase_center_radec should have length 2.\")\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_fast_concat(casa_uvfits):\n uv_full = casa_uvfits\n\n # Add frequencies\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n uv1.fast_concat(uv2, \"freq\", inplace=True)\n # Check history is correct, before replacing and doing a full object check\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific frequencies using pyuvdata. \"\n \"Combined data along frequency axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add frequencies - out of order\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined frequencies are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv2.fast_concat(uv1, \"freq\", inplace=True)\n\n assert uv2.Nfreqs == uv_full.Nfreqs\n assert uv2._freq_array != uv_full._freq_array\n assert uv2._data_array != uv_full._data_array\n\n # reorder frequencies and test that they are equal\n index_array = np.argsort(uv2.freq_array[0, :])\n uv2.freq_array = uv2.freq_array[:, index_array]\n uv2.data_array = uv2.data_array[:, :, index_array, :]\n uv2.nsample_array = uv2.nsample_array[:, :, index_array, :]\n uv2.flag_array = uv2.flag_array[:, :, index_array, :]\n uv2.history = uv_full.history\n assert uv2._freq_array == uv_full._freq_array\n assert uv2 == uv_full\n\n # Add polarizations\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n uv1.fast_concat(uv2, \"polarization\", inplace=True)\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific polarizations using pyuvdata. 
\"\n \"Combined data along polarization axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add polarizations - out of order\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined polarizations are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv2.fast_concat(uv1, \"polarization\", inplace=True)\n\n assert uv2._polarization_array != uv_full._polarization_array\n assert uv2._data_array != uv_full._data_array\n\n # reorder pols\n uv2.reorder_pols()\n uv2.history = uv_full.history\n assert uv2 == uv_full\n\n # Add times\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2])\n uv2.select(times=times[len(times) // 2 :])\n uv1.fast_concat(uv2, \"blt\", inplace=True)\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times using pyuvdata. \"\n \"Combined data along baseline-time axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Add baselines\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n # divide in half to keep in order\n ind1 = np.arange(uv1.Nblts // 2)\n ind2 = np.arange(uv1.Nblts // 2, uv1.Nblts)\n uv1.select(blt_inds=ind1)\n uv2.select(blt_inds=ind2)\n uv1.fast_concat(uv2, \"blt\", inplace=True)\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific baseline-times using pyuvdata. 
\"\n \"Combined data along baseline-time axis \"\n \"using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1, uv_full\n\n # Add baselines out of order\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(blt_inds=ind1)\n uv2.select(blt_inds=ind2)\n uv2.fast_concat(uv1, \"blt\", inplace=True)\n # test freq & pol arrays equal\n assert uv2._freq_array == uv_full._freq_array\n assert uv2._polarization_array == uv_full._polarization_array\n\n # test Nblt length arrays not equal but same shape\n assert uv2._ant_1_array != uv_full._ant_1_array\n assert uv2.ant_1_array.shape == uv_full.ant_1_array.shape\n assert uv2._ant_2_array != uv_full._ant_2_array\n assert uv2.ant_2_array.shape == uv_full.ant_2_array.shape\n assert uv2._uvw_array != uv_full._uvw_array\n assert uv2.uvw_array.shape == uv_full.uvw_array.shape\n assert uv2._time_array != uv_full._time_array\n assert uv2.time_array.shape == uv_full.time_array.shape\n assert uv2._baseline_array != uv_full._baseline_array\n assert uv2.baseline_array.shape == uv_full.baseline_array.shape\n assert uv2._data_array != uv_full._data_array\n assert uv2.data_array.shape == uv_full.data_array.shape\n\n # reorder blts to enable comparison\n uv2.reorder_blts()\n assert uv2.blt_order == (\"time\", \"baseline\")\n uv2.blt_order = None\n uv2.history = uv_full.history\n assert uv2 == uv_full\n\n # add baselines such that Nants_data needs to change\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n ant_list = list(range(15)) # Roughly half the antennas in the data\n # All blts where ant_1 is in list\n ind1 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] in ant_list]\n ind2 = [i for i in range(uv1.Nblts) if uv1.ant_1_array[i] not in ant_list]\n uv1.select(blt_inds=ind1)\n uv2.select(blt_inds=ind2)\n uv2.fast_concat(uv1, \"blt\", inplace=True)\n\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific baseline-times using pyuvdata. 
\"\n \"Combined data along baseline-time \"\n \"axis using pyuvdata.\",\n uv2.history,\n )\n\n # test freq & pol arrays equal\n assert uv2._freq_array == uv_full._freq_array\n assert uv2._polarization_array == uv_full._polarization_array\n\n # test Nblt length arrays not equal but same shape\n assert uv2._ant_1_array != uv_full._ant_1_array\n assert uv2.ant_1_array.shape == uv_full.ant_1_array.shape\n assert uv2._ant_2_array != uv_full._ant_2_array\n assert uv2.ant_2_array.shape == uv_full.ant_2_array.shape\n assert uv2._uvw_array != uv_full._uvw_array\n assert uv2.uvw_array.shape == uv_full.uvw_array.shape\n assert uv2._time_array != uv_full._time_array\n assert uv2.time_array.shape == uv_full.time_array.shape\n assert uv2._baseline_array != uv_full._baseline_array\n assert uv2.baseline_array.shape == uv_full.baseline_array.shape\n assert uv2._data_array != uv_full._data_array\n assert uv2.data_array.shape == uv_full.data_array.shape\n\n # reorder blts to enable comparison\n uv2.reorder_blts()\n assert uv2.blt_order == (\"time\", \"baseline\")\n uv2.blt_order = None\n uv2.history = uv_full.history\n assert uv2 == uv_full\n\n # Add multiple axes\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(\n times=times[0 : len(times) // 2], polarizations=uv1.polarization_array[0:2]\n )\n uv2.select(\n times=times[len(times) // 2 :], polarizations=uv2.polarization_array[2:4]\n )\n pytest.raises(ValueError, uv1.fast_concat, uv2, \"blt\", inplace=True)\n\n # Another combo\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2], freq_chans=np.arange(0, 32))\n uv2.select(times=times[len(times) // 2 :], freq_chans=np.arange(32, 64))\n pytest.raises(ValueError, uv1.fast_concat, uv2, \"blt\", inplace=True)\n\n # Add without inplace\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n times = np.unique(uv_full.time_array)\n uv1.select(times=times[0 : len(times) // 2])\n uv2.select(times=times[len(times) // 2 :])\n uv1 = uv1.fast_concat(uv2, \"blt\", inplace=False)\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific times using pyuvdata. 
\"\n \"Combined data along baseline-time \"\n \"axis using pyuvdata.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # Check warnings\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(33, 64))\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined frequencies are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.fast_concat(uv1, \"freq\", inplace=True)\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=[0])\n uv2.select(freq_chans=[3])\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined frequencies are not contiguous\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.fast_concat(uv2, \"freq\")\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=[0])\n uv2.select(freq_chans=[1])\n uv2.freq_array += uv2._channel_width.tols[1] / 2.0\n with uvtest.check_warnings(\n UserWarning,\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n nwarnings=3,\n ):\n uv1.fast_concat(uv2, \"freq\")\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[3])\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"Combined polarizations are not evenly spaced\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv1.fast_concat(uv2, \"polarization\")\n\n # Combining histories\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(polarizations=uv1.polarization_array[0:2])\n uv2.select(polarizations=uv2.polarization_array[2:4])\n uv2.history += \" testing the history. AIPS WTSCAL = 1.0\"\n uv1.fast_concat(uv2, \"polarization\", inplace=True)\n assert uvutils._check_histories(\n uv_full.history + \" Downselected to \"\n \"specific polarizations using pyuvdata. \"\n \"Combined data along polarization \"\n \"axis using pyuvdata. 
testing the history.\",\n uv1.history,\n )\n uv1.history = uv_full.history\n assert uv1 == uv_full\n\n # test add of autocorr-only and crosscorr-only objects\n uv_full = UVData()\n uv_full.read_uvh5(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA.uvh5\"))\n bls = uv_full.get_antpairs()\n autos = [bl for bl in bls if bl[0] == bl[1]]\n cross = sorted(set(bls) - set(autos))\n uv_auto = uv_full.select(bls=autos, inplace=False)\n uv_cross = uv_full.select(bls=cross, inplace=False)\n uv1 = uv_auto.fast_concat(uv_cross, \"blt\")\n assert uv1.Nbls == uv_auto.Nbls + uv_cross.Nbls\n uv2 = uv_cross.fast_concat(uv_auto, \"blt\")\n assert uv2.Nbls == uv_auto.Nbls + uv_cross.Nbls\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_fast_concat_errors(casa_uvfits):\n uv_full = casa_uvfits\n\n uv1 = uv_full.copy()\n uv2 = uv_full.copy()\n uv1.select(freq_chans=np.arange(0, 32))\n uv2.select(freq_chans=np.arange(32, 64))\n pytest.raises(ValueError, uv1.fast_concat, uv2, \"foo\", inplace=True)\n\n cal = UVCal()\n pytest.raises(ValueError, uv1.fast_concat, cal, \"freq\", inplace=True)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_key2inds(casa_uvfits):\n # Test function to interpret key as antpair, pol\n uv = casa_uvfits\n\n # Get an antpair/pol combo\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n pol = uv.polarization_array[0]\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n ind1, ind2, indp = uv._key2inds((ant1, ant2, pol))\n assert np.array_equal(bltind, ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal([0], indp[0])\n # Any of these inputs can also be a tuple of a tuple, so need to be checked twice.\n ind1, ind2, indp = uv._key2inds(((ant1, ant2, pol),))\n assert np.array_equal(bltind, ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal([0], indp[0])\n\n # Combo with pol as string\n ind1, ind2, indp = uv._key2inds((ant1, ant2, uvutils.polnum2str(pol)))\n assert np.array_equal([0], indp[0])\n ind1, ind2, indp = uv._key2inds(((ant1, ant2, uvutils.polnum2str(pol)),))\n assert np.array_equal([0], indp[0])\n\n # Check conjugation\n ind1, ind2, indp = uv._key2inds((ant2, ant1, pol))\n assert np.array_equal(bltind, ind2)\n assert np.array_equal(np.array([]), ind1)\n assert np.array_equal([0], indp[1])\n # Conjugation with pol as string\n ind1, ind2, indp = uv._key2inds((ant2, ant1, uvutils.polnum2str(pol)))\n assert np.array_equal(bltind, ind2)\n assert np.array_equal(np.array([]), ind1)\n assert np.array_equal([0], indp[1])\n assert np.array_equal([], indp[0])\n\n # Antpair only\n ind1, ind2, indp = uv._key2inds((ant1, ant2))\n assert np.array_equal(bltind, ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.arange(uv.Npols), indp[0])\n ind1, ind2, indp = uv._key2inds(((ant1, ant2)))\n assert np.array_equal(bltind, ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.arange(uv.Npols), indp[0])\n\n # Baseline number only\n ind1, ind2, indp = uv._key2inds(uv.antnums_to_baseline(ant1, ant2))\n assert np.array_equal(bltind, ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.arange(uv.Npols), indp[0])\n ind1, ind2, indp = uv._key2inds((uv.antnums_to_baseline(ant1, ant2),))\n assert np.array_equal(bltind, ind1)\n assert np.array_equal(np.array([]), ind2)\n assert 
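# Illustrative sketch, not part of the original tests: the key forms checked
# against the private _key2inds helper above are the same ones the public
# getters accept. A hedged example using get_data; uv is a hypothetical UVData
# object whose first baseline is (ant1, ant2) and whose first polarization is
# a same-handed product such as 'rr', so the simple conjugation rule applies.
import numpy as np


def _key_forms(uv):
    ant1, ant2 = uv.ant_1_array[0], uv.ant_2_array[0]
    pol = uv.polarization_array[0]
    d_pair = uv.get_data(ant1, ant2, pol)                    # antpair + pol
    d_bl = uv.get_data(uv.antnums_to_baseline(ant1, ant2))   # baseline number
    d_conj = uv.get_data(ant2, ant1, pol)                    # reversed pair
    # The reversed pair returns the complex conjugate of the stored data.
    assert np.allclose(d_conj, np.conj(d_pair))
    return d_pair, d_bl, d_conj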
np.array_equal(np.arange(uv.Npols), indp[0])\n\n # Pol number only\n ind1, ind2, indp = uv._key2inds(pol)\n assert np.array_equal(np.arange(uv.Nblts), ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.array([0]), indp[0])\n ind1, ind2, indp = uv._key2inds((pol))\n assert np.array_equal(np.arange(uv.Nblts), ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.array([0]), indp[0])\n\n # Pol string only\n ind1, ind2, indp = uv._key2inds(\"LL\")\n assert np.array_equal(np.arange(uv.Nblts), ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.array([1]), indp[0])\n ind1, ind2, indp = uv._key2inds((\"LL\"))\n assert np.array_equal(np.arange(uv.Nblts), ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.array([1]), indp[0])\n\n # Test invalid keys\n pytest.raises(KeyError, uv._key2inds, \"I\") # pol str not in data\n pytest.raises(KeyError, uv._key2inds, -8) # pol num not in data\n pytest.raises(KeyError, uv._key2inds, 6) # bl num not in data\n pytest.raises(KeyError, uv._key2inds, (1, 1)) # ant pair not in data\n pytest.raises(KeyError, uv._key2inds, (1, 1, \"rr\")) # ant pair not in data\n pytest.raises(KeyError, uv._key2inds, (0, 1, \"xx\")) # pol not in data\n\n # Test autos are handled correctly\n uv.ant_2_array[0] = uv.ant_1_array[0]\n ind1, ind2, indp = uv._key2inds((ant1, ant1, pol))\n assert np.array_equal(ind1, [0])\n assert np.array_equal(ind2, [])\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_key2inds_conj_all_pols(casa_uvfits):\n uv = casa_uvfits\n\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n ind1, ind2, indp = uv._key2inds((ant2, ant1))\n\n # Pols in data are 'rr', 'll', 'rl', 'lr'\n # So conjugated order should be [0, 1, 3, 2]\n assert np.array_equal(bltind, ind2)\n assert np.array_equal(np.array([]), ind1)\n assert np.array_equal(np.array([]), indp[0])\n assert np.array_equal([0, 1, 3, 2], indp[1])\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_key2inds_conj_all_pols_fringe(casa_uvfits):\n uv = casa_uvfits\n\n uv.select(polarizations=[\"rl\"])\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n # Mix one instance of this baseline.\n uv.ant_1_array[0] = ant2\n uv.ant_2_array[0] = ant1\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n ind1, ind2, indp = uv._key2inds((ant1, ant2))\n\n assert np.array_equal(bltind, ind1)\n assert np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.array([0]), indp[0])\n assert np.array_equal(np.array([]), indp[1])\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_key2inds_conj_all_pols_bl_fringe(casa_uvfits):\n uv = casa_uvfits\n\n uv.select(polarizations=[\"rl\"])\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n # Mix one instance of this baseline.\n uv.ant_1_array[0] = ant2\n uv.ant_2_array[0] = ant1\n uv.baseline_array[0] = uvutils.antnums_to_baseline(ant2, ant1, uv.Nants_telescope)\n bl = uvutils.antnums_to_baseline(ant1, ant2, uv.Nants_telescope)\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n ind1, ind2, indp = uv._key2inds(bl)\n\n assert np.array_equal(bltind, ind1)\n assert 
np.array_equal(np.array([]), ind2)\n assert np.array_equal(np.array([0]), indp[0])\n assert np.array_equal(np.array([]), indp[1])\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_key2inds_conj_all_pols_missing_data(casa_uvfits):\n uv = casa_uvfits\n\n uv.select(polarizations=[\"rl\"])\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n\n pytest.raises(KeyError, uv._key2inds, (ant2, ant1))\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_key2inds_conj_all_pols_bls(casa_uvfits):\n uv = casa_uvfits\n\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n bl = uvutils.antnums_to_baseline(ant2, ant1, uv.Nants_telescope)\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n ind1, ind2, indp = uv._key2inds(bl)\n\n # Pols in data are 'rr', 'll', 'rl', 'lr'\n # So conjugated order should be [0, 1, 3, 2]\n assert np.array_equal(bltind, ind2)\n assert np.array_equal(np.array([]), ind1)\n assert np.array_equal(np.array([]), indp[0])\n assert np.array_equal([0, 1, 3, 2], indp[1])\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_key2inds_conj_all_pols_missing_data_bls(casa_uvfits):\n uv = casa_uvfits\n uv.select(polarizations=[\"rl\"])\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n bl = uvutils.antnums_to_baseline(ant2, ant1, uv.Nants_telescope)\n\n pytest.raises(KeyError, uv._key2inds, bl)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_smart_slicing(casa_uvfits):\n # Test function to slice data\n uv = casa_uvfits\n\n # ind1 reg, ind2 empty, pol reg\n ind1 = 10 * np.arange(9)\n ind2 = []\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))\n dcheck = uv.data_array[ind1, :, :, :]\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n assert not d.flags.writeable\n # Ensure a view was returned\n uv.data_array[ind1[1], 0, 0, indp[0]] = 5.43\n assert d[1, 0, 0] == uv.data_array[ind1[1], 0, 0, indp[0]]\n\n # force copy\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []), force_copy=True)\n dcheck = uv.data_array[ind1, :, :, :]\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n assert d.flags.writeable\n # Ensure a copy was returned\n uv.data_array[ind1[1], 0, 0, indp[0]] = 4.3\n assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]\n\n # ind1 reg, ind2 empty, pol not reg\n ind1 = 10 * np.arange(9)\n ind2 = []\n indp = [0, 1, 3]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))\n dcheck = uv.data_array[ind1, :, :, :]\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n assert not d.flags.writeable\n # Ensure a copy was returned\n uv.data_array[ind1[1], 0, 0, indp[0]] = 1.2\n assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]\n\n # ind1 not reg, ind2 empty, pol reg\n ind1 = [0, 4, 5]\n ind2 = []\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))\n dcheck = uv.data_array[ind1, :, :, :]\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n assert not d.flags.writeable\n # Ensure a copy was returned\n uv.data_array[ind1[1], 0, 0, indp[0]] = 8.2\n assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]\n\n # ind1 not 
reg, ind2 empty, pol not reg\n ind1 = [0, 4, 5]\n ind2 = []\n indp = [0, 1, 3]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))\n dcheck = uv.data_array[ind1, :, :, :]\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n assert not d.flags.writeable\n # Ensure a copy was returned\n uv.data_array[ind1[1], 0, 0, indp[0]] = 3.4\n assert d[1, 0, 0] != uv.data_array[ind1[1], 0, 0, indp[0]]\n\n # ind1 empty, ind2 reg, pol reg\n # Note conjugation test ensures the result is a copy, not a view.\n ind1 = []\n ind2 = 10 * np.arange(9)\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))\n dcheck = uv.data_array[ind2, :, :, :]\n dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))\n assert np.all(d == dcheck)\n\n # ind1 empty, ind2 reg, pol not reg\n ind1 = []\n ind2 = 10 * np.arange(9)\n indp = [0, 1, 3]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))\n dcheck = uv.data_array[ind2, :, :, :]\n dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))\n assert np.all(d == dcheck)\n\n # ind1 empty, ind2 not reg, pol reg\n ind1 = []\n ind2 = [1, 4, 5, 10]\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))\n dcheck = uv.data_array[ind2, :, :, :]\n dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))\n assert np.all(d == dcheck)\n\n # ind1 empty, ind2 not reg, pol not reg\n ind1 = []\n ind2 = [1, 4, 5, 10]\n indp = [0, 1, 3]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))\n dcheck = uv.data_array[ind2, :, :, :]\n dcheck = np.squeeze(np.conj(dcheck[:, :, :, indp]))\n assert np.all(d == dcheck)\n\n # ind1, ind2 not empty, pol reg\n ind1 = np.arange(20)\n ind2 = np.arange(30, 40)\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, indp))\n dcheck = np.append(\n uv.data_array[ind1, :, :, :], np.conj(uv.data_array[ind2, :, :, :]), axis=0\n )\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n\n # ind1, ind2 not empty, pol not reg\n ind1 = np.arange(20)\n ind2 = np.arange(30, 40)\n indp = [0, 1, 3]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, indp))\n dcheck = np.append(\n uv.data_array[ind1, :, :, :], np.conj(uv.data_array[ind2, :, :, :]), axis=0\n )\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n\n # test single element\n ind1 = [45]\n ind2 = []\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []))\n dcheck = uv.data_array[ind1, :, :, :]\n dcheck = np.squeeze(dcheck[:, :, :, indp], axis=1)\n assert np.all(d == dcheck)\n\n # test single element\n ind1 = []\n ind2 = [45]\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, ([], indp))\n assert np.all(d == np.conj(dcheck))\n\n # Full squeeze\n ind1 = [45]\n ind2 = []\n indp = [0, 1]\n d = uv._smart_slicing(uv.data_array, ind1, ind2, (indp, []), squeeze=\"full\")\n dcheck = uv.data_array[ind1, :, :, :]\n dcheck = np.squeeze(dcheck[:, :, :, indp])\n assert np.all(d == dcheck)\n\n # Test invalid squeeze\n pytest.raises(\n ValueError,\n uv._smart_slicing,\n uv.data_array,\n ind1,\n ind2,\n (indp, []),\n squeeze=\"notasqueeze\",\n )\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_data(casa_uvfits):\n # Test get_data function for easy access to data\n uv = casa_uvfits\n\n # Get an antpair/pol combo\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n pol = uv.polarization_array[0]\n bltind = np.where((uv.ant_1_array == ant1) & 
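# Illustrative sketch, not part of the original tests: the view/copy behaviour
# checked above comes down to how numpy indexes. Evenly spaced indices can be
# converted to a slice, which is a view of the parent array; arbitrary index
# lists trigger fancy indexing, which always copies.
import numpy as np

_arr = np.arange(100.0).reshape(10, 10)
_view = _arr[0:9:1, :]           # regular slice -> shares memory with _arr
_copy = _arr[[0, 4, 5], :]       # fancy indexing -> independent copy
_arr[0, 0] = -1.0
assert _view[0, 0] == -1.0       # the view sees the change
assert _copy[0, 0] == 0.0        # the copy does not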
(uv.ant_2_array == ant2))[0]\n dcheck = np.squeeze(uv.data_array[bltind, :, :, 0])\n d = uv.get_data(ant1, ant2, pol)\n assert np.all(dcheck == d)\n\n d = uv.get_data(ant1, ant2, uvutils.polnum2str(pol))\n assert np.all(dcheck == d)\n\n d = uv.get_data((ant1, ant2, pol))\n assert np.all(dcheck == d)\n\n with pytest.raises(ValueError) as cm:\n uv.get_data((ant1, ant2, pol), (ant1, ant2, pol))\n assert str(cm.value).startswith(\"no more than 3 key values can be passed\")\n\n # Check conjugation\n d = uv.get_data(ant2, ant1, pol)\n assert np.all(dcheck == np.conj(d))\n\n # Check cross pol conjugation\n d = uv.get_data(ant2, ant1, uv.polarization_array[2])\n d1 = uv.get_data(ant1, ant2, uv.polarization_array[3])\n assert np.all(d == np.conj(d1))\n\n # Antpair only\n dcheck = np.squeeze(uv.data_array[bltind, :, :, :])\n d = uv.get_data(ant1, ant2)\n assert np.all(dcheck == d)\n\n # Pol number only\n dcheck = np.squeeze(uv.data_array[:, :, :, 0])\n d = uv.get_data(pol)\n assert np.all(dcheck == d)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_flags(casa_uvfits):\n # Test function for easy access to flags\n uv = casa_uvfits\n\n # Get an antpair/pol combo\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n pol = uv.polarization_array[0]\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n dcheck = np.squeeze(uv.flag_array[bltind, :, :, 0])\n d = uv.get_flags(ant1, ant2, pol)\n assert np.all(dcheck == d)\n\n d = uv.get_flags(ant1, ant2, uvutils.polnum2str(pol))\n assert np.all(dcheck == d)\n\n d = uv.get_flags((ant1, ant2, pol))\n assert np.all(dcheck == d)\n\n with pytest.raises(ValueError) as cm:\n uv.get_flags((ant1, ant2, pol), (ant1, ant2, pol))\n assert str(cm.value).startswith(\"no more than 3 key values can be passed\")\n\n # Check conjugation\n d = uv.get_flags(ant2, ant1, pol)\n assert np.all(dcheck == d)\n assert d.dtype == np.bool\n\n # Antpair only\n dcheck = np.squeeze(uv.flag_array[bltind, :, :, :])\n d = uv.get_flags(ant1, ant2)\n assert np.all(dcheck == d)\n\n # Pol number only\n dcheck = np.squeeze(uv.flag_array[:, :, :, 0])\n d = uv.get_flags(pol)\n assert np.all(dcheck == d)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_nsamples(casa_uvfits):\n # Test function for easy access to nsample array\n uv = casa_uvfits\n\n # Get an antpair/pol combo\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n pol = uv.polarization_array[0]\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n dcheck = np.squeeze(uv.nsample_array[bltind, :, :, 0])\n d = uv.get_nsamples(ant1, ant2, pol)\n assert np.all(dcheck == d)\n\n d = uv.get_nsamples(ant1, ant2, uvutils.polnum2str(pol))\n assert np.all(dcheck == d)\n\n d = uv.get_nsamples((ant1, ant2, pol))\n assert np.all(dcheck == d)\n\n with pytest.raises(ValueError) as cm:\n uv.get_nsamples((ant1, ant2, pol), (ant1, ant2, pol))\n assert str(cm.value).startswith(\"no more than 3 key values can be passed\")\n\n # Check conjugation\n d = uv.get_nsamples(ant2, ant1, pol)\n assert np.all(dcheck == d)\n\n # Antpair only\n dcheck = np.squeeze(uv.nsample_array[bltind, :, :, :])\n d = uv.get_nsamples(ant1, ant2)\n assert np.all(dcheck == d)\n\n # Pol number only\n dcheck = np.squeeze(uv.nsample_array[:, :, :, 0])\n d = uv.get_nsamples(pol)\n assert np.all(dcheck == d)\n\n\[email protected](\"ignore:The 
uvw_array does not match the expected values\")\ndef test_antpair2ind(paper_uvh5):\n # Test for baseline-time axis indexer\n uv = paper_uvh5\n\n # get indices\n inds = uv.antpair2ind(0, 1, ordered=False)\n # fmt: off\n np.testing.assert_array_equal(\n inds,\n np.array(\n [\n 1, 22, 43, 64, 85, 106, 127, 148, 169,\n 190, 211, 232, 253, 274, 295, 316, 337,\n 358, 379\n ]\n )\n )\n # fmt: on\n assert np.issubdtype(inds.dtype, np.integer)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_antpair2ind_conj(paper_uvh5):\n # conjugate (and use key rather than arg expansion)\n uv = paper_uvh5\n inds = uv.antpair2ind(0, 1, ordered=False)\n inds2 = uv.antpair2ind((1, 0), ordered=False)\n np.testing.assert_array_equal(inds, inds2)\n assert np.issubdtype(inds2.dtype, np.integer)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_antpair2ind_ordered(paper_uvh5):\n # test ordered\n uv = paper_uvh5\n inds = uv.antpair2ind(0, 1, ordered=False)\n\n # make sure conjugated baseline returns nothing\n inds2 = uv.antpair2ind(1, 0, ordered=True)\n assert inds2.size == 0\n\n # now use baseline actually in data\n inds2 = uv.antpair2ind(0, 1, ordered=True)\n np.testing.assert_array_equal(inds, inds2)\n assert np.issubdtype(inds2.dtype, np.integer)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_antpair2ind_autos(paper_uvh5):\n # test autos w/ and w/o ordered\n uv = paper_uvh5\n\n inds = uv.antpair2ind(0, 0, ordered=True)\n inds2 = uv.antpair2ind(0, 0, ordered=False)\n np.testing.assert_array_equal(inds, inds2)\n assert np.issubdtype(inds.dtype, np.integer)\n assert np.issubdtype(inds2.dtype, np.integer)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_antpair2ind_exceptions(paper_uvh5):\n # test exceptions\n uv = paper_uvh5\n\n with pytest.raises(ValueError, match=\"antpair2ind must be fed an antpair tuple\"):\n uv.antpair2ind(1)\n with pytest.raises(ValueError, match=\"antpair2ind must be fed an antpair tuple\"):\n uv.antpair2ind(\"bar\", \"foo\")\n with pytest.raises(ValueError, match=\"ordered must be a boolean\"):\n uv.antpair2ind(0, 1, \"foo\")\n\n return\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_times(casa_uvfits):\n # Test function for easy access to times, to work in conjunction with get_data\n uv = casa_uvfits\n # Get an antpair/pol combo (pol shouldn't actually effect result)\n ant1 = uv.ant_1_array[0]\n ant2 = uv.ant_2_array[0]\n pol = uv.polarization_array[0]\n bltind = np.where((uv.ant_1_array == ant1) & (uv.ant_2_array == ant2))[0]\n dcheck = uv.time_array[bltind]\n d = uv.get_times(ant1, ant2, pol)\n assert np.all(dcheck == d)\n\n d = uv.get_times(ant1, ant2, uvutils.polnum2str(pol))\n assert np.all(dcheck == d)\n\n d = uv.get_times((ant1, ant2, pol))\n assert np.all(dcheck == d)\n\n with pytest.raises(ValueError) as cm:\n uv.get_times((ant1, ant2, pol), (ant1, ant2, pol))\n assert str(cm.value).startswith(\"no more than 3 key values can be passed\")\n\n # Check conjugation\n d = uv.get_times(ant2, ant1, pol)\n assert np.all(dcheck == d)\n\n # Antpair only\n d = uv.get_times(ant1, ant2)\n assert np.all(dcheck == d)\n\n # Pol number only\n d = uv.get_times(pol)\n assert np.all(d == uv.time_array)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The 
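# Illustrative sketch, not part of the original tests: antpair2ind returns
# integer indices into the baseline-time axis for one antenna pair, either
# only as stored (ordered=True) or for either ordering (ordered=False), as
# the checks above show. uv is a hypothetical UVData object containing
# baseline (0, 1).
import numpy as np


def _rows_for_pair(uv):
    inds_any = uv.antpair2ind(0, 1, ordered=False)          # (0, 1) or (1, 0)
    inds_stored = uv.antpair2ind((0, 1), ordered=True)      # tuple key form
    assert np.issubdtype(inds_any.dtype, np.integer)
    # The indices can be used directly on any blt-shaped array.
    return uv.time_array[inds_any], inds_stored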
uvw_array does not match the expected values\")\ndef test_antpairpol_iter(casa_uvfits):\n # Test generator\n uv = casa_uvfits\n pol_dict = {\n uvutils.polnum2str(uv.polarization_array[i]): i for i in range(uv.Npols)\n }\n keys = []\n pols = set()\n bls = set()\n for key, d in uv.antpairpol_iter():\n keys += key\n bl = uv.antnums_to_baseline(key[0], key[1])\n blind = np.where(uv.baseline_array == bl)[0]\n bls.add(bl)\n pols.add(key[2])\n dcheck = np.squeeze(uv.data_array[blind, :, :, pol_dict[key[2]]])\n assert np.all(dcheck == d)\n assert len(bls) == len(uv.get_baseline_nums())\n assert len(pols) == uv.Npols\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_ants(casa_uvfits):\n # Test function to get unique antennas in data\n uv = casa_uvfits\n\n ants = uv.get_ants()\n for ant in ants:\n assert (ant in uv.ant_1_array) or (ant in uv.ant_2_array)\n for ant in uv.ant_1_array:\n assert ant in ants\n for ant in uv.ant_2_array:\n assert ant in ants\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_enu_antpos():\n uvd = UVData()\n uvd.read_uvh5(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA.uvh5\"))\n # no center, no pick data ants\n antpos, ants = uvd.get_ENU_antpos(center=False, pick_data_ants=False)\n assert len(ants) == 113\n assert np.isclose(antpos[0, 0], 19.340211050751535)\n assert ants[0] == 0\n # test default behavior\n antpos2, ants = uvd.get_ENU_antpos()\n\n assert np.all(antpos == antpos2)\n # center\n antpos, ants = uvd.get_ENU_antpos(center=True, pick_data_ants=False)\n assert np.isclose(antpos[0, 0], 22.472442651767714)\n # pick data ants\n antpos, ants = uvd.get_ENU_antpos(center=True, pick_data_ants=True)\n assert ants[0] == 9\n assert np.isclose(antpos[0, 0], -0.0026981323386223721)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_telescope_loc_xyz_check(paper_uvh5, tmp_path):\n # test that improper telescope locations can still be read\n uv = paper_uvh5\n uv.telescope_location = uvutils.XYZ_from_LatLonAlt(*uv.telescope_location)\n # fix LST values\n uv.set_lsts_from_time_array()\n fname = str(tmp_path / \"test.uvh5\")\n uv.write_uvh5(fname, run_check=False, check_extra=False, clobber=True)\n\n # try to read file without checks (passing is implicit)\n uv.read(fname, run_check=False)\n\n # try to read without checks: assert it fails\n pytest.raises(ValueError, uv.read, fname)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_pols(casa_uvfits):\n # Test function to get unique polarizations in string format\n uv = casa_uvfits\n pols = uv.get_pols()\n pols_data = [\"rr\", \"ll\", \"lr\", \"rl\"]\n assert sorted(pols) == sorted(pols_data)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_pols_x_orientation(paper_uvh5):\n uv_in = paper_uvh5\n\n uv_in.x_orientation = \"east\"\n\n pols = uv_in.get_pols()\n pols_data = [\"en\"]\n assert pols == pols_data\n\n uv_in.x_orientation = \"north\"\n\n pols = uv_in.get_pols()\n pols_data = [\"ne\"]\n assert pols == pols_data\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_get_feedpols(casa_uvfits):\n # Test function to get unique antenna feed polarizations in data. 
String format.\n uv = casa_uvfits\n pols = uv.get_feedpols()\n pols_data = [\"r\", \"l\"]\n assert sorted(pols) == sorted(pols_data)\n\n # Test break when pseudo-Stokes visibilities are present\n uv.polarization_array[0] = 1 # pseudo-Stokes I\n pytest.raises(ValueError, uv.get_feedpols)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_parse_ants(casa_uvfits):\n # Test function to get correct antenna pairs and polarizations\n uv = casa_uvfits\n\n # All baselines\n ant_str = \"all\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n assert isinstance(ant_pairs_nums, type(None))\n assert isinstance(polarizations, type(None))\n\n # Auto correlations\n ant_str = \"auto\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n assert Counter(ant_pairs_nums) == Counter([])\n assert isinstance(polarizations, type(None))\n\n # Cross correlations\n ant_str = \"cross\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n assert Counter(uv.get_antpairs()) == Counter(ant_pairs_nums)\n assert isinstance(polarizations, type(None))\n\n # pseudo-Stokes params\n ant_str = \"pI,pq,pU,pv\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n pols_expected = [4, 3, 2, 1]\n assert isinstance(ant_pairs_nums, type(None))\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Unparsible string\n ant_str = \"none\"\n pytest.raises(ValueError, uv.parse_ants, ant_str)\n\n # Single antenna number\n ant_str = \"0\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n # fmt: off\n ant_pairs_expected = [(0, 1), (0, 2), (0, 3), (0, 6), (0, 7), (0, 8),\n (0, 11), (0, 14), (0, 18), (0, 19), (0, 20),\n (0, 21), (0, 22), (0, 23), (0, 24), (0, 26),\n (0, 27)]\n # fmt: on\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Single antenna number not in the data\n ant_str = \"10\"\n with uvtest.check_warnings(\n UserWarning,\n \"Warning: Antenna number 10 passed, but not present in the ant_1_array \"\n \"or ant_2_array\",\n ):\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n\n assert isinstance(ant_pairs_nums, type(None))\n assert isinstance(polarizations, type(None))\n\n # Single antenna number with polarization, both not in the data\n ant_str = \"10x\"\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Warning: Antenna number 10 passed, but not present in the ant_1_array or \"\n \"ant_2_array\",\n \"Warning: Polarization XX,XY is not present in the polarization_array\",\n ],\n ):\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n assert isinstance(ant_pairs_nums, type(None))\n assert isinstance(polarizations, type(None))\n\n # Multiple antenna numbers as list\n ant_str = \"22,26\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n # fmt: off\n ant_pairs_expected = [(0, 22), (0, 26), (1, 22), (1, 26), (2, 22), (2, 26),\n (3, 22), (3, 26), (6, 22), (6, 26), (7, 22),\n (7, 26), (8, 22), (8, 26), (11, 22), (11, 26),\n (14, 22), (14, 26), (18, 22), (18, 26),\n (19, 22), (19, 26), (20, 22), (20, 26),\n (21, 22), (21, 26), (22, 23), (22, 24),\n (22, 26), (22, 27), (23, 26), (24, 26),\n (26, 27)]\n # fmt: on\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Single baseline\n ant_str = \"1_3\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 3)]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n 
assert isinstance(polarizations, type(None))\n\n # Single baseline with polarization\n ant_str = \"1l_3r\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 3)]\n pols_expected = [-4]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Single baseline with single polarization in first entry\n ant_str = \"1l_3,2x_3\"\n with uvtest.check_warnings(\n UserWarning,\n \"Warning: Polarization XX,XY is not present in the polarization_array\",\n ):\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n\n ant_pairs_expected = [(1, 3), (2, 3)]\n pols_expected = [-2, -4]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Single baseline with single polarization in last entry\n ant_str = \"1_3l,2_3x\"\n with uvtest.check_warnings(\n UserWarning,\n \"Warning: Polarization XX,YX is not present in the polarization_array\",\n ):\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 3), (2, 3)]\n pols_expected = [-2, -3]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Multiple baselines as list\n ant_str = \"1_2,1_3,1_11\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 2), (1, 3), (1, 11)]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Multiples baselines with polarizations as list\n ant_str = \"1r_2l,1l_3l,1r_11r\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 2), (1, 3), (1, 11)]\n pols_expected = [-1, -2, -3]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Specific baselines with parenthesis\n ant_str = \"(1,3)_11\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 11), (3, 11)]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Specific baselines with parenthesis\n ant_str = \"1_(3,11)\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 3), (1, 11)]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Antenna numbers with polarizations\n ant_str = \"(1l,2r)_(3l,6r)\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 3), (1, 6), (2, 3), (2, 6)]\n pols_expected = [-1, -2, -3, -4]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Antenna numbers with - for avoidance\n ant_str = \"1_(-3,11)\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 11)]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Remove specific antenna number\n ant_str = \"1,-3\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [\n (0, 1),\n (1, 2),\n (1, 6),\n (1, 7),\n (1, 8),\n (1, 11),\n (1, 14),\n (1, 18),\n (1, 19),\n (1, 20),\n (1, 21),\n (1, 22),\n (1, 23),\n (1, 24),\n (1, 26),\n (1, 27),\n ]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Remove specific baseline (same expected antenna pairs as above 
example)\n ant_str = \"1,-1_3\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Antenna numbers with polarizations and - for avoidance\n ant_str = \"1l_(-3r,11l)\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 11)]\n pols_expected = [-2]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Antenna numbers and pseudo-Stokes parameters\n ant_str = \"(1l,2r)_(3l,6r),pI,pq\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 3), (1, 6), (2, 3), (2, 6)]\n pols_expected = [2, 1, -1, -2, -3, -4]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Multiple baselines with multiple polarizations, one pol to be removed\n ant_str = \"1l_2,1l_3,-1l_3r\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 2), (1, 3)]\n pols_expected = [-2]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Multiple baselines with multiple polarizations, one pol (not in data)\n # to be removed\n ant_str = \"1l_2,1l_3,-1x_3y\"\n with uvtest.check_warnings(\n UserWarning, \"Warning: Polarization XY is not present in the polarization_array\"\n ):\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = [(1, 2), (1, 3)]\n pols_expected = [-2, -4]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Test print toggle on single baseline with polarization\n ant_str = \"1l_2l\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str, print_toggle=True)\n ant_pairs_expected = [(1, 2)]\n pols_expected = [-2]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert Counter(polarizations) == Counter(pols_expected)\n\n # Test ant_str='auto' on file with auto correlations\n uv = UVData()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA.uvh5\")\n uv.read(testfile)\n\n ant_str = \"auto\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_nums = [\n 9,\n 10,\n 20,\n 22,\n 31,\n 43,\n 53,\n 64,\n 65,\n 72,\n 80,\n 81,\n 88,\n 89,\n 96,\n 97,\n 104,\n 105,\n 112,\n ]\n ant_pairs_autos = [(ant_i, ant_i) for ant_i in ant_nums]\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_autos)\n assert isinstance(polarizations, type(None))\n\n # Test cross correlation extraction on data with auto + cross\n ant_str = \"cross\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_cross = list(itertools.combinations(ant_nums, 2))\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_cross)\n assert isinstance(polarizations, type(None))\n\n # Remove only polarization of single baseline\n ant_str = \"all,-9x_10x\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = ant_pairs_autos + ant_pairs_cross\n ant_pairs_expected.remove((9, 10))\n assert Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n # Test appending all to beginning of strings that start with -\n ant_str = \"-9\"\n ant_pairs_nums, polarizations = uv.parse_ants(ant_str)\n ant_pairs_expected = ant_pairs_autos + ant_pairs_cross\n for ant_i in ant_nums:\n ant_pairs_expected.remove((9, ant_i))\n assert 
Counter(ant_pairs_nums) == Counter(ant_pairs_expected)\n assert isinstance(polarizations, type(None))\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_select_with_ant_str(casa_uvfits):\n # Test select function with ant_str argument\n uv = casa_uvfits\n inplace = False\n\n # All baselines\n ant_str = \"all\"\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Cross correlations\n ant_str = \"cross\"\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n # All baselines in data are cross correlations\n\n # Single antenna number\n ant_str = \"0\"\n ant_pairs = [\n (0, 1),\n (0, 2),\n (0, 3),\n (0, 6),\n (0, 7),\n (0, 8),\n (0, 11),\n (0, 14),\n (0, 18),\n (0, 19),\n (0, 20),\n (0, 21),\n (0, 22),\n (0, 23),\n (0, 24),\n (0, 26),\n (0, 27),\n ]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Single antenna number not present in data\n ant_str = \"10\"\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Warning: Antenna number 10 passed, but not present in the \"\n \"ant_1_array or ant_2_array\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv.select(ant_str=ant_str, inplace=inplace)\n\n # Multiple antenna numbers as list\n ant_str = \"22,26\"\n ant_pairs = [\n (0, 22),\n (0, 26),\n (1, 22),\n (1, 26),\n (2, 22),\n (2, 26),\n (3, 22),\n (3, 26),\n (6, 22),\n (6, 26),\n (7, 22),\n (7, 26),\n (8, 22),\n (8, 26),\n (11, 22),\n (11, 26),\n (14, 22),\n (14, 26),\n (18, 22),\n (18, 26),\n (19, 22),\n (19, 26),\n (20, 22),\n (20, 26),\n (21, 22),\n (21, 26),\n (22, 23),\n (22, 24),\n (22, 26),\n (22, 27),\n (23, 26),\n (24, 26),\n (26, 27),\n ]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Single baseline\n ant_str = \"1_3\"\n ant_pairs = [(1, 3)]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Single baseline with polarization\n ant_str = \"1l_3r\"\n ant_pairs = [(1, 3)]\n pols = [\"lr\"]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(pols)\n\n # Single baseline with single polarization in first entry\n ant_str = \"1l_3,2x_3\"\n # x,y pols not present in data\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Warning: Polarization XX,XY is not present in the polarization_array\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv.select(ant_str=ant_str, inplace=inplace)\n # with polarizations in data\n ant_str = \"1l_3,2_3\"\n ant_pairs = [(1, 3), (2, 3)]\n pols = [\"ll\", \"lr\"]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(pols)\n\n # Single baseline with single polarization in last entry\n ant_str = \"1_3l,2_3x\"\n # x,y pols not present in 
data\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Warning: Polarization XX,YX is not present in the polarization_array\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n # with polarizations in data\n ant_str = \"1_3l,2_3\"\n ant_pairs = [(1, 3), (2, 3)]\n pols = [\"ll\", \"rl\"]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(pols)\n\n # Multiple baselines as list\n ant_str = \"1_2,1_3,1_10\"\n # Antenna number 10 not in data\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Warning: Antenna number 10 passed, but not present in the \"\n \"ant_1_array or ant_2_array\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n\n ant_pairs = [(1, 2), (1, 3)]\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Multiples baselines with polarizations as list\n ant_str = \"1r_2l,1l_3l,1r_11r\"\n ant_pairs = [(1, 2), (1, 3), (1, 11)]\n pols = [\"rr\", \"ll\", \"rl\"]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(pols)\n\n # Specific baselines with parenthesis\n ant_str = \"(1,3)_11\"\n ant_pairs = [(1, 11), (3, 11)]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Specific baselines with parenthesis\n ant_str = \"1_(3,11)\"\n ant_pairs = [(1, 3), (1, 11)]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Antenna numbers with polarizations\n ant_str = \"(1l,2r)_(3l,6r)\"\n ant_pairs = [(1, 3), (1, 6), (2, 3), (2, 6)]\n pols = [\"rr\", \"ll\", \"rl\", \"lr\"]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(pols)\n\n # Antenna numbers with - for avoidance\n ant_str = \"1_(-3,11)\"\n ant_pairs = [(1, 11)]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n ant_str = \"(-1,3)_11\"\n ant_pairs = [(3, 11)]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Remove specific antenna number\n ant_str = \"1,-3\"\n ant_pairs = [\n (0, 1),\n (1, 2),\n (1, 6),\n (1, 7),\n (1, 8),\n (1, 11),\n (1, 14),\n (1, 18),\n (1, 19),\n (1, 20),\n (1, 21),\n (1, 22),\n (1, 23),\n (1, 24),\n (1, 26),\n (1, 27),\n ]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Remove specific baseline\n ant_str = \"1,-1_3\"\n ant_pairs = [\n (0, 1),\n (1, 2),\n (1, 6),\n (1, 7),\n (1, 8),\n (1, 11),\n (1, 14),\n (1, 18),\n (1, 19),\n (1, 20),\n (1, 21),\n (1, 22),\n (1, 23),\n (1, 24),\n (1, 26),\n (1, 27),\n ]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n 
assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Antenna numbers with polarizations and - for avoidance\n ant_str = \"1l_(-3r,11l)\"\n ant_pairs = [(1, 11)]\n pols = [\"ll\"]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(pols)\n\n # Test pseudo-Stokes params with select\n ant_str = \"pi,pQ\"\n pols = [\"pQ\", \"pI\"]\n uv.polarization_array = np.array([4, 3, 2, 1])\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(uv.get_antpairs())\n assert Counter(uv2.get_pols()) == Counter(pols)\n\n # Test ant_str = 'auto' on file with auto correlations\n uv = UVData()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA.uvh5\")\n uv.read(testfile)\n\n ant_str = \"auto\"\n ant_nums = [\n 9,\n 10,\n 20,\n 22,\n 31,\n 43,\n 53,\n 64,\n 65,\n 72,\n 80,\n 81,\n 88,\n 89,\n 96,\n 97,\n 104,\n 105,\n 112,\n ]\n ant_pairs_autos = [(ant_i, ant_i) for ant_i in ant_nums]\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs_autos)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Test cross correlation extraction on data with auto + cross\n ant_str = \"cross\"\n ant_pairs_cross = list(itertools.combinations(ant_nums, 2))\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs_cross)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Remove only polarization of single baseline\n ant_str = \"all,-9x_10x\"\n ant_pairs = ant_pairs_autos + ant_pairs_cross\n ant_pairs.remove((9, 10))\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n # Test appending all to beginning of strings that start with -\n ant_str = \"-9\"\n ant_pairs = ant_pairs_autos + ant_pairs_cross\n for ant_i in ant_nums:\n ant_pairs.remove((9, ant_i))\n uv2 = uv.select(ant_str=ant_str, inplace=inplace)\n assert Counter(uv2.get_antpairs()) == Counter(ant_pairs)\n assert Counter(uv2.get_pols()) == Counter(uv.get_pols())\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\n \"kwargs,message\",\n [\n (\n {\"ant_str\": \"\", \"antenna_nums\": []},\n \"Cannot provide ant_str with antenna_nums, antenna_names, bls, or \"\n \"polarizations.\",\n ),\n (\n {\"ant_str\": \"\", \"antenna_names\": []},\n \"Cannot provide ant_str with antenna_nums, antenna_names, bls, or \"\n \"polarizations.\",\n ),\n (\n {\"ant_str\": \"\", \"bls\": []},\n \"Cannot provide ant_str with antenna_nums, antenna_names, bls, or \"\n \"polarizations.\",\n ),\n (\n {\"ant_str\": \"\", \"polarizations\": []},\n \"Cannot provide ant_str with antenna_nums, antenna_names, bls, or \"\n \"polarizations.\",\n ),\n ({\"ant_str\": \"auto\"}, \"There is no data matching ant_str=auto in this object.\"),\n (\n {\"ant_str\": \"pI,pq,pU,pv\"},\n \"Polarization 4 is not present in the polarization_array\",\n ),\n ({\"ant_str\": \"none\"}, \"Unparsible argument none\"),\n ],\n)\ndef test_select_with_ant_str_errors(casa_uvfits, kwargs, message):\n uv = casa_uvfits\n\n with pytest.raises(ValueError, match=message):\n uv.select(**kwargs)\n\n\ndef test_set_uvws_from_antenna_pos():\n # Test set_uvws_from_antenna_positions function with phased data\n uv_object = 
UVData()\n testfile = os.path.join(DATA_PATH, \"1133866760.uvfits\")\n uv_object.read_uvfits(testfile)\n orig_uvw_array = np.copy(uv_object.uvw_array)\n\n with pytest.raises(ValueError) as cm:\n uv_object.set_uvws_from_antenna_positions()\n assert str(cm.value).startswith(\"UVW calculation requires unphased data.\")\n\n with pytest.raises(ValueError) as cm:\n with uvtest.check_warnings(UserWarning, \"Data will be unphased\"):\n uv_object.set_uvws_from_antenna_positions(\n allow_phasing=True, orig_phase_frame=\"xyz\"\n )\n assert str(cm.value).startswith(\"Invalid parameter orig_phase_frame.\")\n\n with pytest.raises(ValueError) as cm:\n with uvtest.check_warnings(UserWarning, \"Data will be unphased\"):\n uv_object.set_uvws_from_antenna_positions(\n allow_phasing=True, orig_phase_frame=\"gcrs\", output_phase_frame=\"xyz\"\n )\n assert str(cm.value).startswith(\"Invalid parameter output_phase_frame.\")\n\n with uvtest.check_warnings(UserWarning, \"Data will be unphased\"):\n uv_object.set_uvws_from_antenna_positions(\n allow_phasing=True, orig_phase_frame=\"gcrs\", output_phase_frame=\"gcrs\"\n )\n\n max_diff = np.amax(np.absolute(np.subtract(orig_uvw_array, uv_object.uvw_array)))\n assert np.isclose(max_diff, 0.0, atol=2)\n\n\ndef test_get_antenna_redundancies():\n uv0 = UVData()\n uv0.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n\n old_bl_array = np.copy(uv0.baseline_array)\n red_gps, centers, lengths = uv0.get_redundancies(\n use_antpos=True, include_autos=False, conjugate_bls=True\n )\n # new and old baseline Numbers are not the same (different conjugation)\n assert not np.allclose(uv0.baseline_array, old_bl_array)\n\n # assert all baselines are in the data (because it's conjugated to match)\n for i, gp in enumerate(red_gps):\n for bl in gp:\n assert bl in uv0.baseline_array\n\n # conjugate data differently\n uv0.conjugate_bls(convention=\"ant1<ant2\")\n new_red_gps, new_centers, new_lengths, conjs = uv0.get_redundancies(\n use_antpos=True, include_autos=False, include_conjugates=True\n )\n\n assert conjs is None\n\n apos, anums = uv0.get_ENU_antpos()\n new_red_gps, new_centers, new_lengths = uvutils.get_antenna_redundancies(\n anums, apos, include_autos=False\n )\n\n # all redundancy info is the same\n assert red_gps == new_red_gps\n assert np.allclose(centers, new_centers)\n assert np.allclose(lengths, new_lengths)\n\n\[email protected](\"method\", (\"select\", \"average\"))\[email protected](\"reconjugate\", (True, False))\[email protected](\"flagging_level\", (\"none\", \"some\", \"all\"))\ndef test_redundancy_contract_expand(method, reconjugate, flagging_level):\n # Test that a UVData object can be reduced to one baseline from each redundant group\n # and restored to its original form.\n\n uv0 = UVData()\n uv0.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n\n # Fails at lower precision because some baselines fall into multiple\n # redundant groups\n tol = 0.02\n # Assign identical data to each redundant group:\n red_gps, centers, lengths = uv0.get_redundancies(\n tol=tol, use_antpos=True, conjugate_bls=True\n )\n for gp_ind, gp in enumerate(red_gps):\n for bl in gp:\n inds = np.where(bl == uv0.baseline_array)\n uv0.data_array[inds] *= 0\n uv0.data_array[inds] += complex(gp_ind)\n\n index_bls = [gp[0] for gp in red_gps]\n if flagging_level == \"none\":\n assert np.all(~uv0.flag_array)\n elif flagging_level == \"some\":\n # flag all the index baselines in a redundant group\n for bl in 
index_bls:\n bl_locs = np.where(uv0.baseline_array == bl)\n uv0.flag_array[bl_locs, :, :, :] = True\n elif flagging_level == \"all\":\n uv0.flag_array[:] = True\n uv0.check()\n assert np.all(uv0.flag_array)\n\n if reconjugate:\n uv0.conjugate_bls()\n\n uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)\n\n if method == \"average\":\n gp_bl_use = []\n nbls_group = []\n for gp in red_gps:\n bls_init = [bl for bl in gp if bl in uv0.baseline_array]\n nbls_group.append(len(bls_init))\n bl_use = [bl for bl in gp if bl in uv2.baseline_array]\n if len(bl_use) == 0:\n # not all possible baselines were present in uv0\n gp_bl_use.append(None)\n else:\n assert len(bl_use) == 1\n gp_bl_use.append(bl_use[0])\n\n for gp_ind, bl in enumerate(gp_bl_use):\n if bl is None:\n continue\n if flagging_level == \"none\" or flagging_level == \"all\":\n assert np.all(uv2.get_nsamples(bl) == nbls_group[gp_ind])\n else:\n assert np.all(uv2.get_nsamples(bl) == max((nbls_group[gp_ind] - 1), 1))\n if flagging_level == \"all\":\n assert np.all(uv2.flag_array)\n else:\n for gp_ind, bl in enumerate(gp_bl_use):\n if nbls_group[gp_ind] > 1:\n assert np.all(~uv2.get_flags(bl))\n else:\n assert np.all(uv2.nsample_array == 1)\n if flagging_level == \"some\" or flagging_level == \"all\":\n assert np.all(uv2.flag_array)\n else:\n assert np.all(~uv2.flag_array)\n\n # Compare in-place to separated compression.\n uv3 = uv0.copy()\n uv3.compress_by_redundancy(method=method, tol=tol)\n assert uv2 == uv3\n\n # check inflating gets back to the original\n with uvtest.check_warnings(\n UserWarning, match=\"Missing some redundant groups. Filling in available data.\"\n ):\n uv2.inflate_by_redundancy(tol=tol)\n\n # Confirm that we get the same result looping inflate -> compress -> inflate.\n uv3 = uv2.compress_by_redundancy(method=method, tol=tol, inplace=False)\n with uvtest.check_warnings(\n UserWarning, match=\"Missing some redundant groups. 
Filling in available data.\"\n ):\n uv3.inflate_by_redundancy(tol=tol)\n\n if method == \"average\":\n # with average, the nsample_array goes up by the number of baselines\n # averaged together.\n assert not np.allclose(uv3.nsample_array, uv2.nsample_array)\n # reset it to test other parameters\n uv3.nsample_array = uv2.nsample_array\n uv3.history = uv2.history\n assert uv2 == uv3\n\n uv2.history = uv0.history\n # Inflation changes the baseline ordering into the order of the redundant groups.\n # reorder bls for comparison\n uv0.reorder_blts(conj_convention=\"u>0\")\n uv2.reorder_blts(conj_convention=\"u>0\")\n uv2._uvw_array.tols = [0, tol]\n\n if method == \"average\":\n # with average, the nsample_array goes up by the number of baselines\n # averaged together.\n assert not np.allclose(uv2.nsample_array, uv0.nsample_array)\n # reset it to test other parameters\n uv2.nsample_array = uv0.nsample_array\n if flagging_level == \"some\":\n if method == \"select\":\n # inflated array will be entirely flagged\n assert np.all(uv2.flag_array)\n assert not np.allclose(uv0.flag_array, uv2.flag_array)\n uv2.flag_array = uv0.flag_array\n else:\n # flag arrays will not match -- inflated array will mostly be unflagged\n # it will only be flagged if only one in group\n assert not np.allclose(uv0.flag_array, uv2.flag_array)\n uv2.flag_array = uv0.flag_array\n\n assert uv2 == uv0\n\n\[email protected](\"method\", (\"select\", \"average\"))\[email protected](\"flagging_level\", (\"none\", \"some\", \"all\"))\ndef test_redundancy_contract_expand_variable_data(method, flagging_level):\n # Test that a UVData object can be reduced to one baseline from each redundant group\n # and restored to its original form.\n\n uv0 = UVData()\n uv0.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n\n # Fails at lower precision because some baselines fall into multiple\n # redundant groups\n tol = 0.02\n # Assign identical data to each redundant group in comparison object\n # Assign data to the index baseline and zeros elsewhere in the one to compress\n red_gps, centers, lengths = uv0.get_redundancies(\n tol=tol, use_antpos=True, conjugate_bls=True\n )\n index_bls = [gp[0] for gp in red_gps]\n uv0.data_array *= 0\n uv1 = uv0.copy()\n for gp_ind, gp in enumerate(red_gps):\n for bl in gp:\n inds = np.where(bl == uv0.baseline_array)\n uv1.data_array[inds] += complex(gp_ind)\n if bl in index_bls:\n uv0.data_array[inds] += complex(gp_ind)\n\n if flagging_level == \"none\":\n assert np.all(~uv0.flag_array)\n elif flagging_level == \"some\":\n # flag all the non index baselines in a redundant group\n uv0.flag_array[:, :, :, :] = True\n for bl in index_bls:\n bl_locs = np.where(uv0.baseline_array == bl)\n uv0.flag_array[bl_locs, :, :, :] = False\n elif flagging_level == \"all\":\n uv0.flag_array[:] = True\n uv0.check()\n assert np.all(uv0.flag_array)\n\n uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)\n\n # inflate to get back to the original size\n with uvtest.check_warnings(\n UserWarning, match=\"Missing some redundant groups. 
Filling in available data.\"\n ):\n uv2.inflate_by_redundancy(tol=tol)\n\n uv2.history = uv1.history\n # Inflation changes the baseline ordering into the order of the redundant groups.\n # reorder bls for comparison\n uv1.reorder_blts(conj_convention=\"u>0\")\n uv2.reorder_blts(conj_convention=\"u>0\")\n uv2._uvw_array.tols = [0, tol]\n\n if method == \"select\":\n if flagging_level == \"all\":\n assert uv2._flag_array != uv1._flag_array\n uv2.flag_array = uv1.flag_array\n assert uv2 == uv1\n else:\n if flagging_level == \"some\":\n for gp in red_gps:\n bls_init = [bl for bl in gp if bl in uv1.baseline_array]\n for bl in bls_init:\n assert np.all(uv2.get_data(bl) == uv1.get_data(bl))\n assert np.all(uv2.get_nsamples(bl) == uv1.get_nsamples(bl))\n else:\n assert uv2.data_array.min() < uv1.data_array.min()\n assert np.all(uv2.data_array <= uv1.data_array)\n for gp in red_gps:\n bls_init = [bl for bl in gp if bl in uv1.baseline_array]\n for bl in bls_init:\n assert np.all(\n uv2.get_data(bl) == (uv1.get_data(bl) / len(bls_init))\n )\n assert np.all(uv2.get_nsamples(bl) == len(bls_init))\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"method\", (\"select\", \"average\"))\ndef test_redundancy_contract_expand_nblts_not_nbls_times_ntimes(method, casa_uvfits):\n uv0 = casa_uvfits\n\n # check that Nblts != Nbls * Ntimes\n assert uv0.Nblts != uv0.Nbls * uv0.Ntimes\n\n tol = 1.0\n\n # Assign identical data to each redundant group:\n red_gps, centers, lengths = uv0.get_redundancies(\n tol=tol, use_antpos=True, conjugate_bls=True\n )\n for i, gp in enumerate(red_gps):\n for bl in gp:\n inds = np.where(bl == uv0.baseline_array)\n uv0.data_array[inds, ...] *= 0\n uv0.data_array[inds, ...] += complex(i)\n\n if method == \"average\":\n with uvtest.check_warnings(\n UserWarning,\n \"Index baseline in the redundant group does not have all the \"\n \"times, compressed object will be missing those times.\",\n nwarnings=4,\n ):\n uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)\n else:\n uv2 = uv0.compress_by_redundancy(method=method, tol=tol, inplace=False)\n\n # check inflating gets back to the original\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Missing some redundant groups. 
Filling in available data.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv2.inflate_by_redundancy(tol=tol)\n\n uv2.history = uv0.history\n # Inflation changes the baseline ordering into the order of the redundant groups.\n # reorder bls for comparison\n uv0.reorder_blts()\n uv2.reorder_blts()\n uv2._uvw_array.tols = [0, tol]\n\n blt_inds = []\n missing_inds = []\n for bl, t in zip(uv0.baseline_array, uv0.time_array):\n if (bl, t) in zip(uv2.baseline_array, uv2.time_array):\n this_ind = np.where((uv2.baseline_array == bl) & (uv2.time_array == t))[0]\n blt_inds.append(this_ind[0])\n else:\n # this is missing because of the compress_by_redundancy step\n missing_inds.append(\n np.where((uv0.baseline_array == bl) & (uv0.time_array == t))[0]\n )\n\n uv3 = uv2.select(blt_inds=blt_inds, inplace=False)\n\n orig_inds_keep = list(np.arange(uv0.Nblts))\n for ind in missing_inds:\n orig_inds_keep.remove(ind)\n uv1 = uv0.select(blt_inds=orig_inds_keep, inplace=False)\n\n if method == \"average\":\n # the nsample array in the original object varies, so they\n # don't come out the same\n assert not np.allclose(uv3.nsample_array, uv1.nsample_array)\n uv3.nsample_array = uv1.nsample_array\n\n assert uv3 == uv1\n\n\ndef test_compress_redundancy_variable_inttime():\n uv0 = UVData()\n uv0.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n tol = 0.05\n ntimes_in = uv0.Ntimes\n\n # Assign identical data to each redundant group:\n red_gps, centers, lengths = uv0.get_redundancies(\n tol=tol, use_antpos=True, conjugate_bls=True\n )\n index_bls = [gp[0] for gp in red_gps]\n uv0.data_array *= 0\n # set different int time for index baseline in object to compress\n uv1 = uv0.copy()\n ave_int_time = np.average(uv0.integration_time)\n nbls_group = np.zeros(len(red_gps))\n for gp_ind, gp in enumerate(red_gps):\n for bl in gp:\n inds = np.where(bl == uv0.baseline_array)\n if inds[0].size > 0:\n nbls_group[gp_ind] += 1\n uv1.data_array[inds] += complex(gp_ind)\n uv0.data_array[inds] += complex(gp_ind)\n if bl not in index_bls:\n uv0.integration_time[inds] = ave_int_time / 2\n\n assert uv0._integration_time != uv1._integration_time\n\n with uvtest.check_warnings(\n UserWarning,\n \"Integrations times are not identical in a redundant \"\n \"group. 
Averaging anyway but this may cause unexpected \"\n \"behavior.\",\n nwarnings=56,\n ) as warn_record:\n uv0.compress_by_redundancy(method=\"average\", tol=tol)\n assert len(warn_record) == np.sum(nbls_group > 1) * ntimes_in\n\n uv1.compress_by_redundancy(method=\"average\", tol=tol)\n\n assert uv0 == uv1\n\n\[email protected](\"method\", (\"select\", \"average\"))\ndef test_compress_redundancy_metadata_only(method):\n uv0 = UVData()\n uv0.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n tol = 0.05\n\n # Assign identical data to each redundant group:\n red_gps, centers, lengths = uv0.get_redundancies(\n tol=tol, use_antpos=True, conjugate_bls=True\n )\n for i, gp in enumerate(red_gps):\n for bl in gp:\n inds = np.where(bl == uv0.baseline_array)\n uv0.data_array[inds] *= 0\n uv0.data_array[inds] += complex(i)\n\n uv2 = uv0.copy(metadata_only=True)\n uv2.compress_by_redundancy(method=method, tol=tol, inplace=True)\n\n uv0.compress_by_redundancy(method=method, tol=tol)\n uv0.data_array = None\n uv0.flag_array = None\n uv0.nsample_array = None\n assert uv0 == uv2\n\n\ndef test_compress_redundancy_wrong_method():\n uv0 = UVData()\n uv0.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n tol = 0.05\n with pytest.raises(ValueError, match=\"method must be one of\"):\n uv0.compress_by_redundancy(method=\"foo\", tol=tol, inplace=True)\n\n\[email protected](\"method\", (\"select\", \"average\"))\ndef test_redundancy_missing_groups(method, tmp_path):\n # Check that if I try to inflate a compressed UVData that is missing\n # redundant groups, it will raise the right warnings and fill only what\n # data are available.\n\n uv0 = UVData()\n uv0.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n tol = 0.02\n num_select = 19\n\n uv0.compress_by_redundancy(method=method, tol=tol)\n fname = str(tmp_path / \"temp_hera19_missingreds.uvfits\")\n\n bls = np.unique(uv0.baseline_array)[:num_select] # First twenty baseline groups\n uv0.select(bls=[uv0.baseline_to_antnums(bl) for bl in bls])\n uv0.write_uvfits(fname)\n uv1 = UVData()\n uv1.read_uvfits(fname)\n\n assert uv0 == uv1 # Check that writing compressed files causes no issues.\n\n with uvtest.check_warnings(\n UserWarning, match=\"Missing some redundant groups. 
Filling in available data.\"\n ):\n uv1.inflate_by_redundancy(tol=tol)\n\n uv2 = uv1.compress_by_redundancy(method=method, tol=tol, inplace=False)\n\n assert np.unique(uv2.baseline_array).size == num_select\n\n\ndef test_quick_redundant_vs_redundant_test_array():\n \"\"\"Verify the quick redundancy calc returns the same groups as a known array.\"\"\"\n uv = UVData()\n uv.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n uv.select(times=uv.time_array[0])\n uv.unphase_to_drift()\n uv.conjugate_bls(convention=\"u>0\", use_enu=True)\n tol = 0.05\n # a quick and dirty redundancy calculation\n unique_bls, baseline_inds = np.unique(uv.baseline_array, return_index=True)\n uvw_vectors = np.take(uv.uvw_array, baseline_inds, axis=0)\n uvw_diffs = np.expand_dims(uvw_vectors, axis=0) - np.expand_dims(\n uvw_vectors, axis=1\n )\n uvw_diffs = np.linalg.norm(uvw_diffs, axis=2)\n\n reds = np.where(uvw_diffs < tol, unique_bls, 0)\n reds = np.ma.masked_where(reds == 0, reds)\n groups = []\n for bl in reds:\n grp = []\n grp.extend(bl.compressed())\n for other_bls in reds:\n if set(reds.compressed()).issubset(other_bls.compressed()):\n grp.extend(other_bls.compressed())\n grp = np.unique(grp).tolist()\n groups.append(grp)\n\n pad = len(max(groups, key=len))\n groups = np.array([i + [-1] * (pad - len(i)) for i in groups])\n groups = np.unique(groups, axis=0)\n groups = [[bl for bl in grp if bl != -1] for grp in groups]\n groups.sort(key=len)\n\n redundant_groups, centers, lengths, conj_inds = uv.get_redundancies(\n tol=tol, include_conjugates=True\n )\n redundant_groups.sort(key=len)\n assert groups == redundant_groups\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_redundancy_finder_when_nblts_not_nbls_times_ntimes(casa_uvfits):\n \"\"\"Test the redundancy finder functions when Nblts != Nbls * Ntimes.\"\"\"\n tol = 1 # meter\n uv = casa_uvfits\n uv.conjugate_bls(convention=\"u>0\", use_enu=True)\n # check that Nblts != Nbls * Ntimes\n assert uv.Nblts != uv.Nbls * uv.Ntimes\n\n # a quick and dirty redundancy calculation\n unique_bls, baseline_inds = np.unique(uv.baseline_array, return_index=True)\n uvw_vectors = np.take(uv.uvw_array, baseline_inds, axis=0)\n uvw_diffs = np.expand_dims(uvw_vectors, axis=0) - np.expand_dims(\n uvw_vectors, axis=1\n )\n uvw_diffs = np.linalg.norm(uvw_diffs, axis=2)\n\n reds = np.where(uvw_diffs < tol, unique_bls, 0)\n reds = np.ma.masked_where(reds == 0, reds)\n groups = []\n for bl in reds:\n grp = []\n grp.extend(bl.compressed())\n for other_bls in reds:\n if set(reds.compressed()).issubset(other_bls.compressed()):\n grp.extend(other_bls.compressed())\n grp = np.unique(grp).tolist()\n groups.append(grp)\n\n pad = len(max(groups, key=len))\n groups = np.array([i + [-1] * (pad - len(i)) for i in groups])\n groups = np.unique(groups, axis=0)\n groups = [[bl for bl in grp if bl != -1] for grp in groups]\n groups.sort(key=len)\n\n redundant_groups, centers, lengths, conj_inds = uv.get_redundancies(\n tol=tol, include_conjugates=True\n )\n redundant_groups.sort(key=len)\n assert groups == redundant_groups\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_overlapping_data_add(casa_uvfits, tmp_path):\n # read in test data\n uv = casa_uvfits\n\n # slice into four objects\n blts1 = np.arange(500)\n blts2 = np.arange(500, 1360)\n uv1 = 
uv.select(polarizations=[-1, -2], blt_inds=blts1, inplace=False)\n uv2 = uv.select(polarizations=[-3, -4], blt_inds=blts1, inplace=False)\n uv3 = uv.select(polarizations=[-1, -2], blt_inds=blts2, inplace=False)\n uv4 = uv.select(polarizations=[-3, -4], blt_inds=blts2, inplace=False)\n\n # combine and check for equality\n uvfull = uv1 + uv2\n uvfull += uv3\n uvfull += uv4\n extra_history = (\n \"Downselected to specific baseline-times, polarizations using pyuvdata. \"\n \"Combined data along polarization axis using pyuvdata. Combined data along \"\n \"baseline-time axis using pyuvdata. Overwrote invalid data using pyuvdata.\"\n )\n assert uvutils._check_histories(uvfull.history, uv.history + extra_history)\n uvfull.history = uv.history # make histories match\n assert uv == uvfull\n\n # check combination not-in-place\n uvfull = uv1 + uv2\n uvfull += uv3\n uvfull = uvfull + uv4\n uvfull.history = uv.history # make histories match\n assert uv == uvfull\n\n # test raising error for adding objects incorrectly (i.e., having the object\n # with data to be overwritten come second)\n uvfull = uv1 + uv2\n uvfull += uv3\n pytest.raises(ValueError, uv4.__iadd__, uvfull)\n pytest.raises(ValueError, uv4.__add__, uv4, uvfull)\n\n # write individual objects out, and make sure that we can read in the list\n uv1_out = str(tmp_path / \"uv1.uvfits\")\n uv1.write_uvfits(uv1_out)\n uv2_out = str(tmp_path / \"uv2.uvfits\")\n uv2.write_uvfits(uv2_out)\n uv3_out = str(tmp_path / \"uv3.uvfits\")\n uv3.write_uvfits(uv3_out)\n uv4_out = str(tmp_path / \"uv4.uvfits\")\n uv4.write_uvfits(uv4_out)\n\n uvfull = UVData()\n uvfull.read(np.array([uv1_out, uv2_out, uv3_out, uv4_out]))\n assert uvutils._check_histories(uvfull.history, uv.history + extra_history)\n uvfull.history = uv.history # make histories match\n assert uvfull == uv\n\n # clean up after ourselves\n os.remove(uv1_out)\n os.remove(uv2_out)\n os.remove(uv3_out)\n os.remove(uv4_out)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_lsts_from_time_with_only_unique(paper_uvh5):\n \"\"\"\n Test `set_lsts_from_time_array` with only unique values is identical to full array.\n \"\"\"\n uv = paper_uvh5\n lat, lon, alt = uv.telescope_location_lat_lon_alt_degrees\n # calculate the lsts for all elements in time array\n full_lsts = uvutils.get_lst_for_time(uv.time_array, lat, lon, alt)\n # use `set_lst_from_time_array` to set the uv.lst_array using only unique values\n uv.set_lsts_from_time_array()\n assert np.array_equal(full_lsts, uv.lst_array)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_lsts_from_time_with_only_unique_background(paper_uvh5):\n \"\"\"\n Test `set_lsts_from_time_array` with only unique values is identical to full array.\n \"\"\"\n uv = paper_uvh5\n lat, lon, alt = uv.telescope_location_lat_lon_alt_degrees\n # calculate the lsts for all elements in time array\n full_lsts = uvutils.get_lst_for_time(uv.time_array, lat, lon, alt)\n # use `set_lst_from_time_array` to set the uv.lst_array using only unique values\n proc = uv.set_lsts_from_time_array(background=True)\n proc.join()\n assert np.array_equal(full_lsts, uv.lst_array)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_copy(casa_uvfits):\n \"\"\"Test the copy method\"\"\"\n uv_object = casa_uvfits\n\n uv_object_copy = uv_object.copy()\n assert uv_object_copy == uv_object\n\n uv_object_copy = 
uv_object.copy(metadata_only=True)\n assert uv_object_copy.metadata_only\n\n for name in uv_object._data_params:\n setattr(uv_object, name, None)\n assert uv_object_copy == uv_object\n\n uv_object_copy = uv_object.copy()\n assert uv_object_copy == uv_object\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time(hera_uvh5):\n \"\"\"Test the upsample_in_time method\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n uv_object.upsample_in_time(max_integration_time, blt_order=\"baseline\")\n\n assert np.allclose(uv_object.integration_time, max_integration_time)\n # we should double the size of the data arrays\n assert uv_object.data_array.size == 2 * init_data_size\n # output data should be the same\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time_with_flags(hera_uvh5):\n \"\"\"Test the upsample_in_time method with flags\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n\n # add flags and upsample again\n inds01 = uv_object.antpair2ind(0, 1)\n uv_object.flag_array[inds01[0], 0, 0, 0] = True\n uv_object.upsample_in_time(max_integration_time, blt_order=\"baseline\")\n\n # data and nsamples should be changed as normal, but flagged\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])\n out_flags = uv_object.get_flags(0, 1)\n assert np.all(out_flags[:2, 0, 0])\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time_noninteger_resampling(hera_uvh5):\n \"\"\"Test the upsample_in_time method with a non-integer resampling factor\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n 
uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time) * 0.75\n uv_object.upsample_in_time(max_integration_time, blt_order=\"baseline\")\n\n assert np.allclose(uv_object.integration_time, max_integration_time * 0.5 / 0.75)\n # we should double the size of the data arrays\n assert uv_object.data_array.size == 2 * init_data_size\n # output data should be different by a factor of 2\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time_errors(hera_uvh5):\n \"\"\"Test errors and warnings raised by upsample_in_time\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # test using a too-small integration time\n max_integration_time = 1e-3 * np.amin(uv_object.integration_time)\n with pytest.raises(ValueError) as cm:\n uv_object.upsample_in_time(max_integration_time)\n assert str(cm.value).startswith(\"Decreasing the integration time by more than\")\n\n # catch a warning for doing no work\n uv_object2 = uv_object.copy()\n max_integration_time = 2 * np.amax(uv_object.integration_time)\n with uvtest.check_warnings(\n UserWarning, \"All values in the integration_time array are already longer\"\n ):\n uv_object.upsample_in_time(max_integration_time)\n assert uv_object == uv_object2\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time_summing_correlator_mode(hera_uvh5):\n \"\"\"Test the upsample_in_time method with summing correlator mode\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n uv_object.upsample_in_time(\n max_integration_time, blt_order=\"baseline\", summing_correlator_mode=True\n )\n\n assert np.allclose(uv_object.integration_time, max_integration_time)\n # we should double the size of the data arrays\n assert uv_object.data_array.size == 2 * init_data_size\n # output data should be the half the input\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not 
match the expected values\")\ndef test_upsample_in_time_summing_correlator_mode_with_flags(hera_uvh5):\n \"\"\"Test the upsample_in_time method with summing correlator mode and flags\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # add flags and upsample again\n inds01 = uv_object.antpair2ind(0, 1)\n uv_object.flag_array[inds01[0], 0, 0, 0] = True\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n uv_object.upsample_in_time(\n max_integration_time, blt_order=\"baseline\", summing_correlator_mode=True\n )\n\n # data and nsamples should be changed as normal, but flagged\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])\n out_flags = uv_object.get_flags(0, 1)\n assert np.all(out_flags[:2, 0, 0])\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time_summing_correlator_mode_nonint_resampling(hera_uvh5):\n \"\"\"Test the upsample_in_time method with summing correlator mode\n and non-integer resampling\n \"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # try again with a non-integer resampling factor\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time) * 0.75\n uv_object.upsample_in_time(\n max_integration_time, blt_order=\"baseline\", summing_correlator_mode=True\n )\n\n assert np.allclose(uv_object.integration_time, max_integration_time * 0.5 / 0.75)\n # we should double the size of the data arrays\n assert uv_object.data_array.size == 2 * init_data_size\n # output data should be half the input\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[0, 0, 0] / 2, out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_partial_upsample_in_time(hera_uvh5):\n \"\"\"Test the upsample_in_time method with non-uniform upsampling\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # change a whole baseline's integration time\n bl_inds = uv_object.antpair2ind(0, 1)\n uv_object.integration_time[bl_inds] = uv_object.integration_time[0] / 2.0\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n 
init_wf_01 = uv_object.get_data(0, 1)\n init_wf_02 = uv_object.get_data(0, 2)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns_01 = uv_object.get_nsamples(0, 1)\n init_ns_02 = uv_object.get_nsamples(0, 2)\n\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time)\n uv_object.upsample_in_time(max_integration_time, blt_order=\"baseline\")\n\n assert np.allclose(uv_object.integration_time, max_integration_time)\n # output data should be the same\n out_wf_01 = uv_object.get_data(0, 1)\n out_wf_02 = uv_object.get_data(0, 2)\n assert np.all(init_wf_01 == out_wf_01)\n assert np.isclose(init_wf_02[0, 0, 0], out_wf_02[0, 0, 0])\n assert init_wf_02.size * 2 == out_wf_02.size\n\n # this should be true because there are no flags\n out_ns_01 = uv_object.get_nsamples(0, 1)\n out_ns_02 = uv_object.get_nsamples(0, 2)\n assert np.allclose(out_ns_01, init_ns_01)\n assert np.isclose(init_ns_02[0, 0, 0], out_ns_02[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time_drift(hera_uvh5):\n \"\"\"Test the upsample_in_time method on drift mode data\"\"\"\n uv_object = hera_uvh5\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n uv_object.upsample_in_time(\n max_integration_time, blt_order=\"baseline\", allow_drift=True\n )\n\n assert np.allclose(uv_object.integration_time, max_integration_time)\n # we should double the size of the data arrays\n assert uv_object.data_array.size == 2 * init_data_size\n # output data should be the same\n out_wf = uv_object.get_data(0, 1)\n # we need a \"large\" tolerance given the \"large\" data\n new_tol = 1e-2 * np.amax(np.abs(uv_object.data_array))\n assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0], atol=new_tol)\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_in_time_drift_no_phasing(hera_uvh5):\n \"\"\"Test the upsample_in_time method on drift mode data without phasing\"\"\"\n uv_object = hera_uvh5\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\")\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n # upsample with allow_drift=False\n uv_object.upsample_in_time(\n max_integration_time, blt_order=\"baseline\", allow_drift=False\n )\n\n assert np.allclose(uv_object.integration_time, max_integration_time)\n # we should 
double the size of the data arrays\n assert uv_object.data_array.size == 2 * init_data_size\n # output data should be similar, but somewhat different because of the phasing\n out_wf = uv_object.get_data(0, 1)\n # we need a \"large\" tolerance given the \"large\" data\n new_tol = 1e-2 * np.amax(np.abs(uv_object.data_array))\n assert np.isclose(init_wf[0, 0, 0], out_wf[0, 0, 0], atol=new_tol)\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose(init_ns[0, 0, 0], out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time(hera_uvh5):\n \"\"\"Test the downsample_in_time method\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n min_integration_time = original_int_time * 2.0\n uv_object.downsample_in_time(\n min_int_time=min_integration_time, blt_order=\"baseline\", minor_order=\"time\"\n )\n\n # Should have half the size of the data array and all the new integration time\n # (for this file with 20 integrations and a factor of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n assert uv_object.data_array.size * 2 == init_data_size\n\n # output data should be the average\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # Compare doing it with n_times_to_avg\n uv_object2.downsample_in_time(\n n_times_to_avg=2, blt_order=\"baseline\", minor_order=\"time\"\n )\n # histories are different when n_times_to_avg is set vs min_int_time\n assert uv_object.history != uv_object2.history\n uv_object2.history = uv_object.history\n assert uv_object == uv_object2\n\n assert not isinstance(uv_object.data_array, np.ma.MaskedArray)\n assert not isinstance(uv_object.nsample_array, np.ma.MaskedArray)\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_partial_flags(hera_uvh5):\n \"\"\"Test the downsample_in_time method with partial flagging\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n 
min_integration_time = original_int_time * 2.0\n\n # add flags and try again. With one of the 2 inputs flagged, the data should\n # just be the unflagged value and nsample should be half the unflagged one\n # and the output should not be flagged.\n inds01 = uv_object.antpair2ind(0, 1)\n uv_object.flag_array[inds01[0], 0, 0, 0] = True\n uv_object2 = uv_object.copy()\n\n uv_object.downsample_in_time(\n min_int_time=min_integration_time, blt_order=\"baseline\", minor_order=\"time\"\n )\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])\n\n # make sure nsamples is correct\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # check that there are still no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n\n # Compare doing it with n_times_to_avg\n uv_object2.downsample_in_time(\n n_times_to_avg=2, blt_order=\"baseline\", minor_order=\"time\"\n )\n assert uv_object.history != uv_object2.history\n uv_object2.history = uv_object.history\n assert uv_object == uv_object2\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_totally_flagged(hera_uvh5):\n \"\"\"Test the downsample_in_time method with totally flagged integrations\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n min_integration_time = original_int_time * 2.0\n\n # add more flags and try again. 
When all the input points are flagged,\n # data and nsample should have the same results as no flags but the output\n # should be flagged\n inds01 = uv_object.antpair2ind(0, 1)\n uv_object.flag_array[inds01[:2], 0, 0, 0] = True\n uv_object2 = uv_object.copy()\n\n uv_object.downsample_in_time(\n min_int_time=min_integration_time, blt_order=\"baseline\", minor_order=\"time\"\n )\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])\n\n # make sure nsamples is correct\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # check that the new sample is flagged\n out_flag = uv_object.get_flags(0, 1)\n assert out_flag[0, 0, 0]\n\n # Compare doing it with n_times_to_avg\n uv_object2.downsample_in_time(\n n_times_to_avg=2, blt_order=\"baseline\", minor_order=\"time\"\n )\n assert uv_object.history != uv_object2.history\n uv_object2.history = uv_object.history\n assert uv_object == uv_object2\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_uneven_samples(hera_uvh5):\n \"\"\"Test the downsample_in_time method with uneven downsampling\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n\n # test again with a downsample factor that doesn't go evenly into the\n # number of samples\n min_integration_time = original_int_time * 3.0\n uv_object.downsample_in_time(\n min_int_time=min_integration_time,\n blt_order=\"baseline\",\n minor_order=\"time\",\n keep_ragged=False,\n )\n\n # Only some baselines have an even number of times, so the output integration time\n # is not uniformly the same. 
For the test case, we'll have *either* the original
    # integration time or three times that (the target); with keep_ragged=False the
    # ragged leftover samples are dropped, so the stricter check below confirms
    # that everything actually ends up at the target.
    assert np.all(
        np.logical_or(
            np.isclose(uv_object.integration_time, original_int_time),
            np.isclose(uv_object.integration_time, min_integration_time),
        )
    )

    # make sure integration time is correct
    # in this case, all integration times should be the target one
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))

    # as usual, the new data should be the average of the input data (3 points now)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.mean(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])

    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=3, blt_order="baseline", minor_order="time", keep_ragged=False
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2


@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_uneven_samples_keep_ragged(hera_uvh5):
    """Test downsample_in_time with uneven downsampling and keep_ragged=True."""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")
    uv_object2 = uv_object.copy()

    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0

    # test again with a downsample factor that doesn't go evenly into the
    # number of samples
    min_integration_time = original_int_time * 3.0

    # this time use keep_ragged=True
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=True,
    )

    # as usual, the new data should be the average of the input data
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.mean(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])

    # Compare doing it with n_times_to_avg
    uv_object2.downsample_in_time(
        n_times_to_avg=3, blt_order="baseline", minor_order="time", keep_ragged=True
    )
    assert uv_object.history != uv_object2.history
    uv_object2.history = uv_object.history
    assert uv_object == uv_object2


@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode(hera_uvh5):
    """Test the downsample_in_time method with summing correlator mode"""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")

    # save some values for later
    init_data_size = uv_object.data_array.size
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)

    # change the target integration time
    min_integration_time = original_int_time * 2.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
blt_order=\"baseline\",\n minor_order=\"time\",\n summing_correlator_mode=True,\n )\n\n # Should have half the size of the data array and all the new integration time\n # (for this file with 20 integrations and a factor of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n assert uv_object.data_array.size * 2 == init_data_size\n\n # output data should be the sum\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]), out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_summing_correlator_mode_partial_flags(hera_uvh5):\n \"\"\"Test the downsample_in_time method with summing correlator mode and\n partial flags\n \"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n min_integration_time = original_int_time * 2.0\n\n # add flags and try again. With one of the 2 inputs flagged, the data should\n # just be the unflagged value and nsample should be half the unflagged one\n # and the output should not be flagged.\n inds01 = uv_object.antpair2ind(0, 1)\n uv_object.flag_array[inds01[0], 0, 0, 0] = True\n uv_object.downsample_in_time(\n min_int_time=min_integration_time,\n blt_order=\"baseline\",\n minor_order=\"time\",\n summing_correlator_mode=True,\n )\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])\n\n # make sure nsamples is correct\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # check that there are still no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_summing_correlator_mode_totally_flagged(hera_uvh5):\n \"\"\"Test the downsample_in_time method with summing correlator mode and\n totally flagged integrations.\n \"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n min_integration_time = original_int_time * 2.0\n\n # add more flags and try again. 
When all the input points are flagged,\n # data and nsample should have the same results as no flags but the output\n # should be flagged\n inds01 = uv_object.antpair2ind(0, 1)\n uv_object.flag_array[inds01[:2], 0, 0, 0] = True\n uv_object.downsample_in_time(\n min_int_time=min_integration_time,\n blt_order=\"baseline\",\n minor_order=\"time\",\n summing_correlator_mode=True,\n )\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]), out_wf[0, 0, 0])\n\n # make sure nsamples is correct\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # check that the new sample is flagged\n out_flag = uv_object.get_flags(0, 1)\n assert out_flag[0, 0, 0]\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_summing_correlator_mode_uneven_samples(hera_uvh5):\n \"\"\"Test the downsample_in_time method with summing correlator mode and\n uneven samples.\n \"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # test again with a downsample factor that doesn't go evenly into the\n # number of samples\n min_integration_time = original_int_time * 3.0\n uv_object.downsample_in_time(\n min_int_time=min_integration_time,\n blt_order=\"baseline\",\n minor_order=\"time\",\n keep_ragged=False,\n summing_correlator_mode=True,\n )\n\n # Only some baselines have an even number of times, so the output integration time\n # is not uniformly the same. 
For the test case, we'll have *either* the original
    # integration time or three times that (the target).
    assert np.all(
        np.logical_or(
            np.isclose(uv_object.integration_time, original_int_time),
            np.isclose(uv_object.integration_time, min_integration_time),
        )
    )

    # in summing_correlator_mode, the new data should be the sum of the input
    # data (3 points now)
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.sum(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])

    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(np.mean(init_ns[0:3, 0, 0]), out_ns[0, 0, 0])

    return


@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_summing_correlator_mode_uneven_samples_drop_ragged(
    hera_uvh5,
):
    """Test the downsample_in_time method with summing correlator mode and
    uneven samples, dropping ragged ones.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")

    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    original_int_time = np.amax(uv_object.integration_time)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)

    # test again with keep_ragged=False
    min_integration_time = original_int_time * 3.0
    uv_object.downsample_in_time(
        min_int_time=min_integration_time,
        blt_order="baseline",
        minor_order="time",
        keep_ragged=False,
        summing_correlator_mode=True,
    )

    # make sure integration time is correct
    # in this case, all integration times should be the target one
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))

    # in summing_correlator_mode, the new data should be the sum of the input data
    out_wf = uv_object.get_data(0, 1)
    assert np.isclose(np.sum(init_wf[0:3, 0, 0]), out_wf[0, 0, 0])

    # make sure nsamples is correct
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose(np.mean(init_ns[0:3, 0, 0]), out_ns[0, 0, 0])

    return


@pytest.mark.filterwarnings("ignore:The xyz array in ENU_from_ECEF")
@pytest.mark.filterwarnings("ignore:The enu array in ECEF_from_ENU")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_partial_downsample_in_time(hera_uvh5):
    """Test the downsample_in_time method without uniform downsampling"""
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))

    # change a whole baseline's integration time
    bl_inds = uv_object.antpair2ind(0, 1)
    uv_object.integration_time[bl_inds] = uv_object.integration_time[0] * 2.0

    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline")

    # save some values for later
    init_wf_01 = uv_object.get_data(0, 1)
    init_wf_02 = uv_object.get_data(0, 2)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns_01 = uv_object.get_nsamples(0, 1)
    init_ns_02 = uv_object.get_nsamples(0, 2)

    # change the target integration time
    min_integration_time = np.amax(uv_object.integration_time)
    uv_object.downsample_in_time(
        min_int_time=min_integration_time, blt_order="baseline"
    )

    # Should have all the new integration time
    # (for this file with 20 integrations and a factor 
of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n\n # output data should be the same\n out_wf_01 = uv_object.get_data(0, 1)\n out_wf_02 = uv_object.get_data(0, 2)\n assert np.all(init_wf_01 == out_wf_01)\n assert np.isclose(\n (init_wf_02[0, 0, 0] + init_wf_02[1, 0, 0]) / 2.0, out_wf_02[0, 0, 0]\n )\n\n # this should be true because there are no flags\n out_ns_01 = uv_object.get_nsamples(0, 1)\n out_ns_02 = uv_object.get_nsamples(0, 2)\n assert np.allclose(out_ns_01, init_ns_01)\n assert np.isclose(\n (init_ns_02[0, 0, 0] + init_ns_02[1, 0, 0]) / 2.0, out_ns_02[0, 0, 0]\n )\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_drift(hera_uvh5):\n \"\"\"Test the downsample_in_time method on drift mode data\"\"\"\n uv_object = hera_uvh5\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n min_integration_time = original_int_time * 2.0\n uv_object.downsample_in_time(\n min_int_time=min_integration_time, blt_order=\"baseline\", allow_drift=True\n )\n\n # Should have half the size of the data array and all the new integration time\n # (for this file with 20 integrations and a factor of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n assert uv_object.data_array.size * 2 == init_data_size\n\n # output data should be the average\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n\n # Compare doing it with n_times_to_avg\n uv_object2.downsample_in_time(\n n_times_to_avg=2, blt_order=\"baseline\", allow_drift=True\n )\n assert uv_object.history != uv_object2.history\n uv_object2.history = uv_object.history\n assert uv_object == uv_object2\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_drift_no_phasing(hera_uvh5):\n \"\"\"Test the downsample_in_time method on drift mode data without phasing\"\"\"\n uv_object = hera_uvh5\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n min_integration_time = original_int_time * 2.0\n\n # try 
again with allow_drift=False\n uv_object.downsample_in_time(\n min_int_time=min_integration_time, blt_order=\"baseline\", allow_drift=False,\n )\n\n # Should have half the size of the data array and all the new integration time\n # (for this file with 20 integrations and a factor of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n assert uv_object.data_array.size * 2 == init_data_size\n\n # output data should be similar to the average, but somewhat different\n # because of the phasing\n out_wf = uv_object.get_data(0, 1)\n new_tol = 5e-2 * np.amax(np.abs(uv_object.data_array))\n assert np.isclose(\n (init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0], atol=new_tol\n )\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n\n # Compare doing it with n_times_to_avg\n uv_object2.downsample_in_time(\n n_times_to_avg=2, blt_order=\"baseline\", minor_order=\"time\"\n )\n assert uv_object.history != uv_object2.history\n uv_object2.history = uv_object.history\n assert uv_object == uv_object2\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_nsample_precision(hera_uvh5):\n \"\"\"Test the downsample_in_time method with a half-precision nsample_array\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n original_int_time = np.amax(uv_object.integration_time)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the target integration time\n min_integration_time = original_int_time * 2.0\n\n # add flags and try again. 
With one of the 2 inputs flagged, the data should\n # just be the unflagged value and nsample should be half the unflagged one\n # and the output should not be flagged.\n inds01 = uv_object.antpair2ind(0, 1)\n uv_object.flag_array[inds01[0], 0, 0, 0] = True\n uv_object2 = uv_object.copy()\n\n # change precision of nsample array\n uv_object.nsample_array = uv_object.nsample_array.astype(np.float16)\n uv_object.downsample_in_time(\n min_int_time=min_integration_time, blt_order=\"baseline\", minor_order=\"time\"\n )\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose(init_wf[1, 0, 0], out_wf[0, 0, 0])\n\n # make sure nsamples is correct\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n # make sure nsamples has the right dtype\n assert uv_object.nsample_array.dtype.type is np.float16\n\n # check that there are still no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n\n # Compare doing it with n_times_to_avg\n uv_object2.nsample_array = uv_object2.nsample_array.astype(np.float16)\n uv_object2.downsample_in_time(\n n_times_to_avg=2, blt_order=\"baseline\", minor_order=\"time\"\n )\n assert uv_object.history != uv_object2.history\n uv_object2.history = uv_object.history\n assert uv_object == uv_object2\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_errors(hera_uvh5):\n \"\"\"Test various errors and warnings are raised\"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n\n # raise an error if set neither min_int_time and n_times_to_avg\n with pytest.raises(\n ValueError, match=\"Either min_int_time or n_times_to_avg must be set.\"\n ):\n uv_object.downsample_in_time()\n\n # raise an error if set both min_int_time and n_times_to_avg\n with pytest.raises(\n ValueError, match=\"Only one of min_int_time or n_times_to_avg can be set.\"\n ):\n uv_object.downsample_in_time(\n min_int_time=2 * np.amin(uv_object.integration_time), n_times_to_avg=2\n )\n # raise an error if only one time\n uv_object2 = uv_object.copy()\n uv_object2.select(times=uv_object2.time_array[0])\n with pytest.raises(\n ValueError, match=\"Only one time in this object, cannot downsample.\"\n ):\n uv_object2.downsample_in_time(n_times_to_avg=2)\n\n # raise an error for a too-large integration time\n max_integration_time = 1e3 * np.amax(uv_object.integration_time)\n with pytest.raises(\n ValueError, match=\"Increasing the integration time by more than\"\n ):\n uv_object.downsample_in_time(min_int_time=max_integration_time)\n\n # catch a warning for doing no work\n uv_object2 = uv_object.copy()\n max_integration_time = 0.5 * np.amin(uv_object.integration_time)\n with uvtest.check_warnings(\n UserWarning, match=\"All values in the integration_time array are already longer\"\n ):\n uv_object.downsample_in_time(min_int_time=max_integration_time)\n\n assert uv_object == uv_object2\n del uv_object2\n\n # raise an error if n_times_to_avg is not an integer\n with pytest.raises(ValueError, match=\"n_times_to_avg must be an integer.\"):\n uv_object.downsample_in_time(n_times_to_avg=2.5)\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 
1)\n\n # make a gap in the times to check a warning about that\n inds01 = uv_object.antpair2ind(0, 1)\n initial_int_time = uv_object.integration_time[inds01[0]]\n # time array is in jd, integration time is in sec\n uv_object.time_array[inds01[-1]] += initial_int_time / (24 * 3600)\n uv_object.Ntimes += 1\n min_integration_time = 2 * np.amin(uv_object.integration_time)\n times_01 = uv_object.get_times(0, 1)\n assert np.unique(np.diff(times_01)).size > 1\n with uvtest.check_warnings(\n UserWarning,\n [\n \"There is a gap in the times of baseline\",\n \"The uvw_array does not match the expected values\",\n ],\n ):\n uv_object.downsample_in_time(min_int_time=min_integration_time)\n\n # Should have half the size of the data array and all the new integration time\n # (for this file with 20 integrations and a factor of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n assert uv_object.data_array.size * 2 == init_data_size\n\n # output data should be the average\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_int_time_mismatch_warning(hera_uvh5):\n \"\"\"Test warning in downsample_in_time about mismatch between integration\n times and the time between integrations.\n \"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n\n # save some values for later\n init_data_size = uv_object.data_array.size\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # change the integration times to catch a warning about integration times\n # not matching the time delta between integrations\n uv_object.integration_time *= 0.5\n min_integration_time = 2 * np.amin(uv_object.integration_time)\n with uvtest.check_warnings(\n UserWarning,\n match=\"The time difference between integrations is not the same\",\n nwarnings=11,\n ):\n uv_object.downsample_in_time(min_int_time=min_integration_time)\n\n # Should have half the size of the data array and all the new integration time\n # (for this file with 20 integrations and a factor of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n assert uv_object.data_array.size * 2 == init_data_size\n\n # output data should be the average\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_varying_integration_time(hera_uvh5):\n \"\"\"Test downsample_in_time handling of file with integration time changing\n within a baseline\n \"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n 
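    # (ordering by baseline, then time within each baseline, means antpair2ind
    # returns consecutive, time-ordered rows, which the index-based checks below
    # rely on)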
uv_object.reorder_blts(order="baseline", minor_order="time")

    # save some values for later
    init_wf = uv_object.get_data(0, 1)
    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0
    init_ns = uv_object.get_nsamples(0, 1)

    # test handling (& warnings) with varying integration time in a baseline
    # First, change both integration time & time array to match
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-2]] += (initial_int_time / 2) / (24 * 3600)
    uv_object.time_array[inds01[-1]] += (3 * initial_int_time / 2) / (24 * 3600)
    uv_object.integration_time[inds01[-2:]] += initial_int_time
    uv_object.Ntimes = np.unique(uv_object.time_array).size
    min_integration_time = 2 * np.amin(uv_object.integration_time)
    # check that there are no warnings about inconsistencies between
    # integration_time & time_array
    with uvtest.check_warnings(
        UserWarning, match="The uvw_array does not match the expected values",
    ):
        uv_object.downsample_in_time(min_int_time=min_integration_time)

    # Should have all the new integration time
    # (for this file with 20 integrations and a factor of 2 downsampling)
    assert np.all(np.isclose(uv_object.integration_time, min_integration_time))

    out_wf = uv_object.get_data(0, 1)

    n_times_in = init_wf.shape[0]
    n_times_out = out_wf.shape[0]
    assert n_times_out == (n_times_in - 2) / 2 + 2

    # output data should be the average for the first set
    assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])
    # last 2 time samples should be identical to initial ones
    assert np.isclose(init_wf[-1, 0, 0], out_wf[-1, 0, 0])
    assert np.isclose(init_wf[-2, 0, 0], out_wf[-2, 0, 0])

    # this should be true because there are no flags
    out_ns = uv_object.get_nsamples(0, 1)
    assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])
    assert np.isclose(init_ns[-1, 0, 0], out_ns[-1, 0, 0])
    assert np.isclose(init_ns[-2, 0, 0], out_ns[-2, 0, 0])

    return


@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_downsample_in_time_varying_int_time_partial_flags(hera_uvh5):
    """Test downsample_in_time handling of file with integration time changing
    within a baseline and partial flagging.
    """
    uv_object = hera_uvh5
    uv_object.phase_to_time(Time(uv_object.time_array[0], format="jd"))
    # reorder to make sure we get the right value later
    uv_object.reorder_blts(order="baseline", minor_order="time")

    # downselect to 14 times
    uv_object.select(times=np.unique(uv_object.time_array)[:14])

    # check that there are no flags
    assert np.nonzero(uv_object.flag_array)[0].size == 0

    # change last 2 integrations to be twice as long
    # (so 12 normal length, 2 double length)
    # change integration time & time array to match
    inds01 = uv_object.antpair2ind(0, 1)
    initial_int_time = uv_object.integration_time[inds01[0]]
    # time array is in jd, integration time is in sec
    uv_object.time_array[inds01[-2]] += (initial_int_time / 2) / (24 * 3600)
    uv_object.time_array[inds01[-1]] += (3 * initial_int_time / 2) / (24 * 3600)
    uv_object.integration_time[inds01[-2:]] += initial_int_time
    uv_object.Ntimes = np.unique(uv_object.time_array).size

    # add a flag on last time
    uv_object.flag_array[inds01[-1], :, :, :] = True
    # add a flag on third to last time
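    # (together with the flag on the last time, this puts a flag on one
    # regular-length and one double-length integration going into the
    # multi-stage downsampling below)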
uv_object.flag_array[inds01[-3], :, :, :] = True\n\n uv_object2 = uv_object.copy()\n\n with uvtest.check_warnings(\n UserWarning, match=\"The uvw_array does not match the expected values\",\n ):\n uv_object.downsample_in_time(min_int_time=4 * initial_int_time)\n with uvtest.check_warnings(None):\n uv_object.downsample_in_time(min_int_time=8 * initial_int_time)\n with uvtest.check_warnings(\n UserWarning, match=\"The uvw_array does not match the expected values\",\n ):\n uv_object2.downsample_in_time(min_int_time=8 * initial_int_time)\n\n assert uv_object.history != uv_object2.history\n uv_object2.history = uv_object.history\n\n assert uv_object == uv_object2\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_downsample_in_time_varying_integration_time_warning(hera_uvh5):\n \"\"\"Test downsample_in_time handling of file with integration time changing\n within a baseline, but without adjusting the time_array so there is a mismatch.\n \"\"\"\n uv_object = hera_uvh5\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n\n # save some values for later\n init_wf = uv_object.get_data(0, 1)\n # check that there are no flags\n assert np.nonzero(uv_object.flag_array)[0].size == 0\n init_ns = uv_object.get_nsamples(0, 1)\n\n # Next, change just integration time, so time array doesn't match\n inds01 = uv_object.antpair2ind(0, 1)\n initial_int_time = uv_object.integration_time[inds01[0]]\n uv_object.integration_time[inds01[-2:]] += initial_int_time\n min_integration_time = 2 * np.amin(uv_object.integration_time)\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The time difference between integrations is different than\",\n \"The uvw_array does not match the expected values\",\n ],\n ):\n uv_object.downsample_in_time(min_int_time=min_integration_time)\n\n # Should have all the new integration time\n # (for this file with 20 integrations and a factor of 2 downsampling)\n assert np.all(np.isclose(uv_object.integration_time, min_integration_time))\n\n # output data should be the average\n out_wf = uv_object.get_data(0, 1)\n assert np.isclose((init_wf[0, 0, 0] + init_wf[1, 0, 0]) / 2.0, out_wf[0, 0, 0])\n\n # this should be true because there are no flags\n out_ns = uv_object.get_nsamples(0, 1)\n assert np.isclose((init_ns[0, 0, 0] + init_ns[1, 0, 0]) / 2.0, out_ns[0, 0, 0])\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:Data will be unphased and rephased\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_downsample_in_time(hera_uvh5):\n \"\"\"Test round trip works\"\"\"\n uv_object = hera_uvh5\n\n # set uvws from antenna positions so they'll agree later.\n # the fact that this is required is a bit concerning, it means that\n # our calculated uvws from the antenna positions do not match what's in the file\n uv_object.set_uvws_from_antenna_positions()\n\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n uv_object.upsample_in_time(max_integration_time, blt_order=\"baseline\")\n assert 
np.amax(uv_object.integration_time) <= max_integration_time\n new_Nblts = uv_object.Nblts\n\n # check that calling upsample again with the same max_integration_time\n # gives warning and does nothing\n with uvtest.check_warnings(\n UserWarning, \"All values in the integration_time array are already longer\"\n ):\n uv_object.upsample_in_time(max_integration_time, blt_order=\"baseline\")\n assert uv_object.Nblts == new_Nblts\n\n # check that calling upsample again with the almost the same max_integration_time\n # gives warning and does nothing\n small_number = 0.9 * uv_object._integration_time.tols[1]\n with uvtest.check_warnings(\n UserWarning, \"All values in the integration_time array are already longer\"\n ):\n uv_object.upsample_in_time(\n max_integration_time - small_number, blt_order=\"baseline\"\n )\n assert uv_object.Nblts == new_Nblts\n\n uv_object.downsample_in_time(\n min_int_time=np.amin(uv_object2.integration_time), blt_order=\"baseline\"\n )\n\n # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we\n # do in testing if the iers url is down. See conftest.py for more info.\n if iers.conf.auto_max_age is None:\n uv_object._lst_array.tols = (0, 1e-4)\n\n # make sure that history is correct\n assert (\n \"Upsampled data to 0.939524 second integration time using pyuvdata.\"\n in uv_object.history\n )\n assert (\n \"Downsampled data to 1.879048 second integration time using pyuvdata.\"\n in uv_object.history\n )\n\n # overwrite history and check for equality\n uv_object.history = uv_object2.history\n assert uv_object == uv_object2\n\n # check that calling downsample again with the same min_integration_time\n # gives warning and does nothing\n with uvtest.check_warnings(\n UserWarning, match=\"All values in the integration_time array are already longer\"\n ):\n uv_object.downsample_in_time(\n min_int_time=np.amin(uv_object2.integration_time), blt_order=\"baseline\"\n )\n assert uv_object.Nblts == uv_object2.Nblts\n\n # check that calling upsample again with the almost the same min_integration_time\n # gives warning and does nothing\n with uvtest.check_warnings(\n UserWarning, match=\"All values in the integration_time array are already longer\"\n ):\n uv_object.upsample_in_time(\n np.amin(uv_object2.integration_time) + small_number, blt_order=\"baseline\"\n )\n\n assert uv_object.Nblts == uv_object2.Nblts\n\n return\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:Data will be unphased and rephased\")\[email protected](\"ignore:There is a gap in the times of baseline\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_downsample_in_time_odd_resample(hera_uvh5):\n \"\"\"Test round trip works with odd resampling\"\"\"\n uv_object = hera_uvh5\n\n # set uvws from antenna positions so they'll agree later.\n # the fact that this is required is a bit concerning, it means that\n # our calculated uvws from the antenna positions do not match what's in the file\n uv_object.set_uvws_from_antenna_positions()\n\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n # try again with a resampling factor of 3 (test odd numbers)\n max_integration_time = np.amin(uv_object.integration_time) / 3.0\n uv_object.upsample_in_time(max_integration_time, 
blt_order=\"baseline\")\n assert np.amax(uv_object.integration_time) <= max_integration_time\n\n uv_object.downsample_in_time(\n np.amin(uv_object2.integration_time), blt_order=\"baseline\"\n )\n\n # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we\n # do in testing if the iers url is down. See conftest.py for more info.\n if iers.conf.auto_max_age is None:\n uv_object._lst_array.tols = (0, 1e-4)\n\n # make sure that history is correct\n assert (\n \"Upsampled data to 0.626349 second integration time using pyuvdata.\"\n in uv_object.history\n )\n assert (\n \"Downsampled data to 1.879048 second integration time using pyuvdata.\"\n in uv_object.history\n )\n\n # overwrite history and check for equality\n uv_object.history = uv_object2.history\n assert uv_object == uv_object2\n\n\[email protected](\"ignore:The xyz array in ENU_from_ECEF\")\[email protected](\"ignore:The enu array in ECEF_from_ENU\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_upsample_downsample_in_time_metadata_only(hera_uvh5):\n \"\"\"Test round trip works with metadata-only objects\"\"\"\n uv_object = hera_uvh5\n\n # drop the data arrays\n uv_object.data_array = None\n uv_object.flag_array = None\n uv_object.nsample_array = None\n\n # set uvws from antenna positions so they'll agree later.\n # the fact that this is required is a bit concerning, it means that\n # our calculated uvws from the antenna positions do not match what's in the file\n uv_object.set_uvws_from_antenna_positions()\n\n uv_object.phase_to_time(Time(uv_object.time_array[0], format=\"jd\"))\n\n # reorder to make sure we get the right value later\n uv_object.reorder_blts(order=\"baseline\", minor_order=\"time\")\n uv_object2 = uv_object.copy()\n\n max_integration_time = np.amin(uv_object.integration_time) / 2.0\n uv_object.upsample_in_time(max_integration_time, blt_order=\"baseline\")\n assert np.amax(uv_object.integration_time) <= max_integration_time\n\n uv_object.downsample_in_time(\n np.amin(uv_object2.integration_time), blt_order=\"baseline\"\n )\n\n # increase tolerance on LST if iers.conf.auto_max_age is set to None, as we\n # do in testing if the iers url is down. 
See conftest.py for more info.
    if iers.conf.auto_max_age is None:
        uv_object._lst_array.tols = (0, 1e-4)

    # make sure that history is correct
    assert (
        "Upsampled data to 0.939524 second integration time using pyuvdata."
        in uv_object.history
    )
    assert (
        "Downsampled data to 1.879048 second integration time using pyuvdata."
        in uv_object.history
    )

    # overwrite history and check for equality
    uv_object.history = uv_object2.history
    assert uv_object == uv_object2


@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time(bda_test_file):
    """Test the resample_in_time method"""
    # Note this file has slight variations in the delta t between integrations
    # that cause our gap test to issue a warning, but the variations are small.
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file

    # save some initial info
    # 2s integration time
    init_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    init_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    init_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    init_data_136_137 = uv_object.get_data((136, 137))

    uv_object.resample_in_time(8)
    # Should have all the target integration time
    assert np.all(np.isclose(uv_object.integration_time, 8))

    # 2s integration time
    out_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    out_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    out_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    out_data_136_137 = uv_object.get_data((136, 137))

    # check array sizes make sense
    assert out_data_1_136.size * 4 == init_data_1_136.size
    assert out_data_1_137.size * 2 == init_data_1_137.size
    assert out_data_1_138.size == init_data_1_138.size
    assert out_data_136_137.size / 2 == init_data_136_137.size

    # check some values
    assert np.isclose(np.mean(init_data_1_136[0:4, 0, 0]), out_data_1_136[0, 0, 0])
    assert np.isclose(np.mean(init_data_1_137[0:2, 0, 0]), out_data_1_137[0, 0, 0])
    assert np.isclose(init_data_1_138[0, 0, 0], out_data_1_138[0, 0, 0])
    assert np.isclose(init_data_136_137[0, 0, 0], out_data_136_137[0, 0, 0])

    return


@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_downsample_only(bda_test_file):
    """Test resample_in_time with downsampling only"""
    # Note this file has slight variations in the delta t between integrations
    # that cause our gap test to issue a warning, but the variations are small.
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file

    # save some initial info
    # 2s integration time
    init_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    init_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    init_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    init_data_136_137 = uv_object.get_data((136, 137))

    # resample again, with only_downsample set
    uv_object.resample_in_time(8, only_downsample=True)
    # Should have all greater than or equal to the target integration time
    assert np.all(
        np.logical_or(
            np.isclose(uv_object.integration_time, 8),
            np.isclose(uv_object.integration_time, 16),
        )
    )

    # 2s integration time
    out_data_1_136 = 
uv_object.get_data((1, 136))
    # 4s integration time
    out_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    out_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    out_data_136_137 = uv_object.get_data((136, 137))

    # check array sizes make sense
    assert out_data_1_136.size * 4 == init_data_1_136.size
    assert out_data_1_137.size * 2 == init_data_1_137.size
    assert out_data_1_138.size == init_data_1_138.size
    assert out_data_136_137.size == init_data_136_137.size

    # check some values
    assert np.isclose(np.mean(init_data_1_136[0:4, 0, 0]), out_data_1_136[0, 0, 0])
    assert np.isclose(np.mean(init_data_1_137[0:2, 0, 0]), out_data_1_137[0, 0, 0])
    assert np.isclose(init_data_1_138[0, 0, 0], out_data_1_138[0, 0, 0])
    assert np.isclose(init_data_136_137[0, 0, 0], out_data_136_137[0, 0, 0])

    return


@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_only_upsample(bda_test_file):
    """Test resample_in_time with only upsampling"""
    # Note this file has slight variations in the delta t between integrations
    # that cause our gap test to issue a warning, but the variations are small.
    # We aren't worried about them, so we filter those warnings
    uv_object = bda_test_file

    # save some initial info
    # 2s integration time
    init_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    init_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    init_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    init_data_136_137 = uv_object.get_data((136, 137))

    # again, with only_upsample set
    uv_object.resample_in_time(8, only_upsample=True)
    # Should have all less than or equal to the target integration time
    assert np.all(
        np.logical_or(
            np.logical_or(
                np.isclose(uv_object.integration_time, 2.0),
                np.isclose(uv_object.integration_time, 4.0),
            ),
            np.isclose(uv_object.integration_time, 8.0),
        )
    )

    # 2s integration time
    out_data_1_136 = uv_object.get_data((1, 136))
    # 4s integration time
    out_data_1_137 = uv_object.get_data((1, 137))
    # 8s integration time
    out_data_1_138 = uv_object.get_data((1, 138))
    # 16s integration time
    out_data_136_137 = uv_object.get_data((136, 137))

    # check array sizes make sense
    assert out_data_1_136.size == init_data_1_136.size
    assert out_data_1_137.size == init_data_1_137.size
    assert out_data_1_138.size == init_data_1_138.size
    assert out_data_136_137.size / 2 == init_data_136_137.size

    # check some values
    assert np.isclose(init_data_1_136[0, 0, 0], out_data_1_136[0, 0, 0])
    assert np.isclose(init_data_1_137[0, 0, 0], out_data_1_137[0, 0, 0])
    assert np.isclose(init_data_1_138[0, 0, 0], out_data_1_138[0, 0, 0])
    assert np.isclose(init_data_136_137[0, 0, 0], out_data_136_137[0, 0, 0])

    return


@pytest.mark.filterwarnings("ignore:Telescope mock-HERA is not in known_telescopes")
@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_partial_flags(bda_test_file):
    """Test resample_in_time with partial flags"""
    # Note this file has slight variations in the delta t between integrations
    # that cause our gap test to issue a warning, but the variations are small.
    # We aren't worried about them, so we filter those warnings
    uv = bda_test_file
    # For ease, select a single baseline
    uv.select(bls=[(1, 136)])
    # Flag one time
    uv.flag_array[0, :, :, :] = True
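    # Keep a copy so we can check that downsampling 2s -> 4s -> 8s in two stages
    # matches a direct 2s -> 8s downsample, including how the flagged first
    # integration propagates.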
uv2 = uv.copy()

    # Downsample in two stages
    uv.resample_in_time(4.0, only_downsample=True)
    uv.resample_in_time(8.0, only_downsample=True)
    # Downsample in a single stage
    uv2.resample_in_time(8.0, only_downsample=True)

    assert uv.history != uv2.history
    uv2.history = uv.history
    assert uv == uv2
    return


@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_downsample_in_time_mwa():
    """
    Test that resample_in_time works with numerical weirdnesses.

    In particular, when min_int_time is not quite an integer multiple of
    integration_time. This test broke with a prior bug (see issue 773).
    """
    filename = os.path.join(DATA_PATH, "mwa_integration_time.uvh5")
    uv = UVData()
    uv.read(filename)
    uv.phase_to_time(np.mean(uv.time_array))
    uv_object2 = uv.copy()

    # all data within 5 milliseconds of 2 second integrations
    assert np.allclose(uv.integration_time, 2, atol=5e-3)
    min_int_time = 4.0
    uv.resample_in_time(min_int_time, only_downsample=True, keep_ragged=False)

    assert np.all(uv.integration_time > (min_int_time - 5e-3))

    # Now do the human expected thing:
    init_data = uv_object2.get_data((61, 58))
    uv_object2.downsample_in_time(n_times_to_avg=2, keep_ragged=False)

    assert uv_object2.Ntimes == 5

    out_data = uv_object2.get_data((61, 58))

    assert np.isclose(np.mean(init_data[0:2, 0, 0]), out_data[0, 0, 0])


@pytest.mark.filterwarnings("ignore:There is a gap in the times of baseline")
def test_resample_in_time_warning():
    filename = os.path.join(DATA_PATH, "mwa_integration_time.uvh5")
    uv = UVData()
    uv.read(filename)

    uv2 = uv.copy()

    with uvtest.check_warnings(
        UserWarning, match="No resampling will be done because target time"
    ):
        uv.resample_in_time(3, keep_ragged=False)

    assert uv2 == uv


@pytest.mark.filterwarnings("ignore:Telescope EVLA is not")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_frequency_average(uvdata_data):
    """Test averaging in frequency."""
    eq_coeffs = np.tile(
        np.arange(uvdata_data.uv_object.Nfreqs, dtype=np.float),
        (uvdata_data.uv_object.Nants_telescope, 1),
    )
    uvdata_data.uv_object.eq_coeffs = eq_coeffs
    uvdata_data.uv_object.check()

    # check that there's no flagging
    assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0

    with uvtest.check_warnings(UserWarning, "eq_coeffs vary by frequency"):
        uvdata_data.uv_object.frequency_average(2),

    assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)

    # TODO: Spw axis to be collapsed in future release
    expected_freqs = uvdata_data.uv_object2.freq_array.reshape(
        1, int(uvdata_data.uv_object2.Nfreqs / 2), 2
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0

    expected_coeffs = eq_coeffs.reshape(
        uvdata_data.uv_object2.Nants_telescope,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
    ).mean(axis=2)
    assert np.max(np.abs(uvdata_data.uv_object.eq_coeffs - expected_coeffs)) == 0

    # no flagging, so the following is true
    expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze="none")
    # TODO: Spw axis to be collapsed in future release
    reshape_tuple = (
        expected_data.shape[0],
        1,
        int(uvdata_data.uv_object2.Nfreqs / 2),
        2,
        uvdata_data.uv_object2.Npols,
    )
    expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)
    assert np.allclose(
        uvdata_data.uv_object.get_data(0, 1, squeeze="none"), expected_data
    )

    assert 
np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)\n assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_uneven(uvdata_data):\n \"\"\"Test averaging in frequency with a number that is not a factor of Nfreqs.\"\"\"\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n with uvtest.check_warnings(\n UserWarning,\n [\n \"Nfreqs does not divide by `n_chan_to_avg` evenly. The final 1 \"\n \"frequencies will be excluded, to control which frequencies to exclude, \"\n \"use a select to control.\",\n \"The uvw_array does not match the expected values\",\n ],\n ):\n uvdata_data.uv_object.frequency_average(7)\n\n assert uvdata_data.uv_object2.Nfreqs % 7 != 0\n\n assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs // 7)\n\n expected_freqs = uvdata_data.uv_object2.freq_array[\n :, np.arange((uvdata_data.uv_object2.Nfreqs // 7) * 7)\n ]\n\n # TODO: Spw axis to be collapsed in future release\n expected_freqs = expected_freqs.reshape(\n 1, int(uvdata_data.uv_object2.Nfreqs // 7), 7\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0\n\n # no flagging, so the following is true\n expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze=\"none\")\n expected_data = expected_data[\n :, :, 0 : ((uvdata_data.uv_object2.Nfreqs // 7) * 7), :\n ]\n # TODO: Spw axis to be collapsed in future release\n reshape_tuple = (\n expected_data.shape[0],\n 1,\n int(uvdata_data.uv_object2.Nfreqs // 7),\n 7,\n uvdata_data.uv_object2.Npols,\n )\n expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)\n assert np.allclose(\n uvdata_data.uv_object.get_data(0, 1, squeeze=\"none\"), expected_data\n )\n\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_flagging(uvdata_data):\n \"\"\"Test averaging in frequency with flagging all samples averaged.\"\"\"\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n # apply some flagging for testing\n inds01 = uvdata_data.uv_object.antpair2ind(0, 1)\n uvdata_data.uv_object.flag_array[inds01[0], :, 0:2, :] = True\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n == uvdata_data.uv_object.Npols * 2\n )\n\n uvdata_data.uv_object.frequency_average(2)\n\n assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)\n\n # TODO: Spw axis to be collapsed in future release\n expected_freqs = uvdata_data.uv_object2.freq_array.reshape(\n 1, int(uvdata_data.uv_object2.Nfreqs / 2), 2\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0\n\n # TODO: Spw axis to be collapsed in future release\n expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze=\"none\")\n reshape_tuple = (\n expected_data.shape[0],\n 1,\n int(uvdata_data.uv_object2.Nfreqs / 2),\n 2,\n uvdata_data.uv_object2.Npols,\n )\n expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)\n assert np.allclose(\n uvdata_data.uv_object.get_data(0, 1, squeeze=\"none\"), expected_data\n )\n\n assert 
np.sum(uvdata_data.uv_object.flag_array[inds01[0], :, 0, :]) == 4\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n == uvdata_data.uv_object.Npols\n )\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array[inds01[1:], :, 0, :])[0].size == 0\n )\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_flagging_partial(uvdata_data):\n \"\"\"Test averaging in frequency with flagging only one sample averaged.\"\"\"\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n # apply some flagging for testing\n inds01 = uvdata_data.uv_object.antpair2ind(0, 1)\n uvdata_data.uv_object.flag_array[inds01[0], :, 0, :] = True\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n == uvdata_data.uv_object.Npols\n )\n\n uvdata_data.uv_object.frequency_average(2)\n\n assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)\n\n # TODO: Spw axis to be collapsed in future release\n expected_freqs = uvdata_data.uv_object2.freq_array.reshape(\n 1, int(uvdata_data.uv_object2.Nfreqs / 2), 2\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0\n\n expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze=\"none\")\n # TODO: Spw axis to be collapsed in future release\n reshape_tuple = (\n expected_data.shape[0],\n 1,\n int(uvdata_data.uv_object2.Nfreqs / 2),\n 2,\n uvdata_data.uv_object2.Npols,\n )\n expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)\n expected_data[0, :, 0, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 1, :]\n assert np.allclose(\n uvdata_data.uv_object.get_data(0, 1, squeeze=\"none\"), expected_data\n )\n\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_flagging_full_and_partial(uvdata_data):\n \"\"\"\n Test averaging in frequency with flagging all of one and only one of\n another sample averaged.\n \"\"\"\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n # apply some flagging for testing\n inds01 = uvdata_data.uv_object.antpair2ind(0, 1)\n uvdata_data.uv_object.flag_array[inds01[0], :, 0:3, :] = True\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n == uvdata_data.uv_object.Npols * 3\n )\n\n uvdata_data.uv_object.frequency_average(2)\n\n assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)\n\n # TODO: Spw axis to be collapsed in future release\n expected_freqs = uvdata_data.uv_object2.freq_array.reshape(\n 1, int(uvdata_data.uv_object2.Nfreqs / 2), 2\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0\n\n expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze=\"none\")\n # TODO: Spw axis to be collapsed in future release\n reshape_tuple = (\n expected_data.shape[0],\n 1,\n int(uvdata_data.uv_object2.Nfreqs / 2),\n 2,\n uvdata_data.uv_object2.Npols,\n )\n expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)\n\n expected_data[0, :, 1, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 3, :]\n\n assert np.allclose(\n uvdata_data.uv_object.get_data(0, 1, squeeze=\"none\"), expected_data\n )\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n 
== uvdata_data.uv_object.Npols\n )\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_flagging_partial_twostage(uvdata_data):\n \"\"\"\n Test averaging in frequency in two stages with flagging only one sample averaged.\n \"\"\"\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n # apply some flagging for testing\n inds01 = uvdata_data.uv_object.antpair2ind(0, 1)\n uvdata_data.uv_object.flag_array[inds01[0], :, 0, :] = True\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n == uvdata_data.uv_object.Npols\n )\n\n uv_object3 = uvdata_data.uv_object.copy()\n\n uvdata_data.uv_object.frequency_average(2)\n uvdata_data.uv_object.frequency_average(2)\n\n uv_object3.frequency_average(4)\n\n assert uvdata_data.uv_object == uv_object3\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_summing_corr_mode(uvdata_data):\n \"\"\"Test averaging in frequency.\"\"\"\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n uvdata_data.uv_object.frequency_average(2, summing_correlator_mode=True)\n\n assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)\n\n # TODO: Spw axis to be collapsed in future release\n expected_freqs = uvdata_data.uv_object2.freq_array.reshape(\n 1, int(uvdata_data.uv_object2.Nfreqs / 2), 2\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0\n\n # no flagging, so the following is true\n expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze=\"none\")\n # TODO: Spw axis to be collapsed in future release\n reshape_tuple = (\n expected_data.shape[0],\n 1,\n int(uvdata_data.uv_object2.Nfreqs / 2),\n 2,\n uvdata_data.uv_object2.Npols,\n )\n expected_data = expected_data.reshape(reshape_tuple).sum(axis=3)\n assert np.allclose(\n uvdata_data.uv_object.get_data(0, 1, squeeze=\"none\"), expected_data\n )\n\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n assert not isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)\n assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_propagate_flags(uvdata_data):\n \"\"\"\n Test averaging in frequency with flagging all of one and only one of\n another sample averaged, and propagating flags. 
Data should be identical,\n but flags should be slightly different compared to other test of the same\n name.\n \"\"\"\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n # apply some flagging for testing\n inds01 = uvdata_data.uv_object.antpair2ind(0, 1)\n uvdata_data.uv_object.flag_array[inds01[0], :, 0:3, :] = True\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n == uvdata_data.uv_object.Npols * 3\n )\n\n uvdata_data.uv_object.frequency_average(2, propagate_flags=True)\n\n assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)\n\n # TODO: Spw axis to be collapsed in future release\n expected_freqs = uvdata_data.uv_object2.freq_array.reshape(\n 1, int(uvdata_data.uv_object2.Nfreqs / 2), 2\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0\n\n expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze=\"none\")\n # TODO: Spw axis to be collapsed in future release\n reshape_tuple = (\n expected_data.shape[0],\n 1,\n int(uvdata_data.uv_object2.Nfreqs / 2),\n 2,\n uvdata_data.uv_object2.Npols,\n )\n expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)\n\n expected_data[0, :, 1, :] = uvdata_data.uv_object2.data_array[inds01[0], :, 3, :]\n\n assert np.allclose(\n uvdata_data.uv_object.get_data(0, 1, squeeze=\"none\"), expected_data\n )\n # Twice as many flags should exist compared to test of previous name.\n assert (\n np.nonzero(uvdata_data.uv_object.flag_array)[0].size\n == 2 * uvdata_data.uv_object.Npols\n )\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_frequency_average_nsample_precision(uvdata_data):\n \"\"\"Test averaging in frequency with a half-precision nsample_array.\"\"\"\n eq_coeffs = np.tile(\n np.arange(uvdata_data.uv_object.Nfreqs, dtype=np.float),\n (uvdata_data.uv_object.Nants_telescope, 1),\n )\n uvdata_data.uv_object.eq_coeffs = eq_coeffs\n uvdata_data.uv_object.check()\n\n # check that there's no flagging\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n # change precision of the nsample array\n uvdata_data.uv_object.nsample_array = uvdata_data.uv_object.nsample_array.astype(\n np.float16\n )\n\n with uvtest.check_warnings(UserWarning, \"eq_coeffs vary by frequency\"):\n uvdata_data.uv_object.frequency_average(2),\n\n assert uvdata_data.uv_object.Nfreqs == (uvdata_data.uv_object2.Nfreqs / 2)\n\n # TODO: Spw axis to be collapsed in future release\n expected_freqs = uvdata_data.uv_object2.freq_array.reshape(\n 1, int(uvdata_data.uv_object2.Nfreqs / 2), 2\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.freq_array - expected_freqs)) == 0\n\n expected_coeffs = eq_coeffs.reshape(\n uvdata_data.uv_object2.Nants_telescope,\n int(uvdata_data.uv_object2.Nfreqs / 2),\n 2,\n ).mean(axis=2)\n assert np.max(np.abs(uvdata_data.uv_object.eq_coeffs - expected_coeffs)) == 0\n\n # no flagging, so the following is true\n expected_data = uvdata_data.uv_object2.get_data(0, 1, squeeze=\"none\")\n # TODO: Spw axis to be collapsed in future release\n reshape_tuple = (\n expected_data.shape[0],\n 1,\n int(uvdata_data.uv_object2.Nfreqs / 2),\n 2,\n uvdata_data.uv_object2.Npols,\n )\n expected_data = expected_data.reshape(reshape_tuple).mean(axis=3)\n assert np.allclose(\n uvdata_data.uv_object.get_data(0, 1, squeeze=\"none\"), expected_data\n )\n\n assert np.nonzero(uvdata_data.uv_object.flag_array)[0].size == 0\n\n assert not 
isinstance(uvdata_data.uv_object.data_array, np.ma.MaskedArray)\n assert not isinstance(uvdata_data.uv_object.nsample_array, np.ma.MaskedArray)\n\n # make sure we still have a half-precision nsample_array\n assert uvdata_data.uv_object.nsample_array.dtype.type is np.float16\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_remove_eq_coeffs_divide(uvdata_data):\n \"\"\"Test using the remove_eq_coeffs method with divide convention.\"\"\"\n # give eq_coeffs to the object\n eq_coeffs = np.empty(\n (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs),\n dtype=np.float,\n )\n for i, ant in enumerate(uvdata_data.uv_object.antenna_numbers):\n eq_coeffs[i, :] = ant + 1\n uvdata_data.uv_object.eq_coeffs = eq_coeffs\n uvdata_data.uv_object.eq_coeffs_convention = \"divide\"\n uvdata_data.uv_object.remove_eq_coeffs()\n\n # make sure the right coefficients were removed\n for key in uvdata_data.uv_object.get_antpairs():\n eq1 = key[0] + 1\n eq2 = key[1] + 1\n blt_inds = uvdata_data.uv_object.antpair2ind(key)\n norm_data = uvdata_data.uv_object.data_array[blt_inds, 0, :, :]\n unnorm_data = uvdata_data.uv_object2.data_array[blt_inds, 0, :, :]\n assert np.allclose(norm_data, unnorm_data / (eq1 * eq2))\n\n return\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_remove_eq_coeffs_multiply(uvdata_data):\n \"\"\"Test using the remove_eq_coeffs method with multiply convention.\"\"\"\n # give eq_coeffs to the object\n eq_coeffs = np.empty(\n (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs),\n dtype=np.float,\n )\n for i, ant in enumerate(uvdata_data.uv_object.antenna_numbers):\n eq_coeffs[i, :] = ant + 1\n uvdata_data.uv_object.eq_coeffs = eq_coeffs\n uvdata_data.uv_object.eq_coeffs_convention = \"multiply\"\n uvdata_data.uv_object.remove_eq_coeffs()\n\n # make sure the right coefficients were removed\n for key in uvdata_data.uv_object.get_antpairs():\n eq1 = key[0] + 1\n eq2 = key[1] + 1\n blt_inds = uvdata_data.uv_object.antpair2ind(key)\n norm_data = uvdata_data.uv_object.data_array[blt_inds, 0, :, :]\n unnorm_data = uvdata_data.uv_object2.data_array[blt_inds, 0, :, :]\n assert np.allclose(norm_data, unnorm_data * (eq1 * eq2))\n\n return\n\n\[email protected](\"ignore:Telescope EVLA is not\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_remove_eq_coeffs_errors(uvdata_data):\n \"\"\"Test errors raised by remove_eq_coeffs method.\"\"\"\n # raise error when eq_coeffs are not defined\n with pytest.raises(ValueError) as cm:\n uvdata_data.uv_object.remove_eq_coeffs()\n assert str(cm.value).startswith(\"The eq_coeffs attribute must be defined\")\n\n # raise error when eq_coeffs are defined but not eq_coeffs_convention\n uvdata_data.uv_object.eq_coeffs = np.ones(\n (uvdata_data.uv_object.Nants_telescope, uvdata_data.uv_object.Nfreqs)\n )\n with pytest.raises(ValueError) as cm:\n uvdata_data.uv_object.remove_eq_coeffs()\n assert str(cm.value).startswith(\n \"The eq_coeffs_convention attribute must be defined\"\n )\n\n # raise error when convention is not a valid choice\n uvdata_data.uv_object.eq_coeffs_convention = \"foo\"\n with pytest.raises(ValueError) as cm:\n uvdata_data.uv_object.remove_eq_coeffs()\n assert str(cm.value).startswith(\"Got unknown convention foo. 
Must be one of\")\n\n return\n\n\[email protected](\n \"read_func,filelist\",\n [\n (\"read_miriad\", [os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcA\")] * 2),\n (\n \"read_mwa_corr_fits\",\n [[mwa_corr_files[0:2], [mwa_corr_files[0], mwa_corr_files[2]]]],\n ),\n (\"read_uvh5\", [os.path.join(DATA_PATH, \"zen.2458661.23480.HH.uvh5\")] * 2),\n (\n \"read_uvfits\",\n [os.path.join(DATA_PATH, \"day2_TDEM0003_10s_norx_1src_1spw.uvfits\")] * 2,\n ),\n (\n \"read_ms\",\n [\n os.path.join(DATA_PATH, \"multi_1.ms\"),\n os.path.join(DATA_PATH, \"multi_2.ms\"),\n ],\n ),\n (\n \"read_fhd\",\n [\n list(np.array(fhd_files)[[0, 1, 2, 4, 6, 7]]),\n list(np.array(fhd_files)[[0, 2, 3, 5, 6, 7]]),\n ],\n ),\n ],\n)\ndef test_multifile_read_errors(read_func, filelist):\n uv = UVData()\n with pytest.raises(ValueError) as cm:\n getattr(uv, read_func)(filelist)\n assert str(cm.value).startswith(\n \"Reading multiple files from class specific read functions is no \"\n \"longer supported.\"\n )\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_multifile_read_check(hera_uvh5, tmp_path):\n \"\"\"Test setting skip_bad_files=True when reading in files\"\"\"\n\n uvTrue = hera_uvh5\n uvh5_file = os.path.join(DATA_PATH, \"zen.2458661.23480.HH.uvh5\")\n\n # Create a test file and remove header info to 'corrupt' it\n testfile = str(tmp_path / \"zen.2458661.23480.HH.uvh5\")\n\n uvTrue.write_uvh5(testfile)\n with h5py.File(testfile, \"r+\") as h5f:\n del h5f[\"Header/ant_1_array\"]\n\n uv = UVData()\n # Test that the expected error arises\n with pytest.raises(KeyError) as cm:\n uv.read(testfile, skip_bad_files=False)\n assert \"Unable to open object (object 'ant_1_array' doesn't exist)\" in str(cm.value)\n\n # Test when the corrupted file is at the beggining, skip_bad_files=False\n fileList = [testfile, uvh5_file]\n with pytest.raises(KeyError) as cm:\n with uvtest.check_warnings(UserWarning, match=\"Failed to read\"):\n uv.read(fileList, skip_bad_files=False)\n assert \"Unable to open object (object 'ant_1_array' doesn't exist)\" in str(cm.value)\n assert uv != uvTrue\n\n # Test when the corrupted file is at the beggining, skip_bad_files=True\n fileList = [testfile, uvh5_file]\n with uvtest.check_warnings(\n UserWarning,\n match=[\n \"Failed to read\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv.read(fileList, skip_bad_files=True)\n assert uv == uvTrue\n\n # Test when the corrupted file is at the end of a list\n fileList = [uvh5_file, testfile]\n with uvtest.check_warnings(\n UserWarning,\n match=[\n \"Failed to read\",\n \"The uvw_array does not match the expected values given the antenna \"\n \"positions.\",\n ],\n ):\n uv.read(fileList, skip_bad_files=True)\n # Check that the uncorrupted file was still read in\n assert uv == uvTrue\n\n os.remove(testfile)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"err_type\", [\"KeyError\", \"ValueError\"])\ndef test_multifile_read_check_long_list(hera_uvh5, tmp_path, err_type):\n \"\"\"\n Test KeyError catching by setting skip_bad_files=True when\n reading in files for a list of length >2\n \"\"\"\n # Create mini files for testing\n uv = hera_uvh5\n\n fileList = []\n for i in range(0, 4):\n uv2 = uv.select(\n times=np.unique(uv.time_array)[i * 5 : i * 5 + 4], inplace=False\n )\n fname = str(tmp_path / f\"minifile_{i}.uvh5\")\n fileList.append(fname)\n uv2.write_uvh5(fname)\n if err_type == 
\"KeyError\":\n with h5py.File(fileList[-1], \"r+\") as h5f:\n del h5f[\"Header/ant_1_array\"]\n elif err_type == \"ValueError\":\n with h5py.File(fileList[-1], \"r+\") as h5f:\n h5f[\"Header/antenna_numbers\"][3] = 85\n h5f[\"Header/ant_1_array\"][2] = 1024\n\n # Test with corrupted file as last file in list, skip_bad_files=True\n uvTest = UVData()\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\"\n ]\n * 9\n + [\"Failed to read\"],\n ):\n uvTest.read(fileList[0:4], skip_bad_files=True)\n uvTrue = UVData()\n uvTrue.read(fileList[0:3], skip_bad_files=True)\n\n assert uvTest == uvTrue\n\n # Repeat above test, but with corrupted file as first file in list\n os.remove(fileList[3])\n uv2 = uv.select(times=np.unique(uv.time_array)[15:19], inplace=False)\n fname = str(tmp_path / f\"minifile_{3}.uvh5\")\n uv2.write_uvh5(fname)\n if err_type == \"KeyError\":\n with h5py.File(fileList[0], \"r+\") as h5f:\n del h5f[\"Header/ant_1_array\"]\n elif err_type == \"ValueError\":\n with h5py.File(fileList[0], \"r+\") as h5f:\n h5f[\"Header/antenna_numbers\"][3] = 85\n h5f[\"Header/ant_1_array\"][2] = 1024\n uvTest = UVData()\n with uvtest.check_warnings(\n UserWarning,\n [\"Failed to read\"]\n + [\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\"\n ]\n * 9,\n ):\n uvTest.read(fileList[0:4], skip_bad_files=True)\n uvTrue = UVData()\n uvTrue.read(fileList[1:4], skip_bad_files=True)\n\n assert uvTest == uvTrue\n\n # Test with corrupted file first in list, but with skip_bad_files=False\n uvTest = UVData()\n if err_type == \"KeyError\":\n with pytest.raises(KeyError, match=\"Unable to open object\"):\n with uvtest.check_warnings(UserWarning, match=\"Failed to read\"):\n uvTest.read(fileList[0:4], skip_bad_files=False)\n elif err_type == \"ValueError\":\n with pytest.raises(ValueError, match=\"Nants_data must be equal to\"):\n with uvtest.check_warnings(UserWarning, match=\"Failed to read\"):\n uvTest.read(fileList[0:4], skip_bad_files=False)\n uvTrue = UVData()\n uvTrue.read([fileList[1], fileList[2], fileList[3]], skip_bad_files=False)\n\n assert uvTest != uvTrue\n\n # Repeat above test, but with corrupted file in the middle of the list\n os.remove(fileList[0])\n uv2 = uv.select(times=np.unique(uv.time_array)[0:4], inplace=False)\n fname = str(tmp_path / f\"minifile_{0}.uvh5\")\n uv2.write_uvh5(fname)\n if err_type == \"KeyError\":\n with h5py.File(fileList[1], \"r+\") as h5f:\n del h5f[\"Header/ant_1_array\"]\n elif err_type == \"ValueError\":\n with h5py.File(fileList[1], \"r+\") as h5f:\n h5f[\"Header/antenna_numbers\"][3] = 85\n h5f[\"Header/ant_1_array\"][2] = 1024\n uvTest = UVData()\n with uvtest.check_warnings(\n UserWarning,\n [\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n \"Failed to read\",\n ]\n + [\n \"The uvw_array does not match the expected values given the \"\n \"antenna positions.\",\n ]\n * 8,\n ):\n uvTest.read(fileList[0:4], skip_bad_files=True)\n uvTrue = UVData()\n uvTrue.read([fileList[0], fileList[2], fileList[3]], skip_bad_files=True)\n\n assert uvTest == uvTrue\n\n # Test with corrupted file in middle of list, but with skip_bad_files=False\n uvTest = UVData()\n if err_type == \"KeyError\":\n with pytest.raises(KeyError, match=\"Unable to open object\"):\n with uvtest.check_warnings(UserWarning, match=\"Failed to read\"):\n uvTest.read(fileList[0:4], skip_bad_files=False)\n elif err_type == \"ValueError\":\n 
with pytest.raises(ValueError, match=\"Nants_data must be equal to\"):\n with uvtest.check_warnings(UserWarning, match=\"Failed to read\"):\n uvTest.read(fileList[0:4], skip_bad_files=False)\n uvTrue = UVData()\n uvTrue.read([fileList[0], fileList[2], fileList[3]], skip_bad_files=False)\n\n assert uvTest != uvTrue\n\n # Test case where all files in list are corrupted\n os.remove(fileList[1])\n uv2 = uv.select(times=np.unique(uv.time_array)[5:9], inplace=False)\n fname = str(tmp_path / f\"minifile_{1}.uvh5\")\n uv2.write_uvh5(fname)\n for file in fileList:\n if err_type == \"KeyError\":\n with h5py.File(file, \"r+\") as h5f:\n del h5f[\"Header/ant_1_array\"]\n elif err_type == \"ValueError\":\n with h5py.File(file, \"r+\") as h5f:\n h5f[\"Header/antenna_numbers\"][3] = 85\n h5f[\"Header/ant_1_array\"][2] = 1024\n uvTest = UVData()\n with uvtest.check_warnings(\n UserWarning,\n match=(\n \"########################################################\\n\"\n \"ALL FILES FAILED ON READ - NO READABLE FILES IN FILENAME\\n\"\n \"########################################################\"\n ),\n ):\n uvTest.read(fileList[0:4], skip_bad_files=True)\n uvTrue = UVData()\n\n assert uvTest == uvTrue\n\n os.remove(fileList[0])\n os.remove(fileList[1])\n os.remove(fileList[2])\n os.remove(fileList[3])\n\n return\n\n\ndef test_deprecation_warnings_set_phased():\n \"\"\"\n Test the deprecation warnings in set_phased et al.\n \"\"\"\n uv = UVData()\n # first call set_phased\n with uvtest.check_warnings(DeprecationWarning, match=\"`set_phased` is deprecated\"):\n uv.set_phased()\n assert uv.phase_type == \"phased\"\n assert uv._phase_center_epoch.required is True\n assert uv._phase_center_ra.required is True\n assert uv._phase_center_dec.required is True\n\n # now call set_drift\n with uvtest.check_warnings(DeprecationWarning, match=\"`set_drift` is deprecated\"):\n uv.set_drift()\n assert uv.phase_type == \"drift\"\n assert uv._phase_center_epoch.required is False\n assert uv._phase_center_ra.required is False\n assert uv._phase_center_dec.required is False\n\n # now call set_unknown_phase_type\n with uvtest.check_warnings(\n DeprecationWarning, match=\"`set_unknown_phase_type` is deprecated\"\n ):\n uv.set_unknown_phase_type()\n assert uv.phase_type == \"unknown\"\n assert uv._phase_center_epoch.required is False\n assert uv._phase_center_ra.required is False\n assert uv._phase_center_dec.required is False\n\n return\n\n\[email protected](\"ignore:Telescope EVLA is not in known_telescopes.\")\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_read_background_lsts():\n \"\"\"Test reading a file with the lst calc in the background.\"\"\"\n uvd = UVData()\n uvd2 = UVData()\n testfile = os.path.join(DATA_PATH, \"day2_TDEM0003_10s_norx_1src_1spw.uvfits\")\n uvd.read(testfile, background_lsts=False)\n uvd2.read(testfile, background_lsts=True)\n assert uvd == uvd2\n\n\ndef test_parse_ants_x_orientation_kwarg(hera_uvh5):\n uvd = hera_uvh5\n # call with x_orientation = None to make parse_ants read from the object\n ant_pair, pols = uvutils.parse_ants(uvd, \"cross\")\n ant_pair2, pols2 = uvd.parse_ants(\"cross\")\n assert np.array_equal(ant_pair, ant_pair2)\n assert np.array_equal(pols, pols2)\n", "id": "9152242", "language": "Python", "matching_score": 7.5800652503967285, "max_stars_count": 0, "path": "pyuvdata/uvdata/tests/test_uvdata.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD 
License\n\n\"\"\"Class for reading FHD save files.\"\"\"\nimport numpy as np\nimport warnings\nfrom scipy.io.idl import readsav\nfrom astropy import constants as const\n\nfrom .uvdata import UVData\nfrom .. import utils as uvutils\nfrom .. import telescopes as uvtel\n\n__all__ = [\"get_fhd_history\", \"FHD\"]\n\n\ndef get_fhd_history(settings_file, return_user=False):\n \"\"\"\n Small function to get the important history from an FHD settings text file.\n\n Includes information about the command line call, the user, machine name and date\n\n Parameters\n ----------\n settings_file : str\n FHD settings file name\n return_user : bool\n optionally return the username who ran FHD\n\n Returns\n -------\n history : str\n string of history extracted from the settings file\n user : str\n Only returned if return_user is True\n \"\"\"\n with open(settings_file, \"r\") as f:\n settings_lines = f.readlines()\n main_loc = None\n command_loc = None\n obs_loc = None\n user_line = None\n for ind, line in enumerate(settings_lines):\n if line.startswith(\"##MAIN\"):\n main_loc = ind\n if line.startswith(\"##COMMAND_LINE\"):\n command_loc = ind\n if line.startswith(\"##OBS\"):\n obs_loc = ind\n if line.startswith(\"User\"):\n user_line = ind\n if (\n main_loc is not None\n and command_loc is not None\n and obs_loc is not None\n and user_line is not None\n ):\n break\n\n main_lines = settings_lines[main_loc + 1 : command_loc]\n command_lines = settings_lines[command_loc + 1 : obs_loc]\n history_lines = [\"FHD history\\n\"] + main_lines + command_lines\n for ind, line in enumerate(history_lines):\n history_lines[ind] = line.rstrip().replace(\"\\t\", \" \")\n history = \"\\n\".join(history_lines)\n user = settings_lines[user_line].split()[1]\n\n if return_user:\n return history, user\n else:\n return history\n\n\nclass FHD(UVData):\n \"\"\"\n Defines a FHD-specific subclass of UVData for reading FHD save files.\n\n This class should not be interacted with directly, instead use the read_fhd\n method on the UVData class.\n \"\"\"\n\n def _latlonalt_close(self, latlonalt1, latlonalt2):\n radian_tols = self._phase_center_ra.tols\n loc_tols = self._telescope_location.tols\n latlon_close = np.allclose(\n np.array(latlonalt1[0:2]),\n np.array(latlonalt2[0:2]),\n rtol=radian_tols[0],\n atol=radian_tols[1],\n )\n alt_close = np.isclose(\n latlonalt1[2], latlonalt2[2], rtol=loc_tols[0], atol=loc_tols[1]\n )\n if latlon_close and alt_close:\n return True\n else:\n return False\n\n def _xyz_close(self, xyz1, xyz2):\n loc_tols = self._telescope_location.tols\n return np.allclose(xyz1, xyz2, rtol=loc_tols[0], atol=loc_tols[1])\n\n def read_fhd(\n self,\n filelist,\n use_model=False,\n background_lsts=True,\n read_data=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n strict_uvw_antpos_check=False,\n ):\n \"\"\"\n Read in data from a list of FHD files.\n\n Parameters\n ----------\n filelist : array_like of str\n The list/array of FHD save files to read from. Must include at\n least one polarization file, a params file and a flag file. An obs\n file is also required if `read_data` is False.\n use_model : bool\n Option to read in the model visibilities rather than the dirty\n visibilities (the default is False, meaning the dirty visibilities\n will be read).\n background_lsts : bool\n When set to True, the lst_array is calculated in a background thread.\n read_data : bool\n Read in the visibility, nsample and flag data. If set to False, only\n the metadata will be read in. 
Setting read_data to False results in\n a metadata only object. If read_data is False, an obs file must be\n included in the filelist. Note that if read_data is False, Npols is\n derived from the obs file and reflects the number of polarizations\n used in the FHD run. If read_data is True, Npols is given by the\n number of visibility data files provided in `filelist`.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after after reading in the file (the default is True,\n meaning the check will be run).\n check_extra : bool\n Option to check optional parameters as well as required ones (the\n default is True, meaning the optional parameters will be checked).\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n reading in the file (the default is True, meaning the acceptable\n range check will be done).\n strict_uvw_antpos_check : bool\n Option to raise an error rather than a warning if the check that\n uvws match antenna positions does not pass.\n\n Raises\n ------\n IOError\n If root file directory doesn't exist.\n ValueError\n If required files are missing or multiple files for any polarization\n are included in filelist.\n If there is no recognized key for visibility weights in the flags_file.\n\n \"\"\"\n datafiles = {}\n params_file = None\n obs_file = None\n flags_file = None\n layout_file = None\n settings_file = None\n if use_model:\n data_name = \"_vis_model_\"\n else:\n data_name = \"_vis_\"\n for file in filelist:\n if file.lower().endswith(data_name + \"xx.sav\"):\n if \"xx\" in list(datafiles.keys()):\n raise ValueError(\"multiple xx datafiles in filelist\")\n datafiles[\"xx\"] = file\n elif file.lower().endswith(data_name + \"yy.sav\"):\n if \"yy\" in list(datafiles.keys()):\n raise ValueError(\"multiple yy datafiles in filelist\")\n datafiles[\"yy\"] = file\n elif file.lower().endswith(data_name + \"xy.sav\"):\n if \"xy\" in list(datafiles.keys()):\n raise ValueError(\"multiple xy datafiles in filelist\")\n datafiles[\"xy\"] = file\n elif file.lower().endswith(data_name + \"yx.sav\"):\n if \"yx\" in list(datafiles.keys()):\n raise ValueError(\"multiple yx datafiles in filelist\")\n datafiles[\"yx\"] = file\n elif file.lower().endswith(\"_params.sav\"):\n if params_file is not None:\n raise ValueError(\"multiple params files in filelist\")\n params_file = file\n elif file.lower().endswith(\"_obs.sav\"):\n if obs_file is not None:\n raise ValueError(\"multiple obs files in filelist\")\n obs_file = file\n elif file.lower().endswith(\"_flags.sav\"):\n if flags_file is not None:\n raise ValueError(\"multiple flags files in filelist\")\n flags_file = file\n elif file.lower().endswith(\"_layout.sav\"):\n if layout_file is not None:\n raise ValueError(\"multiple layout files in filelist\")\n layout_file = file\n elif file.lower().endswith(\"_settings.txt\"):\n if settings_file is not None:\n raise ValueError(\"multiple settings files in filelist\")\n settings_file = file\n else:\n # this is reached in tests but marked as uncovered because\n # CPython's peephole optimizer replaces a jump to a continue\n # with a jump to the top of the loop\n continue # pragma: no cover\n\n if len(datafiles) < 1 and read_data is True:\n raise ValueError(\n \"No data files included in file list and read_data is True.\"\n )\n if obs_file is None and read_data is False:\n raise ValueError(\n \"No obs file included in file list and read_data is False.\"\n )\n if params_file is None:\n raise ValueError(\"No params 
file included in file list\")\n        if flags_file is None:\n            raise ValueError(\"No flags file included in file list\")\n        if layout_file is None:\n            warnings.warn(\n                \"No layout file included in file list, \"\n                \"antenna_positions will not be defined.\"\n            )\n        if settings_file is None:\n            warnings.warn(\"No settings file included in file list\")\n\n        if not read_data:\n            obs_dict = readsav(obs_file, python_dict=True)\n            this_obs = obs_dict[\"obs\"]\n            self.Npols = int(this_obs[0][\"N_POL\"])\n        else:\n            # TODO: add checking to make sure params, flags and datafiles are\n            # consistent with each other\n            vis_data = {}\n            for pol, file in datafiles.items():\n                this_dict = readsav(file, python_dict=True)\n                if use_model:\n                    vis_data[pol] = this_dict[\"vis_model_ptr\"]\n                else:\n                    vis_data[pol] = this_dict[\"vis_ptr\"]\n                this_obs = this_dict[\"obs\"]\n            self.Npols = len(list(vis_data.keys()))\n\n        obs = this_obs\n        bl_info = obs[\"BASELINE_INFO\"][0]\n        astrometry = obs[\"ASTR\"][0]\n        fhd_pol_list = []\n        for pol in obs[\"POL_NAMES\"][0]:\n            fhd_pol_list.append(pol.decode(\"utf8\").lower())\n\n        params_dict = readsav(params_file, python_dict=True)\n        params = params_dict[\"params\"]\n\n        if read_data:\n            flag_file_dict = readsav(flags_file, python_dict=True)\n            # The name for this variable changed recently (July 2016). Test for both.\n            vis_weights_data = {}\n            if \"flag_arr\" in flag_file_dict:\n                weights_key = \"flag_arr\"\n            elif \"vis_weights\" in flag_file_dict:\n                weights_key = \"vis_weights\"\n            else:\n                raise ValueError(\n                    \"No recognized key for visibility weights in flags_file.\"\n                )\n            for index, w in enumerate(flag_file_dict[weights_key]):\n                vis_weights_data[fhd_pol_list[index]] = w\n\n        self.Ntimes = int(obs[\"N_TIME\"][0])\n        self.Nbls = int(obs[\"NBASELINES\"][0])\n        self.Nblts = params[\"UU\"][0].size\n        self.Nfreqs = int(obs[\"N_FREQ\"][0])\n        self.Nspws = 1\n        self.spw_array = np.array([0])\n        self.vis_units = \"JY\"\n\n        # bl_info.JDATE (a vector of length Ntimes) is the only safe date/time\n        # to use in FHD files.\n        # (obs.JD0 (float) and params.TIME (vector of length Nblts) are\n        # context dependent and are not safe\n        # because they depend on the phasing of the visibilities)\n        # the values in bl_info.JDATE are the JD for each integration.\n        # We need to expand up to Nblts.\n        int_times = list(uvutils._get_iterable(bl_info[\"JDATE\"][0]))\n        bin_offset = bl_info[\"BIN_OFFSET\"][0]\n        if self.Ntimes != len(int_times):\n            warnings.warn(\n                \"Ntimes does not match the number of unique times in the data\"\n            )\n        self.time_array = np.zeros(self.Nblts)\n        if self.Ntimes == 1:\n            self.time_array.fill(int_times[0])\n        else:\n            for ii in range(0, len(int_times)):\n                if ii < (len(int_times) - 1):\n                    self.time_array[bin_offset[ii] : bin_offset[ii + 1]] = int_times[ii]\n                else:\n                    self.time_array[bin_offset[ii] :] = int_times[ii]\n\n        # this is generated in FHD by subtracting the JD of neighboring\n        # integrations. This can have limited accuracy, so it can be slightly\n        # off the actual value.\n        # (e.g. 1.999426... 
rather than 2)\n time_res = obs[\"TIME_RES\"]\n # time_res is constrained to be a scalar currently\n self.integration_time = (\n np.ones_like(self.time_array, dtype=np.float64) * time_res[0]\n )\n # # --- observation information ---\n self.telescope_name = obs[\"INSTRUMENT\"][0].decode(\"utf8\")\n\n # This is a bit of a kludge because nothing like object_name exists\n # in FHD files.\n # At least for the MWA, obs.ORIG_PHASERA and obs.ORIG_PHASEDEC specify\n # the field the telescope was nominally pointing at\n # (May need to be revisited, but probably isn't too important)\n self.object_name = (\n \"Field RA(deg): \"\n + str(obs[\"ORIG_PHASERA\"][0])\n + \", Dec:\"\n + str(obs[\"ORIG_PHASEDEC\"][0])\n )\n # For the MWA, this can sometimes be converted to EoR fields\n if self.telescope_name.lower() == \"mwa\":\n if np.isclose(obs[\"ORIG_PHASERA\"][0], 0) and np.isclose(\n obs[\"ORIG_PHASEDEC\"][0], -27\n ):\n self.object_name = \"EoR 0 Field\"\n\n self.instrument = self.telescope_name\n latitude = np.deg2rad(float(obs[\"LAT\"][0]))\n longitude = np.deg2rad(float(obs[\"LON\"][0]))\n altitude = float(obs[\"ALT\"][0])\n\n # get the stuff FHD read from the antenna table (in layout file)\n if layout_file is not None:\n layout_dict = readsav(layout_file, python_dict=True)\n layout = layout_dict[\"layout\"]\n\n layout_fields = [name.lower() for name in layout.dtype.names]\n # Try to get the telescope location from the layout file &\n # compare it to the position from the obs structure.\n arr_center = layout[\"array_center\"][0]\n layout_fields.remove(\"array_center\")\n\n xyz_telescope_frame = layout[\"coordinate_frame\"][0].decode(\"utf8\").lower()\n layout_fields.remove(\"coordinate_frame\")\n\n if xyz_telescope_frame == \"itrf\":\n # compare to lat/lon/alt\n location_latlonalt = uvutils.XYZ_from_LatLonAlt(\n latitude, longitude, altitude\n )\n latlonalt_arr_center = uvutils.LatLonAlt_from_XYZ(\n arr_center, check_acceptability=run_check_acceptability\n )\n\n # check both lat/lon/alt and xyz because of subtle differences\n # in tolerances\n if self._xyz_close(\n location_latlonalt, arr_center\n ) or self._latlonalt_close(\n (latitude, longitude, altitude), latlonalt_arr_center\n ):\n self.telescope_location = arr_center\n else:\n # values do not agree with each other to within the tolerances.\n # this is a known issue with FHD runs on cotter uvfits\n # files for the MWA\n # compare with the known_telescopes values\n telescope_obj = uvtel.get_telescope(self.telescope_name)\n # start warning message\n message = (\n \"Telescope location derived from obs lat/lon/alt \"\n \"values does not match the location in the layout file.\"\n )\n\n if telescope_obj is not False:\n if self._latlonalt_close(\n (latitude, longitude, altitude),\n telescope_obj.telescope_location_lat_lon_alt,\n ):\n # obs lat/lon/alt matches known_telescopes\n message += (\n \" Value from obs lat/lon/alt matches the \"\n \"known_telescopes values, using them.\"\n )\n self.telescope_location = location_latlonalt\n elif self._xyz_close(\n arr_center, telescope_obj.telescope_location\n ):\n # layout xyz matches known_telescopes\n message += (\n \" Value from the layout file matches the \"\n \"known_telescopes values, using them.\"\n )\n self.telescope_location = arr_center\n else:\n # None of the values match each other. Defaulting\n # to known_telescopes value.\n message += (\n \" Neither location matches the values \"\n \"in known_telescopes. 
Defaulting to \"\n \"using the known_telescopes values.\"\n )\n self.telescope_location = telescope_obj.telescope_location\n else:\n message += (\n \" Telescope is not in known_telescopes. \"\n \"Defaulting to using the obs derived values.\"\n )\n self.telescope_location = location_latlonalt\n # issue warning\n warnings.warn(message)\n else:\n self.telescope_location_lat_lon_alt = (latitude, longitude, altitude)\n\n self.antenna_positions = layout[\"antenna_coords\"][0]\n layout_fields.remove(\"antenna_coords\")\n\n self.antenna_names = [\n ant.decode(\"utf8\").strip()\n for ant in layout[\"antenna_names\"][0].tolist()\n ]\n layout_fields.remove(\"antenna_names\")\n\n # make these 0-indexed (rather than one indexed)\n self.antenna_numbers = layout[\"antenna_numbers\"][0] - 1\n layout_fields.remove(\"antenna_numbers\")\n\n self.Nants_telescope = int(layout[\"n_antenna\"][0])\n layout_fields.remove(\"n_antenna\")\n\n if self.telescope_name.lower() == \"mwa\":\n # check that obs.baseline_info.tile_names match the antenna names\n # this only applies for MWA because the tile_names come from\n # metafits files\n obs_tile_names = [\n ant.decode(\"utf8\").strip()\n for ant in bl_info[\"TILE_NAMES\"][0].tolist()\n ]\n obs_tile_names = [\n \"Tile\" + \"0\" * (3 - len(ant)) + ant for ant in obs_tile_names\n ]\n # tile_names are assumed to be ordered: so their index gives\n # the antenna number\n # make an comparison array from self.antenna_names ordered this way.\n ant_names = np.zeros((np.max(self.antenna_numbers) + 1), str).tolist()\n for index, number in enumerate(self.antenna_numbers):\n ant_names[number] = self.antenna_names[index]\n if obs_tile_names != ant_names:\n warnings.warn(\n \"tile_names from obs structure does not match \"\n \"antenna_names from layout\"\n )\n\n self.gst0 = float(layout[\"gst0\"][0])\n layout_fields.remove(\"gst0\")\n\n if layout[\"ref_date\"][0] != \"\":\n self.rdate = layout[\"ref_date\"][0].decode(\"utf8\").lower()\n layout_fields.remove(\"ref_date\")\n\n self.earth_omega = float(layout[\"earth_degpd\"][0])\n layout_fields.remove(\"earth_degpd\")\n\n self.dut1 = float(layout[\"dut1\"][0])\n layout_fields.remove(\"dut1\")\n\n self.timesys = layout[\"time_system\"][0].decode(\"utf8\").upper().strip()\n layout_fields.remove(\"time_system\")\n\n if \"diameters\" in layout_fields:\n self.timesys = layout[\"time_system\"][0].decode(\"utf8\").upper().strip()\n layout_fields.remove(\"diameters\")\n\n # ignore some fields, put everything else in extra_keywords\n layout_fields_ignore = [\n \"diff_utc\",\n \"pol_type\",\n \"n_pol_cal_params\",\n \"mount_type\",\n \"axis_offset\",\n \"pola\",\n \"pola_orientation\",\n \"pola_cal_params\",\n \"polb\",\n \"polb_orientation\",\n \"polb_cal_params\",\n \"beam_fwhm\",\n ]\n for field in layout_fields_ignore:\n if field in layout_fields:\n layout_fields.remove(field)\n for field in layout_fields:\n keyword = field\n if len(keyword) > 8:\n keyword = field.replace(\"_\", \"\")\n\n value = layout[field][0]\n if isinstance(value, bytes):\n value = value.decode(\"utf8\")\n\n self.extra_keywords[keyword.upper()] = value\n else:\n self.telescope_location_lat_lon_alt = (latitude, longitude, altitude)\n self.antenna_names = [\n ant.decode(\"utf8\").strip() for ant in bl_info[\"TILE_NAMES\"][0].tolist()\n ]\n if self.telescope_name.lower() == \"mwa\":\n self.antenna_names = [\n \"Tile\" + \"0\" * (3 - len(ant)) + ant for ant in self.antenna_names\n ]\n self.Nants_telescope = len(self.antenna_names)\n self.antenna_numbers = 
np.arange(self.Nants_telescope)\n\n try:\n self.set_telescope_params()\n except ValueError as ve:\n warnings.warn(str(ve))\n\n # need to make sure telescope location is defined properly before this call\n proc = self.set_lsts_from_time_array(background=background_lsts)\n\n if not np.isclose(obs[\"OBSRA\"][0], obs[\"PHASERA\"][0]) or not np.isclose(\n obs[\"OBSDEC\"][0], obs[\"PHASEDEC\"][0]\n ):\n warnings.warn(\n \"These visibilities may have been phased \"\n \"improperly -- without changing the uvw locations\"\n )\n\n self._set_phased()\n self.phase_center_ra_degrees = np.float(obs[\"OBSRA\"][0])\n self.phase_center_dec_degrees = np.float(obs[\"OBSDEC\"][0])\n\n self.phase_center_epoch = astrometry[\"EQUINOX\"][0]\n\n # Note that FHD antenna arrays are 1-indexed so we subtract 1\n # to get 0-indexed arrays\n self.ant_1_array = bl_info[\"TILE_A\"][0] - 1\n self.ant_2_array = bl_info[\"TILE_B\"][0] - 1\n\n self.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)\n\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array\n )\n if self.Nbls != len(np.unique(self.baseline_array)):\n warnings.warn(\n \"Nbls does not match the number of unique baselines in the data\"\n )\n\n # TODO: Spw axis to be collapsed in future release\n self.freq_array = np.zeros((1, len(bl_info[\"FREQ\"][0])), dtype=np.float_)\n self.freq_array[0, :] = bl_info[\"FREQ\"][0]\n\n self.channel_width = float(obs[\"FREQ_RES\"][0])\n\n # In FHD, uvws are in seconds not meters.\n # FHD follows the FITS uvw direction convention, which is opposite\n # ours and Miriad's.\n # So conjugate the visibilities and flip the uvws:\n self.uvw_array = np.zeros((self.Nblts, 3))\n self.uvw_array[:, 0] = (-1) * params[\"UU\"][0] * const.c.to(\"m/s\").value\n self.uvw_array[:, 1] = (-1) * params[\"VV\"][0] * const.c.to(\"m/s\").value\n self.uvw_array[:, 2] = (-1) * params[\"WW\"][0] * const.c.to(\"m/s\").value\n\n lin_pol_order = [\"xx\", \"yy\", \"xy\", \"yx\"]\n linear_pol_dict = dict(zip(lin_pol_order, np.arange(5, 9) * -1))\n pol_list = []\n if read_data:\n for pol in lin_pol_order:\n if pol in vis_data:\n pol_list.append(linear_pol_dict[pol])\n self.polarization_array = np.asarray(pol_list)\n else:\n # Use Npols because for FHD, npol fully specifies which pols to use\n pol_strings = lin_pol_order[: self.Npols]\n self.polarization_array = np.asarray(\n [linear_pol_dict[pol] for pol in pol_strings]\n )\n\n # history: add the first few lines from the settings file\n if settings_file is not None:\n self.history = get_fhd_history(settings_file)\n else:\n self.history = \"\"\n\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n if read_data:\n # TODO: Spw axis to be collapsed in future release\n self.data_array = np.zeros(\n (self.Nblts, 1, self.Nfreqs, self.Npols), dtype=np.complex_\n )\n # TODO: Spw axis to be collapsed in future release\n self.nsample_array = np.zeros(\n (self.Nblts, 1, self.Nfreqs, self.Npols), dtype=np.float_\n )\n # TODO: Spw axis to be collapsed in future release\n self.flag_array = np.zeros(\n (self.Nblts, 1, self.Nfreqs, self.Npols), dtype=np.bool_\n )\n for pol, vis in vis_data.items():\n pol_i = pol_list.index(linear_pol_dict[pol])\n # FHD follows the FITS uvw direction convention, which is opposite\n # ours and Miriad's.\n # So conjugate the visibilities and flip the uvws:\n self.data_array[:, 0, :, pol_i] = np.conj(vis)\n self.flag_array[:, 0, :, pol_i] = vis_weights_data[pol] <= 0\n 
self.nsample_array[:, 0, :, pol_i] = np.abs(vis_weights_data[pol])\n\n # wait for LSTs if set in background\n if proc is not None:\n proc.join()\n\n # check if object has all required uv_properties set\n if run_check:\n self.check(\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n strict_uvw_antpos_check=strict_uvw_antpos_check,\n )\n", "id": "6179709", "language": "Python", "matching_score": 3.1422646045684814, "max_stars_count": 0, "path": "pyuvdata/uvdata/fhd.py" }, { "content": "\"\"\"DSAMFS/IO.PY\n\nRoutines to read and manipulate the correlator-data psrdada buffer stream and\nwrite the correlated data to a uvh5 file.\n\n<NAME>, <EMAIL>, 2020\n\"\"\"\n\nfrom datetime import datetime\nimport os\nimport traceback\nimport socket\nimport numpy as np\nimport h5py\nimport astropy.units as u\nfrom psrdada.exceptions import PSRDadaError\nfrom antpos.utils import get_itrf\nimport dsautils.dsa_syslog as dsl\nimport dsautils.dsa_store as ds\nimport dsacalib.constants as ct\nimport dsamfs.utils as pu\nfrom dsamfs.fringestopping import fringestop_on_zenith\n\netcd = ds.DsaStore()\n\nlogger = dsl.DsaSyslogger()\nlogger.subsystem(\"software\")\nlogger.app(\"dsamfs\")\n\ndef initialize_uvh5_file(fhdf, nfreq, npol, pt_dec, antenna_order, fobs,\n fs_table=None):\n \"\"\"Initializes an HDF5 file according to the UVH5 specification.\n\n For details on the specification of the UVH5 file format, see the pyuvdata\n memo \"UVH5 file format\" from November 28, 2018.\n\n Parameters\n ----------\n fhdf5 : file handler\n The hdf5 file to initialize.\n nbls : int\n The number of baselines in the correlated data.\n nfreq : int\n The number of frequency channels in the correlated data.\n npol : int\n The number of polarizations in the correlated data.\n pt_dec : float\n The declination at which the visbilities are phased, in radians.\n antenna_order : array\n The order of the antennas. The antennas should be specified as\n integers between 1 and 117 inclusive. (E.g. DSA-24 should be\n identified as 24.)\n fs_table : str\n The full path to the table used in fringestopping. Defaults None.\n \"\"\"\n # also need the itrf coordinates of the antennas\n df = get_itrf(\n latlon_center=(ct.OVRO_LAT*u.rad, ct.OVRO_LON*u.rad, ct.OVRO_ALT*u.m)\n )\n ant_itrf = np.array([df['dx_m'], df['dy_m'], df['dz_m']]).T\n nants_telescope = max(df.index)\n # have to have some way of calculating the ant_1_array and\n # ant_2_array order and uvw array. 
The uvw array should be constant but\n # still has to have dimensions (nblts, 3)\n\n # Header parameters\n header = fhdf.create_group(\"Header\")\n data = fhdf.create_group(\"Data\")\n # The following must be defined\n header[\"latitude\"] = (ct.OVRO_LAT*u.rad).to_value(u.deg)\n header[\"longitude\"] = (ct.OVRO_LON*u.rad).to_value(u.deg)\n header[\"altitude\"] = ct.OVRO_ALT\n header[\"telescope_name\"] = np.string_(\"OVRO_MMA\")\n header[\"instrument\"] = np.string_(\"DSA\")\n header[\"object_name\"] = np.string_(\"search\")\n header[\"history\"] = np.string_(\"written by dsa110-meridian-fringestopping \"\n \"on {0}\".format(datetime.now().strftime(\n '%Y-%m-%dT%H:%M:%S')))\n header[\"phase_type\"] = np.string_(\"drift\")\n header[\"Nants_data\"] = len(antenna_order)\n header[\"Nants_telescope\"] = nants_telescope\n header[\"antenna_diameters\"] = np.ones(nants_telescope)*4.65\n # ant_1_array and ant_2_array have ot be updated\n header.create_dataset(\n \"ant_1_array\", (0, ), maxshape=(None, ), dtype=np.int,\n chunks=True, data=None)\n header.create_dataset(\n \"ant_2_array\", (0, ), maxshape=(None, ), dtype=np.int,\n chunks=True, data=None)\n antenna_names = np.array(['{0}'.format(ant_no+1) for ant_no in\n range(nants_telescope)], dtype=\"S4\")\n header.create_dataset(\"antenna_names\", (nants_telescope, ), dtype=\"S4\",\n data=antenna_names)\n header[\"antenna_numbers\"] = np.arange(nants_telescope)\n header[\"Nbls\"] = ((header[\"Nants_data\"][()]+1)*\n header[\"Nants_data\"][()])//2\n header[\"Nblts\"] = 0\n header[\"Nfreqs\"] = nfreq\n header[\"Npols\"] = npol\n header[\"Ntimes\"] = 0\n header[\"Nspws\"] = 1\n header.create_dataset(\n \"uvw_array\", (0, 3), maxshape=(None, 3), dtype=np.float32,\n chunks=True, data=None)\n header.create_dataset(\n \"time_array\", (0, ), maxshape=(None, ), dtype=np.float64,\n chunks=True, data=None)\n header.create_dataset(\n \"integration_time\", (0, ), maxshape=(None, ), dtype=np.float64,\n chunks=True, data=None)\n header[\"freq_array\"] = fobs[np.newaxis, :]*1e9\n header[\"channel_width\"] = np.abs(np.median(np.diff(fobs))*1e9)\n header[\"spw_array\"] = np.array([1])\n # Polarization array is defined at the top of page 8 of\n # AIPS memo 117:\n # Values of 1 through 4 are assiged to Stokes I, Q, U, V\n # Values of -5 through -8 to XX, YY, XY, YX\n header[\"polarization_array\"] = np.array([-5, -6])\n header[\"antenna_positions\"] = ant_itrf\n\n # Optional parameters\n extra = header.create_group(\"extra_keywords\")\n extra[\"phase_center_dec\"] = pt_dec\n extra[\"ha_phase_center\"] = 0.\n extra[\"phase_center_epoch\"] = 2000\n if fs_table is not None:\n extra[\"fs_table\"] = np.string_(fs_table)\n snapdelays = pu.get_delays(np.array(antenna_order), nants_telescope)\n extra[\"applied_delays_ns\"] = np.string_(\n ' '.join([str(d) for d in snapdelays.flatten()])\n )\n # Data sets\n data.create_dataset(\n \"visdata\", (0, 1, nfreq, npol), maxshape=(None, 1, nfreq, npol),\n dtype=np.complex64, chunks=True, data=None)\n data.create_dataset(\n \"flags\", (0, 1, nfreq, npol), maxshape=(None, 1, nfreq, npol),\n dtype=np.bool, chunks=True, data=None)\n # likely set flags_dataset all to 1?\n data.create_dataset(\n \"nsamples\", (0, 1, nfreq, npol), maxshape=(None, 1, nfreq, npol),\n dtype=np.float32)\n # nsamples tells us how many samples went into each integration\n\ndef update_uvh5_file(fhdf5, data, t, tsamp, bname, uvw, nsamples):\n \"\"\"Appends new data to the uvh5 file.\n\n Currently assumes phasing at the meridian. 
To account for tracking, need to\n update to allow the passed uvw to also have time axis.\n\n Parameters\n ----------\n fhdf5 : file handler\n The open and initialized hdf5 file.\n data : ndarray\n The data to append to the file. Dimensions (time, baseline, channel,\n polarization).\n t : array\n The central time of each timebin in `data`, in MJD.\n tsamp : float\n The sampling time of the data before integration.\n bname : list(str)\n The name of each baseline.\n uvw : ndarray\n The UVW coordinates at the phase center. Dimensions (nbls, 3).\n nsamples : ndarray\n The number of samples (unflagged) samples that have been integrated for\n each bin of `data`. Same dimensions as `data`.\n \"\"\"\n (nt, nbls, nchan, npol) = data.shape\n assert t.shape[0] == nt\n assert data.shape == nsamples.shape\n assert uvw.shape[1] == nbls\n assert uvw.shape[2] == 3\n\n antenna_order = fhdf5[\"Header\"][\"antenna_names\"][:]\n ant_1_array = np.array(\n [np.where(antenna_order == np.string_(bn.split('-')[0]))\n for bn in bname], dtype=np.int\n ).squeeze()\n ant_2_array = np.array(\n [np.where(antenna_order == np.string_(bn.split('-')[1]))\n for bn in bname], dtype=np.int\n ).squeeze()\n\n old_size = fhdf5[\"Header\"][\"time_array\"].shape[0]\n new_size = old_size+nt*nbls\n\n # TIME_ARRAY\n fhdf5[\"Header\"][\"time_array\"].resize(new_size, axis=0)\n fhdf5[\"Header\"][\"time_array\"][old_size:] = np.tile(\n t[:, np.newaxis],\n (1, nbls)\n ).flatten()\n\n # INTEGRATION_TIME\n fhdf5[\"Header\"][\"integration_time\"].resize(new_size, axis=0)\n fhdf5[\"Header\"][\"integration_time\"][old_size:] = np.ones(\n (nt*nbls, ),\n dtype=np.float32\n )*tsamp\n\n # UVW_ARRAY\n # Note that the uvw and baseline convention for pyuvdata is B-A,\n # where vis=A^* B\n fhdf5[\"Header\"][\"uvw_array\"].resize(new_size, axis=0)\n if uvw.shape[0] == 1:\n fhdf5[\"Header\"][\"uvw_array\"][old_size:, :] = np.tile(\n uvw,\n (nt, 1, 1)\n ).reshape(-1, 3)\n else:\n assert uvw.shape[0] == nt\n fhdf5[\"Header\"][\"uvw_array\"][old_size:, :] = uvw.reshape(-1, 3)\n\n # Ntimes and Nblts\n fhdf5[\"Header\"][\"Ntimes\"][()] = new_size//nbls\n fhdf5[\"Header\"][\"Nblts\"][()] = new_size\n\n # ANT_1_ARRAY\n fhdf5[\"Header\"][\"ant_1_array\"].resize(new_size, axis=0)\n fhdf5[\"Header\"][\"ant_1_array\"][old_size:] = np.tile(\n ant_1_array[np.newaxis, :],\n (nt, 1)\n ).flatten()\n\n # ANT_2_ARRAY\n fhdf5[\"Header\"][\"ant_2_array\"].resize(new_size, axis=0)\n fhdf5[\"Header\"][\"ant_2_array\"][old_size:] = np.tile(\n ant_2_array[np.newaxis, :],\n (nt, 1)\n ).flatten()\n\n # VISDATA\n fhdf5[\"Data\"][\"visdata\"].resize(new_size, axis=0)\n fhdf5[\"Data\"][\"visdata\"][old_size:, ...] = data.reshape(\n nt*nbls, 1, nchan, npol)\n\n # FLAGS\n fhdf5[\"Data\"][\"flags\"].resize(new_size, axis=0)\n fhdf5[\"Data\"][\"flags\"][old_size:, ...] = np.zeros(\n (nt*nbls, 1, nchan, npol), dtype=np.bool)\n\n # NSAMPLES\n fhdf5[\"Data\"][\"nsamples\"].resize(new_size, axis=0)\n fhdf5[\"Data\"][\"nsamples\"][old_size:, ...] 
= nsamples.reshape(\n nt*nbls, 1, nchan, npol)\n\ndef dada_to_uvh5(reader, outdir, nbls, nchan, npol, nint, nfreq_int,\n samples_per_frame_out, sample_rate_out, pt_dec, antenna_order,\n fs_table, tsamp, bname, uvw, fobs,\n vis_model, test, nmins):\n \"\"\"\n Reads dada buffer and writes to uvh5 file.\n \"\"\"\n if nfreq_int > 1:\n assert nchan%nfreq_int == 0, (\"Number of channels must be an integer \"\n \"number of output channels.\")\n fobs = np.median(fobs.reshape(-1, nfreq_int), axis=1)\n nchan = len(fobs)\n\n nans = False\n idx_frame_out = 0 # total number of fsed frames, for timekeeping\n max_frames_per_file = int(np.ceil(nmins*60*sample_rate_out))\n hostname = socket.gethostname()\n while not nans:\n now = datetime.utcnow()\n fout = now.strftime(\"%Y-%m-%dT%H:%M:%S\")\n if outdir is not None:\n fout = '{0}/{1}'.format(outdir, fout)\n print('Opening output file {0}.hdf5'.format(fout))\n with h5py.File('{0}_incomplete.hdf5'.format(fout), 'w') as fhdf5:\n initialize_uvh5_file(fhdf5, nchan, npol, pt_dec, antenna_order,\n fobs, fs_table)\n\n idx_frame_file = 0 # number of fsed frames write to curent file\n while (idx_frame_file < max_frames_per_file) and (not nans):\n data_in = np.ones(\n (samples_per_frame_out*nint, nbls, nchan*nfreq_int, npol),\n dtype=np.complex64)*np.nan\n for i in range(data_in.shape[0]):\n try:\n assert reader.isConnected\n data_in[i, ...] = pu.read_buffer(\n reader, nbls, nchan*nfreq_int, npol)\n except (AssertionError, ValueError, PSRDadaError) as e:\n print('Last integration has {0} timesamples'.format(i))\n logger.info('Disconnected from buffer with message'\n '{0}:\\n{1}'.\n format(type(e).__name__, ''.join(\n traceback.format_tb(e.__traceback__))))\n nans = True\n break\n\n if idx_frame_out == 0:\n if test:\n tstart = 59000.5\n else:\n tstart = pu.get_time()\n tstart += (nint*tsamp/2)/ct.SECONDS_PER_DAY+2400000.5\n\n data, nsamples = fringestop_on_zenith(data_in, vis_model, nans)\n t, tstart = pu.update_time(tstart, samples_per_frame_out,\n sample_rate_out)\n if nfreq_int > 1:\n if not nans:\n data = np.mean(data.reshape(\n data.shape[0], data.shape[1], nchan, nfreq_int,\n npol), axis=3)\n nsamples = np.mean(nsamples.reshape(\n nsamples.shape[0], nsamples.shape[1], nchan,\n nfreq_int, npol), axis=3)\n else:\n data = np.nanmean(data.reshape(\n data.shape[0], data.shape[1], nchan,\n nfreq_int, npol),\n axis=3)\n nsamples = np.nanmean(nsamples.reshape(\n nsamples.shape[0], nsamples.shape[1], nchan,\n nfreq_int, npol), axis=3)\n\n update_uvh5_file(\n fhdf5, data, t, tsamp, bname, uvw,\n nsamples\n )\n\n idx_frame_out += 1\n idx_frame_file += 1\n print('Integration {0} done'.format(idx_frame_out))\n os.rename('{0}_incomplete.hdf5'.format(fout), '{0}.hdf5'.format(fout))\n try:\n etcd.put_dict(\n '/cmd/cal',\n {\n 'cmd': 'rsync',\n 'val':\n {\n 'hostname': hostname,\n 'filename': '{0}.hdf5'.format(fout)\n }\n }\n )\n except:\n logger.info('Could not reach ETCD to transfer {0} from {1}'.format(fout, hostname))\n try:\n reader.disconnect()\n except PSRDadaError:\n pass\n", "id": "888087", "language": "Python", "matching_score": 4.237085342407227, "max_stars_count": 0, "path": "dsamfs/io.py" }, { "content": "\"\"\"\nDSAMFS/PSRSDADA_UTILS.PY\n\n<NAME>, <EMAIL>, 02/2020\n\nUtilities to interact with the psrdada buffer written to\nby the DSA-110 correlator\n\"\"\"\n\nimport os\nimport socket\nfrom datetime import datetime\nfrom collections import OrderedDict\nimport numpy as np\nimport yaml\nimport astropy.units as u\nfrom antpos.utils import get_baselines\nimport scipy 
#pylint: disable=unused-import\nimport casatools as cc\nfrom dsautils import dsa_store\nimport dsautils.dsa_syslog as dsl\nimport dsautils.cnf as cnf\nimport dsacalib.constants as ct\nfrom dsacalib.fringestopping import calc_uvw\nfrom dsamfs.fringestopping import generate_fringestopping_table\nfrom dsamfs.fringestopping import zenith_visibility_model\n\nMY_CNF = cnf.Conf()\nCORR_CNF = MY_CNF.get('corr')\nMFS_CNF = MY_CNF.get('fringe')\nCAL_CNF = MY_CNF.get('cal')\n\n# Logger\nLOGGER = dsl.DsaSyslogger()\nLOGGER.subsystem(\"software\")\nLOGGER.app(\"dsamfs\")\n\nETCD = dsa_store.DsaStore()\n\ndef get_delays(antenna_order, nants):\n    \"\"\"Gets the delays currently set in the snaps.\n\n    Parameters\n    ----------\n    antenna_order : array\n        The order of antennas in the snaps.\n    nants : int\n        The total number of antennas in the array.\n\n    Returns\n    -------\n    ndarray\n        The delays for each antenna/pol in the array.\n    \"\"\"\n    delays = np.zeros((nants, 2), dtype=np.int)\n    d = dsa_store.DsaStore()\n    nant_snap = 3\n    nsnaps = len(antenna_order)//nant_snap\n    nant_lastsnap = len(antenna_order)%nant_snap\n    if nant_lastsnap != 0:\n        nsnaps += 1\n    else:\n        nant_lastsnap = nant_snap\n    for i in range(0, nsnaps):\n        LOGGER.info('getting delays for snap {0} of {1}'.format(i+1, nsnaps))\n        try:\n            snap_delays = np.array(\n                d.get_dict(\n                    '/mon/snap/{0}/delays'.format(i+1)\n                )['delays']\n            )*2\n            snap_delays = snap_delays.reshape(3, 2)[\n                :nant_snap if i<nsnaps-1 else nant_lastsnap, :]\n            delays[(antenna_order-1)[i*3:(i+1)*3], :] = snap_delays\n        except (AttributeError, TypeError) as e:\n            LOGGER.error('delays not set for snap{0}'.format(i+1))\n    return delays\n\ndef get_time():\n    \"\"\"\n    Gets the start time of the first spectrum from etcd.\n    \"\"\"\n    try:\n        ret_time = (ETCD.get_dict('/mon/snap/1/armed_mjd')['armed_mjd']\n                    +float(ETCD.get_dict('/mon/snap/1/utc_start')['utc_start'])\n                    *4.*8.192e-6/86400.)\n    except:\n        ret_time = 55000.0\n\n    return ret_time\n\ndef read_header(reader):\n    \"\"\"\n    Reads a psrdada header.\n\n    Parameters\n    ----------\n    reader : psrdada reader instance\n        The reader instance connected to the psrdada buffer.\n\n    Returns\n    -------\n    tstart : float\n        The start time in mjd seconds.\n    tsamp : float\n        The sample time in seconds.\n    \"\"\"\n    header = reader.getHeader()\n    tsamp = float(header['TSAMP'])\n    tstart = float(header['MJD_START'])*ct.SECONDS_PER_DAY\n    return tstart, tsamp\n\ndef read_buffer(reader, nbls, nchan, npol):\n    \"\"\"\n    Reads a psrdada buffer as float32 and returns the visibilities.\n\n    Parameters\n    ----------\n    reader : psrdada Reader instance\n        An instance of the Reader class for the psrdada buffer to read.\n    nbls : int\n        The number of baselines.\n    nchan : int\n        The number of frequency channels.\n    npol : int\n        The number of polarizations.\n\n    Returns\n    -------\n    ndarray\n        The data. 
Dimensions (time, baselines, channels, polarization).\n \"\"\"\n page = reader.getNextPage()\n reader.markCleared()\n\n data = np.asarray(page)\n data = data.view(np.float32)\n data = data.reshape(-1, 2).view(np.complex64).squeeze(axis=-1)\n try:\n data = data.reshape(-1, nbls, nchan, npol)\n except ValueError:\n print('incomplete data: {0} out of {1} samples'.format(\n data.shape[0]%(nbls*nchan*npol), nbls*nchan*npol))\n data = data[\n :data.shape[0]//(nbls*nchan*npol)*(nbls*nchan*npol)\n ].reshape(-1, nbls, nchan, npol)\n return data\n\ndef update_time(tstart, samples_per_frame, sample_rate):\n \"\"\"\n Update the start time and the array of sample times for a dataframe.\n\n Parameters\n ----------\n tstart : float\n The start time of the frame in mjd seconds.\n samples_per_frame : int\n The number of time samples in the frame.\n sample_rate : float\n The sampling rate in samples per second.\n\n Returns\n -------\n t : array(float)\n The center of the time bin for each sample in mjd seconds.\n tstart : float\n The start time of the next dataframe in seconds.\n \"\"\"\n t = tstart+np.arange(samples_per_frame)/sample_rate/ct.SECONDS_PER_DAY\n tstart += samples_per_frame/sample_rate/ct.SECONDS_PER_DAY\n return t, tstart\n\ndef integrate(data, nint):\n \"\"\"\n A simple integration for testing and benchmarking.\n\n Integrates along the time axis.\n\n Parameters\n ----------\n data : ndarray\n The data to integrate. Dimensions (time, baseline, channel,\n polarization).\n nint : int\n The number of consecutive time samples to combine.\n\n Returns\n -------\n ndarray\n The integrated data. Dimensions (time, baseline, channel,\n polarization).\n \"\"\"\n (_nt, nbls, nchan, npol) = data.shape\n data = data.reshape(-1, nint, nbls, nchan, npol).mean(1)\n return data\n\ndef load_visibility_model(\n fs_table, blen, nint, fobs, pt_dec, tsamp, antenna_order,\n outrigger_delays, bname\n):\n \"\"\"\n Load the visibility model for fringestopping.\n\n If the path to the file does not exist or if the model is for a different\n number of integrations or baselines a new model will be created and saved\n to the file path. TODO: Order may not be correct! 
Need to verify the\n antenna order that the correlator uses.\n\n Parameters\n ----------\n fs_table : str\n The full path to the .npz file containing the fringestopping model.\n antenna_order : array\n The order of the antennas in the correlator.\n nint : int\n The number of time samples to integrate.\n nbls : int\n The number of baselines.\n\n Returns\n -------\n ndarray\n The visibility model to use for fringestopping.\n \"\"\"\n try:\n fs_data = np.load(fs_table, allow_pickle=True)\n assert fs_data['bw'].shape == (nint, blen.shape[0])\n assert np.abs(fs_data['dec_rad']-pt_dec) < 1e-6\n assert np.abs(fs_data['tsamp_s']-tsamp) < 1e-6\n assert np.all(fs_data['antenna_order']==antenna_order)\n assert fs_data['outrigger_delays']==outrigger_delays\n except (FileNotFoundError, AssertionError, KeyError):\n print('Creating new fringestopping table.')\n generate_fringestopping_table(\n blen, pt_dec, nint, tsamp, antenna_order, outrigger_delays,\n bname, outname=fs_table\n )\n\n vis_model = zenith_visibility_model(fobs, fs_table)\n\n return vis_model\n\ndef load_antenna_delays(ant_delay_table, nant, npol=2):\n \"\"\"Load antenna delays from a CASA calibration table.\n\n Parameters\n ----------\n ant_delay_table : str\n The full path to the calibration table.\n nant : int\n The number of antennas.\n npol : int\n The number of polarizations.\n\n Returns\n -------\n ndarray\n The relative delay per baseline in nanoseconds. Baselines are in\n anti-casa order. Dimensions (nbaselines, npol).\n \"\"\"\n error = 0\n tb = cc.table()\n error += not tb.open(ant_delay_table)\n antenna_delays = tb.getcol('FPARAM')\n npol = antenna_delays.shape[0]\n antenna_delays = antenna_delays.reshape(npol, -1, nant)\n error += not tb.close()\n\n bl_delays = np.zeros(((nant*(nant+1))//2, npol))\n idx = 0\n for i in np.arange(nant):\n for j in np.arange(i+1):\n #j-i or i-j ?\n bl_delays[idx, :] = antenna_delays[:, 0, j]-antenna_delays[:, 0, i]\n\n return bl_delays\n\ndef baseline_uvw(antenna_order, pt_dec, autocorrs=True, casa_order=False):\n \"\"\"Calculates the antenna positions and baseline coordinates.\n\n Parameters\n ----------\n antenna_order : list\n The names of the antennas in correct order.\n pt_dec : float\n The pointing declination in radians.\n autocorrs : bool\n Whether to consider only cross-correlations or both cross-correlations\n and auto-correlations when constructing baselines. Defaults True.\n casa_order : bool\n Whether the baselines are organized in casa order (e.g. [1-1, 1-2, 1-3,\n 2-2, 2-3, 3-3]) or the reverse. Defaults False.\n\n Returns\n -------\n bname : list\n The names of the baselines, e.g. 
['1-1', '1-2', '2-2'].\n blen : ndarray\n The itrf coordinates of the baselines, dimensions (nbaselines, 3).\n uvw : ndarray\n The uvw coordinates of the baselines for a phase reference at meridian.\n Dimensions (nbaselines, 3).\n \"\"\"\n df_bls = get_baselines(antenna_order, autocorrs=autocorrs,\n casa_order=casa_order)\n bname = df_bls['bname']\n blen = np.array([df_bls['x_m'], df_bls['y_m'], df_bls['z_m']]).T\n bu, bv, bw = calc_uvw(blen, 58849.0, 'HADEC', 0.*u.deg,\n (pt_dec*u.rad).to(u.deg))\n uvw = np.array([bu, bv, bw]).T\n return bname, blen, uvw\n\ndef parse_params(param_file=None):\n \"\"\"Parses parameter file.\n\n Parameters\n ----------\n param_file : str\n The full path to the yaml parameter file.\n \"\"\"\n if param_file is not None:\n fhand = open(param_file)\n corr_cnf = yaml.safe_load(fhand)\n mfs_cnf = corr_cnf\n fhand.close()\n else:\n corr_cnf = CORR_CNF\n mfs_cnf = MFS_CNF\n test = mfs_cnf['test']\n key_string = mfs_cnf['key_string']\n nant = corr_cnf['nant']\n nchan = corr_cnf['nchan']\n npol = corr_cnf['npol']\n samples_per_frame = mfs_cnf['samples_per_frame']\n samples_per_frame_out = mfs_cnf['samples_per_frame_out']\n nint = mfs_cnf['nint']\n fringestop = mfs_cnf['fringestop']\n nfreq_int = mfs_cnf['nfreq_int']\n ant_od = OrderedDict(sorted(corr_cnf['antenna_order'].items()))\n antenna_order = list(ant_od.values())\n dfreq = corr_cnf['bw_GHz']/nchan\n if corr_cnf['chan_ascending']:\n fobs = corr_cnf['f0_GHz']+np.arange(nchan)*dfreq\n else:\n fobs = corr_cnf['f0_GHz']-np.arange(nchan)*dfreq\n pt_dec = get_pointing_declination().to_value(u.rad) #corr_cnf['pt_dec'] in radians\n tsamp = corr_cnf['tsamp'] # in seconds\n\n hname = socket.gethostname()\n try:\n ch0 = corr_cnf['ch0'][hname]\n except KeyError:\n ch0 = 3400\n LOGGER.error('host {0} not in correlator'.format(hname))\n nchan_spw = corr_cnf['nchan_spw']\n fobs = fobs[ch0:ch0+nchan_spw]\n filelength_minutes = mfs_cnf['filelength_minutes']\n outrigger_delays = mfs_cnf['outrigger_delays']\n\n assert (samples_per_frame_out*nint)%samples_per_frame == 0, \\\n \"Each frame out must contain an integer number of frames in.\"\n\n return test, key_string, nant, nchan_spw, npol, fobs, \\\n samples_per_frame, samples_per_frame_out, nint, \\\n nfreq_int, antenna_order, pt_dec, tsamp, fringestop, \\\n filelength_minutes, outrigger_delays\n\ndef get_pointing_declination(tol=0.25):\n \"\"\"Gets the pointing declination from the commanded antenna elevations.\n\n Parameters\n ----------\n tol : float\n The tolerance for discrepancies in the antenna pointing and commanded\n elevations, in degrees.\n\n Returns\n -------\n astropy quantity\n The pointing declination, in degrees or equivalent.\n \"\"\"\n commanded_els = np.zeros(len(CORR_CNF['antenna_order']))\n for idx, ant in CORR_CNF['antenna_order'].items():\n try:\n antmc = ETCD.get_dict('/mon/ant/{0}'.format(ant))\n a1 = np.abs(antmc['ant_el'] - antmc['ant_cmd_el'])\n except:\n a1 = 2.*tol\n if a1 < tol:\n commanded_els[idx] = antmc['ant_cmd_el'] + CAL_CNF['el_offset'].get(ant, 0.)\n else:\n commanded_els[idx] = np.nan\n\n pt_el = np.nanmedian(commanded_els)\n if pt_el is not np.nan:\n pt_dec = ct.OVRO_LAT*u.rad + pt_el*u.deg - 90*u.deg\n else:\n pt_el = CORR_CNF['pt_dec']\n return pt_dec\n", "id": "6603317", "language": "Python", "matching_score": 5.398181915283203, "max_stars_count": 0, "path": "dsamfs/utils.py" }, { "content": "from dsamfs.utils import parse_params\n\ndef test_parse_params():\n test, key_string, nant, nchan_spw, npol, fobs, \\\n samples_per_frame, 
samples_per_frame_out, nint, \\\n nfreq_int, antenna_order, pt_dec, tsamp, fringestop, \\\n filelength_minutes, outrigger_delays = parse_params()\n assert nant == len(antenna_order)\n \n", "id": "4103180", "language": "Python", "matching_score": 1.8623801469802856, "max_stars_count": 0, "path": "tests/travis/test_utils.py" }, { "content": "import os\nimport pytest\nfrom antpos import utils\nfrom dsamfs import fringestopping\nfrom dsamfs.utils import parse_params\nimport dsacalib.utils as du\nimport dsacalib.constants as ct\nfrom dsacalib.fringestopping import calc_uvw\nimport numpy as np\nimport astropy.units as u\n\ndef test_gentable(tmpdir):\n fstable = '{0}/fs_table.npz'.format(tmpdir)\n pt_dec = 0.71094487066\n tsamp = 0.134217728\n nint = 10\n antenna_order = [24, 10, 3, 66]\n outrigger_delays = {24 : 1200, }\n bname = []\n for i, ant1 in enumerate(antenna_order):\n for ant2 in antenna_order[i:]:\n bname += ['{0}-{1}'.format(ant1, ant2)]\n df_bls = utils.get_baselines(antenna_order, autocorrs=True, casa_order=False)\n blen = np.array([df_bls['x_m'], df_bls['y_m'], df_bls['z_m']]).T\n fringestopping.generate_fringestopping_table(\n blen, pt_dec, nint, tsamp, antenna_order, outrigger_delays, bname,\n outname=fstable)\n assert os.path.exists(fstable)\n\ndef test_outrigger_lookup():\n bn = '100-101'\n ants = bn.split('-')\n _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, outrigger_delays = parse_params()\n delay = outrigger_delays.get(int(ants[0]), 0) - outrigger_delays.get(int(ants[1]), 0)\n assert np.abs(delay) > 0\n delay2 = outrigger_delays[int(ants[0])] - outrigger_delays[int(ants[1])]\n assert delay2 == delay\n \ndef test_write_fs_delay_table():\n msname = 'test_write'\n source = du.src('TEST', 16*u.hourangle, 37*u.deg, 1.)\n antenna_order = [24, 10, 3, 66]\n df_bls = utils.get_baselines(antenna_order, autocorrs=True, casa_order=False)\n blen = np.array([df_bls['x_m'], df_bls['y_m'], df_bls['z_m']]).T\n \ndef test_calc_uvw():\n nant = 5\n nt = 10\n nbl = (nant*(nant+1))//2\n antenna_order = np.arange(nant)+1\n tobs = 59100.956635023+np.arange(nt)/ct.SECONDS_PER_DAY\n df_bls = utils.get_baselines(antenna_order, autocorrs=True, casa_order=False)\n blen = np.array([df_bls['x_m'], df_bls['y_m'], df_bls['z_m']]).T\n ra = 14.31225787*u.hourangle\n dec = 0.71094487*u.rad\n uvw_blt = fringestopping.calc_uvw_blt(np.tile(blen[np.newaxis, :, :],\n (nt, 1, 1)).reshape(-1, 3),\n np.tile(tobs[:, np.newaxis],\n (1, nbl)).flatten(),\n 'J2000', ra, dec)\n uu, vv, ww = calc_uvw(blen, tobs, 'J2000', ra, dec)\n print(uvw_blt.shape, uu.T.shape)\n assert np.all(np.abs(uvw_blt[:, 0]-uu.T.flatten()) < 1e-6)\n assert np.all(np.abs(uvw_blt[:, 1]-vv.T.flatten()) < 1e-6)\n assert np.all(np.abs(uvw_blt[:, 2]-ww.T.flatten()) < 1e-6)\n \n uvw_blt = fringestopping.calc_uvw_blt(np.tile(blen[np.newaxis, :, :],\n (nt, 1, 1)).reshape(-1, 3),\n np.tile(tobs[:, np.newaxis],\n (1, nbl)).flatten(),\n 'HADEC',\n np.zeros(nt*nbl)*u.rad,\n np.ones(nt*nbl)*dec)\n uu, vv, ww = calc_uvw(blen, tobs, 'HADEC', np.zeros(nt)*u.rad, np.ones(nt)*dec)\n assert np.all(np.abs(uvw_blt[:, 0]-uu.T.flatten()) < 1e-6)\n assert np.all(np.abs(uvw_blt[:, 1]-vv.T.flatten()) < 1e-6)\n assert np.all(np.abs(uvw_blt[:, 2]-ww.T.flatten()) < 1e-6)\n", "id": "3662188", "language": "Python", "matching_score": 1.80696702003479, "max_stars_count": 0, "path": "tests/travis/test_fringestopping.py" }, { "content": "import pkg_resources\nfrom dsamfs.routines import run_fringestopping\nfrom dsacalib.utils import get_autobl_indices\nfrom pyuvdata import 
UVData\nimport glob\nimport numpy as np\nfrom dsacalib.ms_io import uvh5_to_ms\nfrom casatasks import importuvfits\nimport casatools as cc\nimport os\nfrom astropy.time import Time\nfrom dsamfs.fringestopping import calc_uvw_blt\nimport astropy.io.fits as pf\nfrom antpos.utils import get_baselines, get_itrf\nimport astropy.units as u\nimport astropy.constants as c\n\ndef test_end2end(tmpdir):\n data_path = pkg_resources.resource_filename('dsamfs', 'data/')\n param_path = '{0}/test_parameters.yaml'.format(data_path)\n header_path = '{0}/test_header.txt'.format(data_path)\n print(param_path)\n run_fringestopping(param_file=param_path, header_file=header_path, output_dir=tmpdir)\n fname = glob.glob('{0}/*.hdf5'.format(tmpdir))[0]\n UV = UVData()\n UV.read(fname, file_type='uvh5')\n # Check that the baselines are in the correct order\n nant = UV.Nants_data\n abi = get_autobl_indices(nant, casa=False)\n ant1, ant2 = UV.baseline_to_antnums(UV.baseline_array)\n antenna_order = ant2[abi]+1\n print(antenna_order)\n assert np.all(ant1[abi] == ant2[abi])\n print(UV.time_array[:10])\n print(type(UV.time_array))\n print(UV.time_array.dtype)\n # Check that we can convert to uvfits\n uvh5_to_ms(fname, fname.replace('.hdf5', ''))\n assert os.path.exists(fname.replace('hdf5', 'fits'))\n # Check that we can read in the uvfits file\n assert os.path.exists(fname.replace('hdf5', 'ms'))\n ms = cc.ms()\n status = ms.open(fname.replace('hdf5', 'ms'))\n assert status\n uvw_ms = ms.getdata('uvw')['uvw']\n ms.close()\n # Check that the UVW coordinates are right in the fits file\n f = pf.open(fname.replace('hdf5', 'fits'))\n uu = (f['PRIMARY'].data['UU']*u.s*c.c).to_value(u.m)\n vv = (f['PRIMARY'].data['VV']*u.s*c.c).to_value(u.m)\n ww = (f['PRIMARY'].data['WW']*u.s*c.c).to_value(u.m)\n ant1_array = f['PRIMARY'].data['ANTENNA1']\n ant2_array = f['PRIMARY'].data['ANTENNA2']\n\n df_itrf = get_itrf()\n antenna_positions = np.array([df_itrf['x_m'], df_itrf['y_m'],\n df_itrf['z_m']]).T-UV.telescope_location\n blen = np.zeros((ant1_array.shape[0], 3))\n for i, ant1 in enumerate(ant1_array):\n ant2 = ant2_array[i]\n blen[i, ...] = antenna_positions[int(ant2)-1, :] - \\\n antenna_positions[int(ant1)-1, :]\n \n print(ant1_array[:2], ant2_array[:2])\n assert ant1_array[1]==ant1_array[0] # Check that ant1 and ant2 are defined properly\n time = Time(f['PRIMARY'].data['DATE'],format='jd').mjd\n for i in range(10):\n try:\n if f['PRIMARY'].header['CTYPE{0}'.format(i)] == 'RA':\n ra = f['PRIMARY'].header['CRVAL{0}'.format(i)]*u.deg\n elif f['PRIMARY'].header['CTYPE{0}'.format(i)] == 'DEC':\n dec = f['PRIMARY'].header['CRVAL{0}'.format(i)]*u.deg\n except KeyError:\n continue\n assert ra is not None\n assert dec is not None\n print(time.shape, blen.shape)\n uvw = calc_uvw_blt(blen, time, 'J2000', ra, dec) # Doesnt make sense\n uvw = -1*uvw\n print(uvw[:2])\n print(uu[:2])\n print(vv[:2])\n print(ww[:2])\n # Why have the uvw coordinates been inverted? \n assert np.all(np.abs(uvw[:, 0] - uu) < 1e-1)\n assert np.all(np.abs(uvw[:, 1] - vv) < 1e-1)\n assert np.all(np.abs(uvw[:, 2] - ww) < 1e-1)\n assert np.all(np.abs(uvw-uvw_ms.T) < 1e-2)\n UV = UVData()\n UV.read(fname.replace('hdf5', 'ms'), file_type='ms')\n assert np.all(np.abs(UV.antenna_diameters-4.65) < 1e-4)\n\n", "id": "6812466", "language": "Python", "matching_score": 2.3530144691467285, "max_stars_count": 0, "path": "tests/local/test_writing.py" }, { "content": "\"\"\"\nmeridian_fringestopping.py\n<EMAIL>, Feb 2020\n\nReads correlated data from a psrdada\nringbuffer. 
Fringestops on the meridian for each integrated\nsample, before integrating the data and writing it to a hdf5 file.\n\"\"\"\nimport sys\nfrom dsamfs.routines import run_fringestopping\n\nif len(sys.argv) > 1:\n OUTDIR = sys.argv[1]\nelse:\n OUTDIR = None\n\nif len(sys.argv) > 2:\n PARAM_FILE = sys.argv[2]\nelse:\n PARAM_FILE = None\n\nif len(sys.argv) > 3:\n HEADER_FILE = sys.argv[3]\nelse:\n HEADER_FILE = None\n\nrun_fringestopping(PARAM_FILE, header_file=HEADER_FILE, output_dir=OUTDIR)\n", "id": "2840658", "language": "Python", "matching_score": 1.4423503875732422, "max_stars_count": 0, "path": "dsamfs/meridian_fringestop.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nSetup script for the PSRDada python bindings.\n\nBuild and install the package using distutils.\n\"\"\"\n\n# pylint: disable=all\nfrom Cython.Build import cythonize\nfrom setuptools import setup\nfrom distutils.extension import Extension\nfrom os import environ, path\n\nwith open('README.md') as readme_file:\n README = readme_file.read()\n\nwith open(path.join('psrdada', '__version__.py')) as version_file:\n version = {}\n exec(version_file.read(), version)\n PROJECT_VERSION = version['__version__']\n\n# Get the header locations from the environment\nINCLUDE_DIRS = []\nif \"CPATH\" in environ:\n flags = environ[\"CPATH\"].split(':')\n for flag in flags:\n # when usingn spack, there is no -I prefix\n INCLUDE_DIRS.append(flag)\n\nif \"CFLAGS\" in environ:\n flags = environ[\"CFLAGS\"].split(' ')\n for flag in flags:\n if flag[0:2] == '-I':\n # when usingn spack, there is no -I prefix\n INCLUDE_DIRS.append(flag[2:-1])\n\n# keep the original order\nINCLUDE_DIRS.reverse() \n\n# Get the header locations from the environment\nLIBRARY_DIRS = []\nif \"LD_LIBRARY_PATH\" in environ:\n flags = environ[\"LD_LIBRARY_PATH\"].split(':')\n for flag in flags:\n # when usingn spack, there is no -I prefix\n LIBRARY_DIRS.append(flag)\n\n # keep the original order\n LIBRARY_DIRS.reverse() \n\nEXTENSIONS = [\n Extension(\n \"psrdada.ringbuffer\",\n [\"psrdada/ringbuffer.pyx\"], \n libraries=[\"psrdada\"],\n library_dirs=LIBRARY_DIRS,\n include_dirs=INCLUDE_DIRS\n ),\n Extension(\n \"psrdada.reader\",\n [\"psrdada/reader.pyx\"],\n libraries=[\"psrdada\"],\n library_dirs=LIBRARY_DIRS,\n include_dirs=INCLUDE_DIRS\n ),\n Extension(\n \"psrdada.writer\",\n [\"psrdada/writer.pyx\"],\n libraries=[\"psrdada\"],\n library_dirs=LIBRARY_DIRS,\n include_dirs=INCLUDE_DIRS\n ),\n ]\n\nsetup(\n name='psrdada',\n version=PROJECT_VERSION,\n description=\"Python3 bindings to the ringbuffer implementation in PSRDada\",\n long_description=README + '\\n\\n',\n author=\"<NAME>\",\n author_email='<EMAIL>',\n url='https://github.com/NLeSC/psrdada-python',\n packages=['psrdada',],\n package_dir={'psrdada': 'psrdada'},\n include_package_data=True,\n license=\"Apache Software License 2.0\",\n zip_safe=False,\n keywords='psrdada',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n ],\n test_suite='tests',\n ext_modules=cythonize(EXTENSIONS),\n)\n", "id": "724143", "language": "Python", "matching_score": 2.7490344047546387, "max_stars_count": 0, "path": "setup.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nPython bindings to the PSR Dada 
library.\n\nThis package provides a minimal interface to the PSRDada library.\nExported are the Reader and Writer classes to connect with\npsrdada ring buffers.\n\nRingbuffers are used to process large data streams, in our case data generated\nby radio telescopes.\nA writer and (mulitple) readers can connect to the buffer and read, process,\nand write data with a minimum of data copies.\nThis library exposes the ringbuffer as a Cython memory view, which you can then\ninteract with via fi. numpy.\n\nUse cases are:\n * rapid prototyping\n * a glue layer to run CUDA kernels\n * interactive use of the telescope\n\"\"\"\nfrom psrdada.reader import Reader\nfrom psrdada.writer import Writer\nfrom .__version__ import __version__\n\n__author__ = '<NAME>'\n__email__ = '<EMAIL>'\n\n__all__ = ['Reader', 'Writer']\n__version__ = '0.1.0'\n", "id": "12306068", "language": "Python", "matching_score": 0.8912399411201477, "max_stars_count": 0, "path": "psrdada/__init__.py" }, { "content": "\"\"\"\nA quick script to write to a psrdada buffer in order to test a psrdada reader.\n\"\"\"\n\nimport os\nimport subprocess\nfrom time import sleep\nimport numpy as np\nfrom psrdada import Writer\n\nKEY_STRING = 'adad'\nKEY = 0xadad\nNANT = 64 #16\nNCHAN = 384 #1536 #*4\nNPOL = 2\nNBLS = NANT*(NANT+1)//2\n\ndef main():\n \"\"\"Writes a psrdada buffer for test\"\"\"\n vis_temp = np.arange(NBLS*NCHAN*NPOL*2, dtype=np.float32)\n\n # Define the data rate, including the buffer size\n # and the header size\n samples_per_frame = 1\n # sample_rate = 1/0.134217728\n header_size = 4096\n buffer_size = int(4*NBLS*NPOL*NCHAN*samples_per_frame*2)\n assert buffer_size == vis_temp.nbytes, (\"Sample data size and buffer \"\n \"size do not match.\")\n\n # Create the buffer\n # data_rate = buffer_size*(sample_rate/samples_per_frame)/1e6\n os.system('dada_db -a {0} -b {1} -k {2}'.format(header_size, buffer_size,\n KEY_STRING))\n print('Buffer created')\n\n # Start the reader\n read = 'python ./meridian_fringestop.py /home/ubuntu/data/ /home/ubuntu/proj/dsa110-shell/dsa110-meridian-fs/dsamfs/data/test_parameters.yaml /home/ubuntu/proj/dsa110-shell/dsa110-meridian-fs/dsamfs/data/test_header.txt'\n read_log = open('/home/ubuntu/data/tmp/write.log', 'w')\n _read_proc = subprocess.Popen(read, shell=True, stdout=read_log,\n stderr=read_log)\n print('Reader started')\n sleep(0.1)\n\n # Write to the buffer\n writer = Writer(KEY)\n print('Writer created')\n for i in range(48):\n page = writer.getNextPage()\n data = np.asarray(page)\n data[...] 
= vis_temp.view(np.int8)\n if i < 9:\n writer.markFilled()\n else:\n writer.markEndOfData()\n vis_temp += 1\n # Wait to allow reader to clear pages\n sleep(1)\n\n writer.disconnect()\n os.system('dada_db -d -k {0}'.format(KEY_STRING))\n\nif __name__ == '__main__':\n main()\n", "id": "1644165", "language": "Python", "matching_score": 3.456723213195801, "max_stars_count": 0, "path": "dsamfs/psrdada_write.py" }, { "content": "\"\"\"Routines for running meridian fringestopping of DSA-110 data.\n\n<NAME>, <EMAIL>, 2020\n\"\"\"\n\nimport subprocess\nimport numpy as np\nimport astropy.units as u\nfrom psrdada import Reader\nimport dsautils.dsa_syslog as dsl\nimport dsamfs.utils as pu\nfrom dsamfs.io import dada_to_uvh5\n\nlogger = dsl.DsaSyslogger()\nlogger.subsystem(\"software\")\nlogger.app(\"dsamfs\")\n\ndef run_fringestopping(param_file=None, header_file=None, output_dir=None):\n \"\"\"Read in data, fringestop on zenith, and write to hdf5 file.\n Parameters\n ----------\n param_file : str\n The full path to the json parameter file. Defaults to the file\n meridian_fringestopping_parameters.py in the package directory.\n \"\"\"\n # Read in parameter file\n test, key_string, nant, nchan, npol, fobs, samples_per_frame, \\\n samples_per_frame_out, nint, nfreq_int, antenna_order, pt_dec, tsamp, fringestop, filelength_minutes, outrigger_delays = \\\n pu.parse_params(param_file)\n nbls = (nant*(nant+1))//2\n key = int('0x{0}'.format(key_string), 16)\n\n fs_table = 'fringestopping_table_dec{0:.1f}deg_{1}ant.npz'.format((pt_dec*u.rad).to_value(u.deg), len(antenna_order))\n if output_dir is not None:\n fs_table = '{0}/{1}'.format(output_dir, fs_table)\n bname, blen, uvw = pu.baseline_uvw(antenna_order, pt_dec, casa_order=False)\n\n logger.info(\"Started fringestopping of dada buffer {0} with {1} \"\n \"integrations and {2} baselines.\")\n\n # Get the visibility model\n vis_model = pu.load_visibility_model(\n fs_table, blen, nint, fobs, pt_dec, tsamp, antenna_order,\n outrigger_delays, bname\n )\n if not fringestop:\n vis_model = np.ones(vis_model.shape, vis_model.dtype)\n\n if test:\n sample_rate = 1/0.134217728\n header_size = 4096\n buffer_size = int(4*nbls*npol*nchan*samples_per_frame*2)\n data_rate = buffer_size*(sample_rate/samples_per_frame)/1e6\n p_create = subprocess.Popen(\n [\"dada_db\", \"-a\", str(header_size), \"-b\", str(buffer_size), \"-k\",\n key_string], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n outs, errs = p_create.communicate(timeout=15)\n if p_create.returncode != 0:\n print(errs.decode(\"utf-8\"))\n logger.info(errs.decode(\"utf-8\"))\n raise RuntimeError('Dada buffer could not be created.')\n print(outs.decode(\"utf-8\"))\n\n print('Initializing reader: {0}'.format(key_string))\n reader = Reader(key)\n\n if test:\n p_write = subprocess.Popen(\n [\"dada_junkdb\", \"-r\", str(data_rate), \"-t\", \"60\", \"-k\", key_string,\n header_file], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n # Get the start time and the sample time from the reader\n sample_rate_out = 1/(tsamp*nint)\n\n # Read in psrdada buffer, fringestop, and write to uvh5\n dada_to_uvh5(\n reader, output_dir, nbls, nchan, npol, nint, nfreq_int,\n samples_per_frame_out, sample_rate_out, pt_dec, antenna_order,\n fs_table, tsamp, bname, uvw, fobs,\n vis_model, test, filelength_minutes\n )\n\n if test:\n outs, errs = p_write.communicate(timeout=15)\n if p_write.returncode != 0:\n logger.info(errs.decode(\"utf-8\"))\n print(errs.decode(\"utf-8\"))\n raise RuntimeError('Error in writing to dada buffer.')\n 
print(outs.decode(\"utf-8\"))\n print(errs.decode(\"utf-8\"))\n p_kill = subprocess.Popen(\n [\"dada_db\", \"-d\", \"-k\", key_string], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n outs, errs = p_kill.communicate(timeout=15)\n if p_kill.returncode != 0:\n logger.info(errs.decode(\"utf-8\"))\n print(errs.decode(\"utf-8\"))\n else:\n print(outs.decode(\"utf-8\"))\n\n logger.info(\"Disconnected from psrdada buffer {0}\".format(key_string))\n", "id": "3542885", "language": "Python", "matching_score": 2.1531810760498047, "max_stars_count": 0, "path": "dsamfs/routines.py" }, { "content": "\"\"\"\nDSACALIB/FITS_IO.PY\n\n<NAME>, <EMAIL>, 10/2019\n\nModified for python3 from DSA-10 routines written by <NAME>, <NAME>.\n\nRoutines to interact w/ fits visibilities recorded by DSA-10.\n\"\"\"\n\n# TODO: Replace to_deg w/ astropy versions\n\nimport warnings\nimport numpy as np\nfrom dsacalib import constants as ct\nfrom dsacalib.utils import get_autobl_indices\n# pylint will complain about this, but iers.conf.iers_auto_url_mirror must be\n# set before astropy.time.Time is imported.\nimport astropy.io.fits as pf\nimport astropy.units as u\nfrom astropy.utils import iers\niers.conf.iers_auto_url_mirror = ct.IERS_TABLE\niers.conf.auto_max_age = None\nfrom astropy.time import Time # pylint: disable=wrong-import-position\n\nwarnings.warn(\n \"the fits_io module is deprecated and will be removed in v2.0.0\",\n DeprecationWarning,\n stacklevel=2\n)\n\ndef read_psrfits_file(fl, source, dur=50*u.min, antenna_order=None,\n antpos=None, utc_start=None, autocorrs=False,\n badants=None, quiet=True, dsa10=True):\n r\"\"\"Reads in the psrfits header and data.\n\n Parameters\n ----------\n fl : str\n The full path to the psrfits file.\n source : src class instance\n The source to retrieve data for.\n dur : astropy quantity in minutes or equivalent\n The amount of time to extract around the source meridian passing.\n Defaults ``50*u.min``.\n antenna_order : list\n The order of the antennas in the correlator. Only used if dsa10 is set\n to ``False``. Defaults ``None``.\n antpos : str\n The full path of the text file containing the antenna ITRF positions.\n Defaults `dsacalib.constants.PKG_DATA_PATH`/antpos_ITRF.txt.\n utc_start : astropy time object\n The start time of the observation in UTC. Only used if dsa10 is set to\n ``False``. Defaults ``None``.\n autocorrs : boolean\n If set to ``True``, both auto and cross correlations will be returned.\n If set to False, only the cross correlations will be returned. Defaults\n ``False``.\n badants : list(int)\n The antennas for which you do not want data to be returned. If set to\n ``None``, all antennas are returned. Defaults ``None``.\n quiet : boolean\n If set to ``True``, infromation on the file printed to stdout.\n dsa10 : boolean\n If set to ``True``, assumes fits file is in dsa10 format, otherwise\n assumes fits file is in the 3-antenna correlator output format.\n Defaults ``True``.\n\n Returns\n -------\n fobs : array\n The frequency of the channels in GHz.\n blen : array\n The itrf coordinates of the baselines, shape (nbaselines, 3).\n bname : list\n The station pairs for each baseline (in the same order as blen). 
Shape\n (nbaselines, 2).\n tstart : float\n The start time of the extracted data in MJD.\n tstop : float\n The stop time of the extracted data in MJD.\n tsamp : float\n The sampling time in seconds.\n vis : ndarray\n The requested visibilities, dimensions (baseline, time, frequency,\n polarization).\n mjd : array\n The midpoint MJD of each subintegration in the visibilities.\n lst : array\n The midpoint LST of each subintegration in the visibilities.\n transit_idx : int\n The index of the meridian passing in the time axis of the visibilities.\n antenna_order : list\n The antenna indices, in the order that they are in in the visibilities.\n \"\"\"\n if antpos is None:\n antpos = '{0}/antpos_ITRF.txt'.format(ct.PKG_DATA_PATH)\n fo = pf.open(fl, ignore_missing_end=True)\n f = fo[1]\n if dsa10:\n _nchan, fobs, _nt, blen, bname, tstart, tstop, tsamp, antenna_order = \\\n get_header_info(f, verbose=True, antpos=antpos)\n vis, lst, mjd, transit_idx = extract_vis_from_psrfits(\n f, source.ra.to_value(u.rad),\n (dur/2*(15*u.deg/u.h)).to_value(u.rad), antenna_order, tstart,\n tstop, quiet)\n else:\n assert antenna_order is not None, 'Antenna order must be provided'\n assert utc_start is not None, 'Start time must be provided'\n _nchan, fobs, _nt, blen, bname, tstart_offset, tstop_offset, tsamp, \\\n antenna_order = get_header_info(f, verbose=True, antpos=antpos,\n antenna_order=antenna_order,\n dsa10=False)\n tstart = (utc_start+tstart_offset*u.s).mjd\n tstop = (utc_start+tstop_offset*u.s).mjd\n vis, lst, mjd, transit_idx = extract_vis_from_psrfits(\n f, source.ra.to_value(u.rad),\n (dur/2*(15*u.deg/u.h)).to_value(u.rad), antenna_order, tstart,\n tstop, quiet)\n fo.close()\n\n # Now we have to extract the correct baselines\n nant = len(antenna_order)\n if not autocorrs:\n basels = list(range((nant*(nant+1))//2))\n auto_bls = get_autobl_indices(nant)\n if not dsa10:\n auto_bls = [(len(basels)-1)-auto_bl for auto_bl in auto_bls]\n for i in auto_bls:\n basels.remove(i)\n vis = vis[basels, ...]\n blen = blen[basels, ...]\n bname = [bname[i] for i in basels]\n\n # Reorder the visibilities to fit with CASA ms convention\n if dsa10:\n vis = vis[::-1, ...]\n bname = bname[::-1]\n blen = blen[::-1, ...]\n antenna_order = antenna_order[::-1]\n\n if badants is not None:\n blen = np.array(blen)\n good_idx = list(range(len(bname)))\n for i, bn in enumerate(bname):\n if (bn[0] in badants) or (bn[1] in badants):\n good_idx.remove(i)\n vis = vis[good_idx, ...]\n blen = blen[good_idx, ...]\n bname = [bname[i] for i in good_idx]\n\n if badants is not None:\n for badant in badants:\n antenna_order.remove(badant)\n\n dt = np.median(np.diff(mjd))\n if len(mjd) > 0:\n tstart = mjd[0]-dt/2\n tstop = mjd[-1]+dt/2\n else:\n tstart = None\n tstop = None\n\n if not isinstance(bname, list):\n bname = bname.tolist()\n return fobs, blen, bname, tstart, tstop, tsamp, vis, mjd, lst, \\\n transit_idx, antenna_order\n\ndef get_header_info(f, antpos=None, verbose=False, antenna_order=None,\n dsa10=True):\n \"\"\"Extracts important header info from a visibility fits file.\n\n Parameters\n ----------\n f : pyfits table handle\n The visibility data from the correlator.\n antpos : str\n The path to the text file containing the antenna positions. Defaults\n `dsacalib.constants.PKG_DATA_PATH`.\n verbose : boolean\n If ``True``, information on the fits file is printed to stdout.\n antenna_order : list\n The order of the antennas in the correlator. Required if `dsa10` is\n set to ``False``. 
Defaults ``None``.\n dsa10 : Boolean\n Set to ``True`` if the fits file is in dsa10 correlator format,\n ``False`` if the file is in 6-input correlator format.\n\n Returns\n -------\n nchan : int\n The number of frequency channels.\n fobs : array\n The midpoint frequency of each channel in GHz.\n nt: int\n The number of time subintegrations.\n blen : ndarray\n The ITRF coordinates of the baselines, shape (nbaselines, 3).\n bname : list\n The station pairs for each baseline (in the same order as blen), shape\n (nbaselines, 2).\n tstart : float\n The start time. If `dsa10` is set to ``True``, `tstart` is the start\n time in MJD. If `dsa10` is set to ``False``, `tstart` is the start time\n in seconds past the utc start time of the correlator run.\n tstop : float\n The stop time. If `dsa10` is set to ``True``, `tstart` is the stop time\n in MJD. If `dsa10` is set to ``False``, `tstart` is the stop time in\n seconds past the utc start time of the correlator run.\n tsamp : float\n The sampling time in seconds.\n aname : list\n The antenna names, in the order they are in in the visibilities.\n \"\"\"\n if antpos is None:\n antpos = '{0}/antpos_ITRF.txt'.format(ct.PKG_DATA_PATH)\n if dsa10:\n aname = f.header['ANTENNAS'].split('-')\n aname = [int(an) for an in aname]\n else:\n assert antenna_order is not None, 'Antenna order must be provided'\n aname = antenna_order\n nant = len(aname)\n\n nchan = f.header['NCHAN']\n if dsa10:\n fobs = ((f.header['FCH1']*1e6-(np.arange(nchan)+0.5)*2.*2.5e8/nchan)*\n u.Hz).to_value(u.GHz)\n else:\n fobs = ((f.header['FCH1']*1e6-(np.arange(nchan)+0.5)*2.5e8/8192)*\n u.Hz).to_value(u.GHz)\n nt = f.header['NAXIS2']\n tsamp = f.header['TSAMP']\n\n tel_pos = np.loadtxt(antpos)\n blen = []\n bname = []\n# for i in np.arange(len(aname)-1)+1:\n# for j in np.arange(i+1):\n if dsa10:\n # currently doesn't include autocorrelations\n for i in np.arange(10):\n for j in np.arange(i+1):\n a1 = int(aname[i])-1\n a2 = int(aname[j])-1\n bname.append([a1+1, a2+1])\n blen.append(tel_pos[a1, 1:]-tel_pos[a2, 1:])\n else:\n for j in range(nant):\n for i in range(j, nant):\n a1 = int(aname[i])-1\n a2 = int(aname[j])-1\n bname.append([a2+1, a1+1])\n blen.append(tel_pos[a2, 1:]-tel_pos[a1, 1:])\n blen = np.array(blen)\n\n if dsa10:\n tstart = f.header['MJD']+ct.TIME_OFFSET/ct.SECONDS_PER_DAY\n tstop = tstart+nt*tsamp/ct.SECONDS_PER_DAY\n else:\n tstart = tsamp*f.header['NBLOCKS']\n tstop = tstart+nt*tsamp\n\n if verbose:\n if dsa10:\n print('File covers {0:.2f} hours from MJD {1} to {2}'.format(\n ((tstop-tstart)*u.d).to(u.h), tstart, tstop))\n else:\n print('File covers {0:.2f} h from {1} s to {2} s'.format(\n ((tstop-tstart)*u.s).to(u.h), tstart, tstop))\n return nchan, fobs, nt, blen, bname, tstart, tstop, tsamp, aname\n\ndef extract_vis_from_psrfits(f, lstmid, seg_len, antenna_order, mjd0, mjd1,\n quiet=True):\n \"\"\"Extracts visibilities from a fits file.\n\n Based on clip.extract_segment from DSA-10 routines.\n\n Parameters\n ----------\n f : pyfits table handle\n The fits file containing the visibilities.\n lstmid : float\n The LST around which to extract visibilities, in radians.\n seg_len : float\n The duration (in LST) of visibilities to extract, in radians.\n antenna_order : list\n The order of the antennas in the correlator.\n mjd0 : float\n The start time of the file in MJD.\n mjd1 : float\n The stop time of the file in MJD.\n quiet : boolean\n If set to ``False``, information on the file will be printed. 
Defaults\n ``True``.\n\n Returns\n -------\n vis : ndarray\n The requested visibilities, dimensions (baselines, time, frequency).\n lst : array\n The lst of each integration in the visibilities, in radians.\n mjd : array\n The midpoint mjd of each integration in the visibilities.\n idxmid : int\n The index along the time axis corresponding to the `lstmid`. (i.e the\n index corresponding to the meridian transit of the source.)\n \"\"\"\n vis = f.data['VIS']\n nt = f.header['NAXIS2']\n nchan = f.header['NCHAN']\n tsamp = f.header['TSAMP']\n nant = len(antenna_order)\n\n if (mjd1-mjd0) >= 1:\n print(\"Data covers > 1 sidereal day. Only the first segment will be \"+\n \"extracted\")\n\n lst0 = Time(mjd0, format='mjd').sidereal_time(\n 'apparent', longitude=ct.OVRO_LON*u.rad).to_value(u.rad)\n mjd = mjd0+(np.arange(nt)+0.5)*tsamp/ct.SECONDS_PER_DAY\n lst = np.angle(np.exp(1j*(lst0+2*np.pi/ct.SECONDS_PER_SIDEREAL_DAY*\n np.arange(nt+0.5)*tsamp)))\n\n if not quiet:\n print(\"\\n-------------EXTRACT DATA--------------------\")\n print(\"Extracting data around {0}\".format(lstmid*180/np.pi))\n print(\"{0} Time samples in data\".format(nt))\n print(\"LST range: {0:.1f} --- ({1:.1f}-{2:.1f}) --- {3:.1f}deg\".format(\n lst[0]*180./np.pi, (lstmid-seg_len)*180./np.pi,\n (lstmid+seg_len)*180./np.pi, lst[-1]*180./np.pi))\n\n idxl = np.argmax(np.absolute(np.exp(1j*lst)+np.exp(1j*lstmid)*\n np.exp(-1j*seg_len)))\n idxr = np.argmax(np.absolute(np.exp(1j*lst)+np.exp(1j*lstmid)*\n np.exp(1j*seg_len)))\n idx0 = np.argmax(np.absolute(np.exp(1j*lst)+np.exp(1j*(lstmid))))\n idxmid = idxl-idx0\n\n mjd = mjd[idxl:idxr]\n lst = lst[idxl:idxr]\n vis = vis.reshape((nt, (nant*(nant+1))//2, nchan, 2, 2)) \\\n [idxl:idxr, :, :, :, :]\n\n if not quiet:\n print(\"Extract: {0} ----> {1} sample; transit at {2}\".format(\n idxl, idxr, idx0))\n print(\"----------------------------------------------\")\n\n # Fancy indexing can have downfalls and may change in future numpy versions\n # See issue here https://github.com/numpy/numpy/issues/9450\n # odata = dat[:,basels,:,:,0]+ 1j*dat[:,basels,:,:,1]\n vis = vis[..., 0]+1j*vis[..., 1]\n vis = vis.swapaxes(0, 1)\n\n return vis, lst, mjd, idxmid\n", "id": "9032490", "language": "Python", "matching_score": 6.919518947601318, "max_stars_count": 1, "path": "dsacalib/fits_io.py" }, { "content": "\"\"\"\nDSACALIB/HDF5_UTILS.PY\n\n<NAME>, <EMAIL>, 10/2019\n\nRoutines to interact w/ hdf5 visibilities recorded by DSA-110.\n\"\"\"\n# Always import scipy before importing casatools.\nimport numpy as np\nimport h5py\nfrom antpos.utils import get_baselines\n# pylint will complain about this, but iers.conf.iers_auto_url_mirror must be\n# set before astropy.time.Time is imported.\nimport astropy.units as u\nfrom dsacalib import constants as ct\nfrom astropy.utils import iers\niers.conf.iers_auto_url_mirror = ct.IERS_TABLE\niers.conf.auto_max_age = None\nfrom astropy.time import Time # pylint: disable=wrong-import-position\n\ndef read_hdf5_file(\n fl, source=None, dur=50*u.min, autocorrs=True, badants=None, quiet=True\n):\n \"\"\"Reads visibilities from a hdf5 file written by dsa110-fringestopping.\n\n Parameters\n ----------\n fl : str\n Full path to the hdf5 file.\n source : source instance\n The source to extract from the hdf5 file. If set to ``None``, the\n entire file is extracted. Defaults ``None``.\n dur : astropy quantity\n The duration of the observation to extract, in minutes or an equivalent\n unit. Only used if `source` is not set to ``None``. 
Defaults\n ``50*u.min``.\n autocorrs : Boolean\n If set to ``True``, both the autocorrelations and the crosscorrelations\n are extracted from the hdf5 file. If set to ``False``, only the\n crosscorrelations are extracted. Defaults ``True``.\n badants : list\n Antennas that have been flagged as bad or offline. If provied,\n baselines that include these antennas will not be extracted. If set to\n ``None``, all baselines in the hdf5 file are extracted. Defaults\n ``None``.\n quiet : Boolean\n If set to ``False``, information about the file will be printed to\n stdout. Defaults ``True``.\n\n Returns\n -------\n fobs : array\n The observing frequency of the center of each channel in the\n visibilities, in GHz.\n blen : ndarray\n The ITRF baseline lengths, dimensions (nbaselines, 3).\n bname : list(str)\n The name of each baseline.\n tstart : float\n The start time of the extracted visibilities in MJD. If the data does\n not contain the specified ``source``, a value of ``None`` is returned.\n tstop : float\n The stop time of the extracted visibilities in MJD. If the data does\n not contain the specified ``source``, a value of ``None`` is returned.\n vis : ndarray\n The extracted visibilities, dimensions (baselines, time, frequency,\n polarization).\n mjd : array\n The time of the center of each timebin in the visibilities, in MJD.\n transit_idx : int\n The index along the time axis corresponding to the meridian crossing of\n the source given by `source`. If `source` is set to ``None``, a value\n of ``None`` is returned.\n antenna_order : list\n The antenna names, in the order they are in in the hdf5 visibilities.\n tsamp : float\n The sampling time in seconds.\n \"\"\"\n with h5py.File(fl, 'r') as f:\n antenna_order = list(f['antenna_order'][...])\n nant = len(antenna_order)\n fobs = f['fobs_GHz'][...]\n mjd = (f['time_seconds'][...]+f['tstart_mjd_seconds'])/ \\\n ct.SECONDS_PER_DAY\n nt = len(mjd)\n tsamp = (mjd[-1]-mjd[0])/(nt-1)*ct.SECONDS_PER_DAY\n lst0 = Time(mjd[0], format='mjd').sidereal_time(\n 'apparent', longitude=ct.OVRO_LON*u.rad).to_value(u.rad)\n lst = np.angle(np.exp(\n 1j*(lst0+2*np.pi/ct.SECONDS_PER_SIDEREAL_DAY*np.arange(nt)*tsamp)))\n\n if source is not None:\n lstmid = lst0 - source.direction.hadec(obstime=mjd[0])[0]\n seg_len = (dur/2*(15*u.deg/u.h)).to_value(u.rad)\n if not quiet:\n print(\"\\n-------------EXTRACT DATA--------------------\")\n print(\"Extracting data around {0}\".format(lstmid*180/np.pi))\n print(\"{0} Time samples in data\".format(nt))\n print(\"LST range: {0:.1f} --- ({1:.1f}-{2:.1f}) --- {3:.1f}deg\"\n .format(lst[0]*180./np.pi, (lstmid-seg_len)*180./np.pi,\n (lstmid+seg_len)*180./np.pi, lst[-1]*180./np.pi))\n idxl = np.argmax(np.absolute(np.exp(1j*lst)+\n np.exp(1j*lstmid)*\n np.exp(-1j*seg_len)))\n idxr = np.argmax(np.absolute(np.exp(1j*lst)+\n np.exp(1j*lstmid)*np.exp(1j*seg_len)))\n transit_idx = np.argmax(np.absolute(np.exp(1j*lst)+\n np.exp(1j*(lstmid))))\n\n mjd = mjd[idxl:idxr]\n vis = f['vis'][idxl:idxr, ...]\n if not quiet:\n print(\"Extract: {0} ----> {1} sample; transit at {2}\".format(\n idxl, idxr, transit_idx))\n print(\"----------------------------------------------\")\n\n else:\n vis = f['vis'][...]\n transit_idx = None\n\n df_bls = get_baselines(antenna_order, autocorrs=True, casa_order=True)\n blen = np.array([df_bls['x_m'], df_bls['y_m'], df_bls['z_m']]).T\n bname = np.array([bn.split('-') for bn in df_bls['bname']])\n bname = bname.astype(int)\n\n if not autocorrs:\n cross_bls = list(range((nant*(nant+1))//2))\n i = -1\n for j in 
range(1, nant+1):\n i += j\n cross_bls.remove(i)\n vis = vis[:, cross_bls, ...]\n blen = blen[cross_bls, ...]\n bname = bname[cross_bls, ...]\n\n assert vis.shape[0] == len(mjd)\n assert vis.shape[1] == len(cross_bls)\n\n if badants is not None:\n good_idx = list(range(len(bname)))\n for i, bn in enumerate(bname):\n if (bn[0] in badants) or (bn[1] in badants):\n good_idx.remove(i)\n vis = vis[:, good_idx, ...]\n blen = blen[good_idx, ...]\n bname = bname[good_idx, ...]\n\n for badant in badants:\n antenna_order.remove(badant)\n\n assert vis.shape[0] == len(mjd)\n vis = vis.swapaxes(0, 1)\n dt = np.median(np.diff(mjd))\n if len(mjd) > 0:\n tstart = mjd[0]-dt/2\n tstop = mjd[-1]+dt/2\n else:\n tstart = None\n tstop = None\n\n bname = bname.tolist()\n return fobs, blen, bname, tstart, tstop, vis, mjd, transit_idx, \\\n antenna_order, tsamp\n\ndef initialize_hdf5_file(\n fhdf, fobs, antenna_order, t0, nbls, nchan, npol, nant\n):\n \"\"\"Initializes the hdf5 file for the fringestopped visibilities.\n\n Parameters\n ----------\n fhdf : hdf5 file handler\n The file to initialize.\n fobs : array\n The center frequency of each channel in GHz.\n antenna_order : array\n The order of the antennas in the correlator.\n t0 : float\n The time of the first subintegration in MJ seconds.\n nbls : int\n The number of baselines in the visibilities.\n nchan : int\n The number of channels in the visibilities.\n npol : int\n The number of polarizations in the visibilities.\n nant : int\n The number of antennas in the visibilities.\n\n Returns\n -------\n vis_ds : hdf5 dataset\n The dataset for the visibilities.\n t_ds : hdf5 dataset\n The dataset for the times.\n \"\"\"\n _ds_fobs = fhdf.create_dataset(\"fobs_GHz\", (nchan, ), dtype=np.float32,\n data=fobs)\n _ds_ants = fhdf.create_dataset(\"antenna_order\", (nant, ), dtype=np.int,\n data=antenna_order)\n _t_st = fhdf.create_dataset(\"tstart_mjd_seconds\", (1, ), maxshape=(1, ),\n dtype=int, data=t0)\n vis_ds = fhdf.create_dataset(\"vis\", (0, nbls, nchan, npol),\n maxshape=(None, nbls, nchan, npol),\n dtype=np.complex64, chunks=True, data=None)\n t_ds = fhdf.create_dataset(\"time_seconds\", (0, ), maxshape=(None, ),\n dtype=np.float32, chunks=True, data=None)\n return vis_ds, t_ds\n", "id": "367259", "language": "Python", "matching_score": 2.187624216079712, "max_stars_count": 1, "path": "dsacalib/hdf5_io.py" }, { "content": "\"\"\"Functions used in fringestopping of DSA-110 visibilities.\n\nAuthor: <NAME>, <EMAIL> 11/2019\n\nThese functions use casatools to build sky models, divide visibilities by sky\nmodels, and fringestop visibilities.\n\n\"\"\"\n\n# always import scipy before importing casatools\nfrom scipy.special import j1\nimport casatools as cc\nimport numpy as np\nfrom numba import jit\nimport astropy.units as u\nfrom astropy.coordinates.angle_utilities import angular_separation\nfrom dsacalib import constants as ct\n\ndef calc_uvw(blen, tobs, src_epoch, src_lon, src_lat, obs='OVRO_MMA'):\n \"\"\"Calculates uvw coordinates.\n\n Uses CASA to calculate the u,v,w coordinates of the baselines `b` towards a\n source or phase center (specified by `src_epoch`, `src_lon` and `src_lat`)\n at the specified time and observatory.\n\n Parameters\n ----------\n blen : ndarray\n The ITRF coordinates of the baselines. Type float, shape (nbaselines,\n 3), units of meters.\n tobs : ndarray\n An array of floats, the times in MJD for which to calculate the uvw\n coordinates.\n src_epoch : str\n The epoch of the source or phase-center, as a CASA-recognized string\n e.g. 
``'J2000'`` or ``'HADEC'``\n src_lon : astropy quantity\n The longitude of the source or phase-center, in degrees or an\n equivalent unit.\n src_lat : astropy quantity\n The latitude of the source or phase-center, in degrees or an equivalent\n unit.\n obs : string\n The name of the observatory in CASA.\n\n Returns\n -------\n bu : ndarray\n The u-value for each time and baseline, in meters. Shape is\n ``(len(b), len(tobs))``.\n bv : ndarray\n The v-value for each time and baseline, in meters. Shape is\n ``(len(b), len(tobs))``.\n bw : ndarray\n The w-value for each time and baseline, in meters. Shape is\n ``(len(b), len(tobs))``.\n \"\"\"\n tobs, blen = set_dimensions(tobs=tobs, blen=blen)\n nt = tobs.shape[0]\n nb = blen.shape[0]\n bu = np.zeros((nt, nb))\n bv = np.zeros((nt, nb))\n bw = np.zeros((nt, nb))\n\n\n # Define the reference frame\n me = cc.measures()\n qa = cc.quanta()\n if obs is not None:\n me.doframe(me.observatory(obs))\n\n if not isinstance(src_lon.ndim, float) and src_lon.ndim > 0:\n assert src_lon.ndim == 1\n assert src_lon.shape[0] == nt\n assert src_lat.shape[0] == nt\n direction_set = False\n else:\n if (src_epoch == 'HADEC') and (nt > 1):\n raise TypeError('HA and DEC must be specified at each time in '\n +'tobs.')\n me.doframe(me.direction(src_epoch,\n qa.quantity(src_lon.to_value(u.deg), 'deg'),\n qa.quantity(src_lat.to_value(u.deg), 'deg')))\n direction_set = True\n\n contains_nans = False\n\n for i in range(nt):\n me.doframe(me.epoch('UTC', qa.quantity(tobs[i], 'd')))\n if not direction_set:\n me.doframe(me.direction(src_epoch,\n qa.quantity(src_lon[i].to_value(u.deg),\n 'deg'),\n qa.quantity(src_lat[i].to_value(u.deg),\n 'deg')))\n for j in range(nb):\n bl = me.baseline('itrf', qa.quantity(blen[j, 0], 'm'),\n qa.quantity(blen[j, 1], 'm'),\n qa.quantity(blen[j, 2], 'm'))\n # Get the uvw coordinates\n try:\n uvw = me.touvw(bl)[1]['value']\n bu[i, j], bv[i, j], bw[i, j] = uvw[0], uvw[1], uvw[2]\n except KeyError:\n contains_nans = True\n bu[i, j], bv[i, j], bw[i, j] = np.nan, np.nan, np.nan\n if contains_nans:\n print('Warning: some solutions not found for u, v, w coordinates')\n return bu.T, bv.T, bw.T\n\n@jit(nopython=True)\ndef visibility_sky_model_worker(vis_model, bws, famps, f0, spec_idx, fobs):\n \"\"\"Builds complex sky models.\n\n This is a worker to contain the for loop in the visibility model\n calculation using jit. Modifies the input array `vis_model` in place.\n\n Parameters\n ----------\n vis_model : ndarray\n A placeholder for the output. A complex array initialized to zeros,\n with the same shape as the array of visibilities you wish to model.\n Dimensions (baseline, time, freq, polarization).\n bws : ndarray\n The w component of the baselines towards the phase-center or towards\n each source in the sky model. 
Dimensions (sources,baselines).\n famps : ndarray\n The flux of each source at the reference frequency, in Jy.\n f0 : float\n The reference frequency, in GHz.\n spec_idx : float\n The spectral index for the frequency dependence of the source flux.\n fobs : ndarray\n The central frequency of each channel of the visibilities, in GHz.\n \"\"\"\n for i in range(bws.shape[0]):\n vis_model += famps[i, ...]*((fobs/f0)**(spec_idx))* \\\n np.exp(2j*np.pi/ct.C_GHZ_M*fobs*bws[i, ...])\n\ndef _py_visibility_sky_model_worker(vis_model, bws, famps, f0, spec_idx, fobs):\n \"\"\"Builds complex sky models.\n\n A pure python version of `visibility_model_worker` for timing against.\n Modifies the input array `vis_model` in place.\n\n Parameters\n ----------\n vis_model : ndarray\n A placeholder for the output. A complex array initialized to zeros,\n with the same shape as the array of visibilities you wish to model.\n Dimensions (baseline, time, freq, polarization).\n bws : ndarray\n The w component of the baselines towards the phase-center or towards\n each source in the sky model. Dimensions (sources, baselines).\n famps : ndarray\n The flux of each source at the reference frequency, in Jy.\n f0 : float\n The reference frequency, in GHz.\n spec_idx : float\n The spectral index for the frequency dependence of the source flux.\n fobs : ndarray\n The central frequency of each channel of the visibilities, in GHz.\n \"\"\"\n for i in range(bws.shape[0]):\n vis_model += famps[i, ...]*((fobs/f0)**(spec_idx))* \\\n np.exp(2j*np.pi/ct.C_GHZ_M*fobs*bws[i, ...])\n\ndef set_dimensions(fobs=None, tobs=None, blen=None):\n \"\"\"Sets the dimensions of arrays for fringestopping.\n\n Ensures that `fobs`, `tobs` and `blen` are ndarrays with the correct\n dimensions for fringestopping using jit. Any combination of these arrays\n may be passed. If no arrays are passed, an empty list is returned\n\n Parameters\n ----------\n fobs : float or ndarray\n The central frequency of each channel in GHz. Defaults ``None``.\n tobs : float or ndarray\n The central time of each subintegration in MJD. Defaults ``None``.\n blen : ndarray\n The baselines in ITRF coordinates with dimensions (nbaselines, 3) or\n (3) if a single baseline. Defaults ``None``.\n\n Returns\n -------\n list\n A list of the arrays passed, altered to contain the correct dimensions\n for fringestopping. The list may contain:\n\n fobs : ndarray\n The central frequency of each channel in GHz with dimensions\n (channels, 1). Included if `fobs` is not set to ``None``.\n\n tobs : ndarray\n The central time of each subintegration in MJD with dimensions\n (time). Included if `tobs` is not set to ``None``.\n\n b : ndarray\n The baselines in ITRF coordaintes with dimensions (baselines, 3) or\n (1, 3) if a single baseline. 
Included if ``b`` is not set to None.\n \"\"\"\n to_return = []\n if fobs is not None:\n if not isinstance(fobs, np.ndarray):\n fobs = np.array(fobs)\n if fobs.ndim < 1:\n fobs = fobs[np.newaxis]\n fobs = fobs[:, np.newaxis]\n to_return += [fobs]\n\n if tobs is not None:\n if not isinstance(tobs, np.ndarray):\n tobs = np.array(tobs)\n if tobs.ndim < 1:\n tobs = tobs[np.newaxis]\n to_return += [tobs]\n\n if blen is not None:\n if isinstance(blen, list):\n blen = np.array(blen)\n if blen.ndim < 2:\n blen = blen[np.newaxis, :]\n to_return += [blen]\n return to_return\n\ndef visibility_sky_model(vis_shape, vis_dtype, blen, sources, tobs, fobs, lst,\n pt_dec):\n \"\"\"Calculates the sky model visibilities.\n\n Calculates the sky model visibilities on the baselines `b` and at times\n `tobs`. Ensures that the returned sky model is the same datatype and the\n same shape as specified by `vis_shape` and `vis_dtype` to ensure\n compatability with jit.\n\n Parameters\n ----------\n vis_shape : tuple\n The shape of the visibilities: (baselines, time, frequency,\n polarization).\n vis_dtype: numpy datatype\n The datatype of the visibilities.\n blen: real array\n The baselines to calculate visibilities for, shape (nbaselines, 3),\n units of meters in ITRF coords.\n sources : list(src class instances)\n The sources to include in the sky model.\n tobs : float or ndarray\n The times at which to calculate the visibility model, in MJD.\n fobs : float or ndarray\n The frequencies at which to calculate the visibility model, in GHz.\n lst : ndarray\n The local sidereal time in radians for each time in tobs.\n pt_dec : float\n The antenna pointing declination in radians.\n\n Returns\n -------\n vis_model : ndarray\n The modelled complex visibilities, dimensions (directions, baselines,\n time, frequency, polarization).\n \"\"\"\n # Something seriously wrong here.\n raise NotImplementedError\n fobs, tobs, blen = set_dimensions(fobs, tobs, blen)\n bws = np.zeros((len(sources), len(blen), len(tobs), 1, 1))\n famps = np.zeros((len(sources), 1, len(tobs), len(fobs), 1))\n for i, src in enumerate(sources):\n _, _, bw = calc_uvw(blen, tobs, src.epoch, src.ra, src.dec)\n bws[i, :, :, 0, 0] = bw\n famps[i, 0, :, :, 0] = src.I*pb_resp(lst, pt_dec,\n src.ra.to_value(u.rad),\n src.dec.to_value(u.rad),\n fobs.squeeze())\n # Calculate the sky model using jit\n vis_model = np.zeros(vis_shape, dtype=vis_dtype)\n visibility_sky_model_worker(vis_model, bws, famps, ct.F0, ct.SPEC_IDX,\n fobs)\n return vis_model\n\ndef fringestop(vis, blen, source, tobs, fobs, pt_dec, return_model=False):\n \"\"\"Fringestops on a source.\n\n Fringestops on a source (or sky position) by dividing the input\n visibilities by a phase only model. The input visibilities, vis, are\n modified in place.\n\n Parameters\n ----------\n vis : ndarray\n The visibilities t be fringestopped, with dimensions (baselines, time,\n freq, pol). `vis` is modified in place.\n blen : ndarray\n The ITRF coordinates of the baselines in meters, with dimensions (3,\n baselines).\n source : src class instance\n The source to fringestop on.\n tobs : ndarray\n The observation time (the center of each bin) in MJD.\n fobs : ndarray\n The observing frequency (the center of each bin) in GHz.\n pt_dec : float\n The pointing declination of the array, in radians.\n return_model : boolean\n If ``True``, the fringestopping model is returned. 
Defaults ``False``.\n\n Returns\n -------\n vis_model : ndarray\n The phase-only visibility model by which the visiblities were divided.\n Returned only if `return_model` is set to ``True``.\n \"\"\"\n fobs, tobs, blen = set_dimensions(fobs, tobs, blen)\n _, _, bw = calc_uvw(blen, tobs, source.epoch, source.ra, source.dec)\n # note that the time shouldn't matter below\n _, _, bwp = calc_uvw(blen, tobs[len(tobs)//2], 'HADEC', 0.*u.rad,\n pt_dec*u.rad)\n bw = bw - bwp\n vis_model = np.exp(2j*np.pi/ct.C_GHZ_M*fobs* \\\n bw[..., np.newaxis, np.newaxis])\n vis /= vis_model\n if return_model:\n return vis_model\n return\n\ndef divide_visibility_sky_model(vis, blen, sources, tobs, fobs, lst, pt_dec,\n return_model=False):\n \"\"\"Calculates and applies the sky model visibilities.\n\n Calculates the sky model visibilities on the baselines `blen` and at times\n `tobs`. Divides the input visibilities, `vis`, by the sky model. `vis` is\n modified in-place.\n\n Parameters\n ----------\n vis : ndarray\n The observed complex visibilities, with dimensions (baselines, time,\n frequency, polarization). `vis` will be updated in place to the\n fringe-stopped visibilities\n blen : ndarray\n The baselines for which to calculate visibilities, shape (nbaselines,\n 3), units of meters in ITRF coords.\n sources : list(src class objects)\n The list of sources to include in the sky model.\n tobs : float or ndarray\n The times for which to calculate the visibility model in MJD.\n fobs: float or arr(float)\n The frequency for which to calculate the model in GHz.\n lst : fload or ndarray\n The lsts at which to calculate the visibility model.\n pt_dec : float\n The pointing declination in radians.\n return_model: boolean\n If set to ``True``, the visibility model will be returned. Defaults to\n ``False``.\n\n Returns\n -------\n vis_model: ndarray\n The modelled visibilities, dimensions (baselines, time, frequency,\n polarization). Returned only if `return_model` is set to True.\n \"\"\"\n vis_model = visibility_sky_model(vis.shape, vis.dtype, blen, sources, tobs,\n fobs, lst, pt_dec)\n vis /= vis_model\n if return_model:\n return vis_model\n return\n\ndef complex_sky_model(source, ant_ra, pt_dec, fobs, tobs, blen, dish_dia=4.65,\n spind=0.7, pointing_ra=None):\n \"\"\"Computes the complex sky model, taking into account pointing errors.\n\n Use when the pointing error is large enough to introduce a phase error in\n the visibilities if not properly accounted for.\n\n Parameters\n ----------\n source : src class instance\n The source to model. The source flux (`src.I`), right ascension\n (`src.ra`) and declination (`src.dec`) must be specified.\n ant_ra : ndarray\n The right ascension pointing of the antenna in each time bin of the\n observation, in radians. If az=0 deg or az=180 deg, this is the lst of\n each time bin in the observation in radians.\n pt_dec : float\n The pointing declination of the observation in radians.\n fobs : array\n The observing frequency of the center of each channel in GHz.\n tobs : array\n The observing time of the center of each timebin in MJD.\n blen : ndarray\n The ITRF dimensions of each baseline, in m.\n dish_dia : float\n The dish diameter in m. Defaults 4.65.\n spind : float\n The spectral index of the source. Defaults 0.7.\n pt_ra : float\n The pointing (either physically or through fringestopping) right\n ascension in radians. If None, defaults to the pointing of the antenna,\n `ant_ra`. 
In other words, assumes fringestopping on the meridian.\n\n Returns\n -------\n ndarray\n The calculated complex sky model.\n \"\"\"\n raise NotImplementedError\n if pointing_ra is None:\n pointing_ra = ant_ra\n model_amplitude = amplitude_sky_model(source, ant_ra, pt_dec, fobs,\n dish_dia=dish_dia, spind=spind)\n fobs, tobs, blen = set_dimensions(fobs, tobs, blen)\n _, _, bw = calc_uvw(blen, tobs, source.epoch, source.ra, source.dec)\n _, _, bwp = calc_uvw(blen, tobs, 'RADEC', pointing_ra*u.rad, pt_dec*u.rad)\n bw = bw - bwp\n model_phase = np.exp(2j*np.pi/ct.C_GHZ_M*fobs*\n bw[..., np.newaxis, np.newaxis])\n return model_amplitude*model_phase\n\ndef amplitude_sky_model(source, ant_ra, pt_dec, fobs, dish_dia=4.65,\n spind=0.7):\n \"\"\"Computes the amplitude sky model due to the primary beam.\n\n Computes the amplitude sky model for a single source due to the primary\n beam response of an antenna.\n\n Parameters\n ----------\n source : src class instance\n The source to model. The source flux (`src.I`), right ascension\n (`src.ra`) and declination (`src.dec`) must be specified.\n ant_ra : ndarray\n The right ascension pointing of the antenna in each time bin of the\n observation, in radians. If az=0 deg or az=180 deg, this is the lst of\n each time bin in the observation in radians.\n pt_dec : float\n The pointing declination of the observation in radians.\n fobs : array\n The observing frequency of the center of each channel in GHz.\n dish_dia : float\n The dish diameter in m. Defaults 4.65.\n spind : float\n The spectral index of the source. Defaults 0.7.\n\n Returns\n -------\n ndarray\n The calculated amplitude sky model.\n \"\"\"\n # Should add spectral index\n return source.I*(fobs/1.4)**(-spind)*pb_resp(\n ant_ra, pt_dec, source.ra.to_value(u.rad), source.dec.to_value(u.rad),\n fobs, dish_dia\n )\n\ndef pb_resp_uniform_ill(ant_ra, ant_dec, src_ra, src_dec, freq, dish_dia=4.65):\n \"\"\"Computes the primary beam response towards a direction on the sky.\n\n Assumes uniform illumination of the disk. Returns a value between 0 and 1\n for each value passed in `ant_ra`.\n\n Parameters\n ----------\n ant_ra : float or ndarray\n The antenna right ascension pointing in radians. If an array, must be\n one-dimensional.\n ant_dec : float\n The antenna declination pointing in radians.\n src_ra : float\n The source right ascension in radians.\n src_dec : float\n The source declination in radians.\n freq : ndarray\n The frequency of each channel in GHz.\n dish_dia : float\n The dish diameter in meters. Defaults to ``4.65``.\n\n Returns\n -------\n pb : ndarray\n The primary beam response, dimensions (`ant_ra`, `freq`).\n \"\"\"\n dis = angular_separation(ant_ra, ant_dec, src_ra, src_dec)\n lam = 0.299792458/freq\n pb = (2.0*j1(np.pi*dis[:, np.newaxis]*dish_dia/lam)/ \\\n (np.pi*dis[:, np.newaxis]*dish_dia/lam))**2\n return pb\n\ndef pb_resp(ant_ra, ant_dec, src_ra, src_dec, freq, dish_dia=4.34):\n \"\"\"Computes the primary beam response towards a direction on the sky.\n\n Assumes tapered illumination of the disk. Returns a value between 0 and 1\n for each value passed in `ant_ra`.\n\n Parameters\n ----------\n ant_ra : float or ndarray\n The antenna right ascension pointing in radians. 
If an array, must be\n one-dimensional.\n ant_dec : float\n The antenna declination pointing in radians.\n src_ra : float\n The source right ascension in radians.\n src_dec : float\n The source declination in radians.\n freq : ndarray\n The frequency of each channel in GHz.\n dish_dia : float\n The dish diameter in meters. Defaults to ``4.65``.\n\n Returns\n -------\n pb : ndarray\n The primary beam response, dimensions (`ant_ra`, `freq`) if an array is\n passed to `ant_ra`, or (`freq`) if a float is passed to `ant_ra`.\n \"\"\"\n dis = np.array(angular_separation(ant_ra, ant_dec, src_ra, src_dec))\n if dis.ndim > 0 and dis.shape[0] > 1:\n dis = dis[:, np.newaxis] # prepare for broadcasting\n\n lam = 0.299792458/freq\n arg = 1.2*dis*dish_dia/lam\n pb = (np.cos(np.pi*arg)/(1-4*arg**2))**2\n return pb\n", "id": "7702881", "language": "Python", "matching_score": 5.300586700439453, "max_stars_count": 1, "path": "dsacalib/fringestopping.py" }, { "content": "\"\"\"\nDSAMFS/FRINGESTOPPING.PY\n\n<NAME>, <EMAIL> 11/2019\n\nCasa-based routines for calculating and applying fringe-stopping phases\nto visibilities\n\"\"\"\nimport sys\nimport os\nimport numpy as np\nimport scipy # pylint: disable=unused-import\nimport casatools as cc\nimport astropy.units as u\nfrom dsautils import cnf\nfrom dsacalib import constants as ct\nfrom dsacalib.fringestopping import calc_uvw\n\nMYCONF = cnf.Conf()\nREFMJD = MYCONF.get('fringe')['refmjd']\n\ndef calc_uvw_blt(blen, tobs, src_epoch, src_lon, src_lat, obs='OVRO_MMA'):\n \"\"\"Calculates uvw coordinates.\n\n Uses CASA to calculate the u,v,w coordinates of the baselines `b` towards a\n source or phase center (specified by `src_epoch`, `src_lon` and `src_lat`)\n at the specified time and observatory.\n\n Parameters\n ----------\n blen : ndarray\n The ITRF coordinates of the baselines. Type float, shape (nblt,\n 3), units of meters.\n tobs : ndarray\n An array of floats, the times in MJD for which to calculate the uvw\n coordinates, shape (nblt).\n src_epoch : str\n The epoch of the source or phase-center, as a CASA-recognized string\n e.g. ``'J2000'`` or ``'HADEC'``\n src_lon : astropy quantity\n The longitude of the source or phase-center, in degrees or an\n equivalent unit.\n src_lat : astropy quantity\n The latitude of the source or phase-center, in degrees or an equivalent\n unit.\n\n Returns\n -------\n bu : ndarray\n The u-value for each time and baseline, in meters. Shape is\n ``(len(b), len(tobs))``.\n bv : ndarray\n The v-value for each time and baseline, in meters. Shape is\n ``(len(b), len(tobs))``.\n bw : ndarray\n The w-value for each time and baseline, in meters. 
Shape is\n ``(len(b), len(tobs))``.\n \"\"\"\n nblt = tobs.shape[0]\n buvw = np.zeros((nblt, 3))\n # Define the reference frame\n me = cc.measures()\n qa = cc.quanta()\n if obs is not None:\n me.doframe(me.observatory(obs))\n if not isinstance(src_lon.ndim, float) and src_lon.ndim > 0:\n assert src_lon.ndim == 1\n assert src_lon.shape[0] == nblt\n assert src_lat.shape[0] == nblt\n direction_set = False\n else:\n if (src_epoch == 'HADEC') and (nblt > 1):\n raise TypeError('HA and DEC must be specified at each '\n 'baseline-time in tobs.')\n me.doframe(me.direction(src_epoch,\n qa.quantity(src_lon.to_value(u.deg), 'deg'),\n qa.quantity(src_lat.to_value(u.deg), 'deg')))\n direction_set = True\n contains_nans = False\n for i in range(nblt):\n me.doframe(me.epoch('UTC', qa.quantity(tobs[i], 'd')))\n if not direction_set:\n me.doframe(me.direction(src_epoch,\n qa.quantity(src_lon[i].to_value(u.deg),\n 'deg'),\n qa.quantity(src_lat[i].to_value(u.deg),\n 'deg')))\n bl = me.baseline('itrf', qa.quantity(blen[i, 0], 'm'),\n qa.quantity(blen[i, 1], 'm'),\n qa.quantity(blen[i, 2], 'm'))\n # Get the uvw coordinates\n try:\n buvw[i, :] = me.touvw(bl)[1]['value']\n except KeyError:\n contains_nans = True\n buvw[i, :] = np.ones(3)*np.nan\n if contains_nans:\n print('Warning: some solutions not found for u, v, w coordinates')\n return buvw\n\n\ndef generate_fringestopping_table(\n blen,\n pt_dec,\n nint,\n tsamp, \n antenna_order,\n outrigger_delays,\n bname,\n outname='fringestopping_table',\n mjd0=REFMJD\n):\n \"\"\"Generates a table of the w vectors towards a source.\n\n Generates a table for use in fringestopping and writes it to a numpy\n pickle file named fringestopping_table.npz\n\n Parameters\n ----------\n blen : array\n The lengths of the baselines in ITRF coordinates, in m. Dimensions\n (nbaselines, 3).\n pt_dec : float\n The pointing declination in radians.\n nint : int\n The number of time integrations to calculate the table for.\n tsamp : float\n The sampling time in seconds.\n antenna_order : list\n The order of the antennas.\n outrigger_delays : dict\n The outrigger delays in ns.\n bname : list\n The names of each baseline. Length nbaselines. Names are strings.\n outname : str\n The prefix to use for the table to which to save the w vectors. Will\n save the output to `outname`.npy Defaults ``fringestopping_table``.\n mjd0 : float\n The start time in MJD. 
Defaults 58849.0.\n \"\"\"\n # Get the indices that correspond to baselines with the refant\n # Use the first antenna as the refant so that the baselines are in\n # the same order as the antennas\n refidxs = []\n refant = str(antenna_order[0])\n for i, bn in enumerate(bname):\n if refant in bn:\n refidxs += [i]\n\n # Get the geometric delays at the \"source\" position and meridian\n dt = np.arange(nint)*tsamp\n dt = dt-np.median(dt)\n hangle = dt*360/ct.SECONDS_PER_SIDEREAL_DAY\n _bu, _bv, bw = calc_uvw(\n blen,\n mjd0+dt/ct.SECONDS_PER_DAY,\n 'HADEC',\n hangle*u.deg, \n np.ones(hangle.shape)*(pt_dec*u.rad).to(u.deg)\n )\n _bu, _bv, bwref = calc_uvw(\n blen,\n mjd0,\n 'HADEC',\n 0.*u.deg,\n (pt_dec*u.rad).to(u.deg)\n )\n ant_bw = bwref[refidxs] \n bw = bw-bwref\n bw = bw.T\n bwref = bwref.T\n \n # Add in per-antenna delays for each baseline\n for i, bn in enumerate(bname):\n ant1, ant2 = bn.split('-')\n # Add back in bw at the meridian calculated per antenna\n bw[:, i] += ant_bw[antenna_order.index(int(ant2)), :] - \\\n ant_bw[antenna_order.index(int(ant1)), :]\n # Add in outrigger delays\n bw[:, i] += (outrigger_delays.get(int(ant1), 0) - \\\n outrigger_delays.get(int(ant2), 0))*0.29979245800000004\n\n # Save the fringestopping table\n if os.path.exists(outname):\n os.unlink(outname)\n np.savez(outname, dec_rad=pt_dec, tsamp_s=tsamp, ha=hangle, bw=bw,\n bwref=bwref, antenna_order=antenna_order, outrigger_delays=outrigger_delays, ant_bw=ant_bw)\n\ndef zenith_visibility_model(fobs, fstable='fringestopping_table.npz'):\n \"\"\"Creates the visibility model from the fringestopping table.\n\n Parameters\n ----------\n fobs : array\n The observing frequency of each channel in GHz.\n fstable : str\n The full path to the fringestopping table. Defaults\n ``fringestopping_table.npz``.\n\n Returns\n -------\n ndarray\n The visibility model, dimensions (1, time, baseline, frequency,\n polarization).\n \"\"\"\n data = np.load(fstable)\n bws = data['bw']\n vis_model = np.exp(2j*np.pi/ct.C_GHZ_M*fobs[:, np.newaxis]*\n bws[np.newaxis, :, :, np.newaxis, np.newaxis])\n return vis_model\n\ndef fringestop_on_zenith(vis, vis_model, nans=False):\n \"\"\"Performs meridian fringestopping.\n\n Fringestops on hour angle 0, declination pointing declination for the\n midpoint of each integration, then integrates the data. The number of\n samples to integrate by is set by the length of the second axis of\n `vis_model`.\n\n Parameters\n ----------\n vis : ndarray\n The input visibilities, dimensions (time, baseline, frequency, pol).\n vis_model : ndarray\n The visibility model, dimensions (1, nint, baseline, frequency, pol).\n nans : boolean\n Whether the visibility array is nan-padded. Defaults False.\n\n Returns\n -------\n narray\n The fringe-stopped and integrated visibilities. Dimensions (time,\n baseline, frequency, pol).\n \"\"\"\n nint = vis_model.shape[1]\n nt, nbl, nchan, npol = vis.shape\n assert nt%nint == 0, ('Number of times in the visibility file must be '\n 'divisible by nint')\n vis = vis.reshape(-1, nint, nbl, nchan, npol)\n print(vis.shape, vis_model.shape)\n vis /= vis_model\n if nans:\n nsamples = np.count_nonzero(~np.isnan(vis), axis=1)\n vis = np.nanmean(vis, axis=1)\n\n else:\n vis = np.mean(vis, axis=1)\n nsamples = np.ones(vis.shape)*nint\n return vis, nsamples\n\ndef write_fs_delay_table(msname, source, blen, tobs, nant):\n \"\"\"Writes the fringestopping delays to a delay calibration table.\n\n Not tested. 
Table is written to `msname`_`cal.name`_fscal\n\n Parameters\n ----------\n msname : str\n The prefix of the ms for which this table is generated.\n source : src class instance\n The source (or location) to fringestop on.\n blen : ndarray\n The ITRF coordinates of the baselines. Dimensions (baselines, 3).\n tobs : array\n The observation time of each time bin in mjd.\n nant : int\n The number of antennas in the array.\n \"\"\"\n nt = tobs.shape[0]\n _bu, _bv, bw = calc_uvw(blen, tobs, source.epoch, source.ra, source.dec)\n\n ant_delay = np.zeros((nt, nant))\n ant_delay[:, 1:] = bw[:nant-1, :].T/ct.C_GHZ_M #pylint: disable=unsubscriptable-object\n\n error = 0\n tb = cc.table()\n error += not tb.open('{0}/templatekcal'.format(ct.PKG_DATA_PATH))\n error += not tb.copy('{0}_{1}_fscal'.format(msname, source.name))\n error += not tb.close()\n\n error += not tb.open('{0}_{1}_fscal'.format(msname, source.name),\n nomodify=False)\n error += not tb.addrows(nant*nt-tb.nrows())\n error += not tb.flush()\n assert tb.nrows() == nant*nt\n error += not tb.putcol(\n 'TIME', np.tile((tobs*u.d).to_value(u.s).reshape(-1, 1),\n (1, nant)).flatten())\n error += not tb.putcol('FIELD_ID', np.zeros(nt*nant, dtype=np.int32))\n error += not tb.putcol(\n 'SPECTRAL_WINDOW_ID', np.zeros(nt*nant, dtype=np.int32))\n error += not tb.putcol(\n 'ANTENNA1', np.tile(np.arange(nant, dtype=np.int32).reshape(1, nant),\n (nt, 1)).flatten())\n error += not tb.putcol('ANTENNA2', -1*np.ones(nt*nant, dtype=np.int32))\n error += not tb.putcol('INTERVAL', np.zeros(nt*nant, dtype=np.int32))\n error += not tb.putcol('SCAN_NUMBER', np.ones(nt*nant, dtype=np.int32))\n error += not tb.putcol('OBSERVATION_ID', np.zeros(nt*nant, dtype=np.int32))\n error += not tb.putcol(\n 'FPARAM', np.tile(ant_delay.reshape(1, -1), (2, 1)).reshape(2, 1, -1))\n error += not tb.putcol(\n 'PARAMERR', np.zeros((2, 1, nt*nant), dtype=np.float32))\n error += not tb.putcol('FLAG', np.zeros((2, 1, nt*nant), dtype=bool))\n error += not tb.putcol('SNR', np.zeros((2, 1, nt*nant), dtype=np.float64))\n error += not tb.flush()\n error += not tb.close()\n\n if error > 0:\n sys.stderr.write('{0} errors occured during calibration'.format(error))\n", "id": "1011646", "language": "Python", "matching_score": 4.761505603790283, "max_stars_count": 0, "path": "dsamfs/fringestopping.py" }, { "content": "\"\"\"Constants used in the calibration of DSA-110 visibilities.\n\nAuthor: <NAME>, <EMAIL>, 10/2019\n\"\"\"\nimport astropy.units as u\nimport astropy.constants as c\nimport scipy # pylint: disable=unused-import\nimport casatools as cc\nimport numpy as np\nimport dsacalib\n\n# The number of seconds in a sidereal day\nSECONDS_PER_SIDEREAL_DAY = 3600*23.9344699\n\n# The number of seconds in a day\nSECONDS_PER_DAY = 3600*24\n\nDEG_PER_HOUR = 360/SECONDS_PER_SIDEREAL_DAY*3600\n\n# Time between time the packet says as start and first\n# sample recorded - used only for dsa-10 correlator\n# Not in use for DSA-110\nTIME_OFFSET = 4.294967296\nCASA_TIME_OFFSET = 0.00042824074625968933 # in days\n\n# The integration time of the visibilities in seconds\n# used only for dsa-10 correlator\n# Not in use for DSA-110\nTSAMP = 8.192e-6*128*384\n\n# The longitude and latitude of the OVRO site\n# in radians\nme = cc.measures()\novro_loc = me.observatory('OVRO')\nOVRO_LON = ovro_loc['m0']['value']\nOVRO_LAT = ovro_loc['m1']['value']\nOVRO_ALT = ovro_loc['m2']['value']\n\n# c expressed in units relevant to us\nC_GHZ_M = c.c.to_value(u.GHz*u.m)\n\n# Amount to integrate data by after fringestopping,\n# 
when writing to a CASA ms\n# Currently integrating for 10-s\n# When commissioning DSA-110, want 1-s integrations\n# Not in use for DSA-110\nNINT = int(np.floor(10/TSAMP))\n\n# Default sky model parameters\nF0 = 1.4 # Frequency of flux in GHz\nSPEC_IDX = -0.7 # Spectral index\n\n# Backup IERS table\nIERS_TABLE = 'file://{0}/data/finals2000A.all'.format(dsacalib.__path__[0])\n# Templates & other package data\nPKG_DATA_PATH = '{0}/data/'.format(dsacalib.__path__[0])\n", "id": "4074136", "language": "Python", "matching_score": 2.5653159618377686, "max_stars_count": 1, "path": "dsacalib/constants.py" }, { "content": "import dsacalib.constants as ct\nimport astropy.units as u\nimport os\nimport numpy as np\nimport dsautils.cnf as dsc\n\n\ndef test_casa_location():\n ovro_lon = -118.283400*u.deg\n ovro_lat = 37.233386*u.deg\n ovro_height = 1188*u.m\n assert np.abs(ct.OVRO_LON - ovro_lon.to_value(u.rad)) < 0.1\n assert np.abs(ct.OVRO_LAT - ovro_lat.to_value(u.rad)) < 0.1\n assert np.abs(ct.OVRO_ALT - ovro_height.to_value(u.m)) < 1\n\ndef test_data():\n assert os.path.exists(ct.IERS_TABLE.replace('file://', ''))\n assert os.path.exists('{0}/template_gcal_ant'.format(ct.PKG_DATA_PATH))\n\ndef test_cnf():\n conf = dsc.Conf()\n params = conf.get('corr')\n assert 'ch0' in params.keys()\n assert 'antenna_order' in params.keys()\n corr_list = list(params['ch0'].keys())\n corr_list = [int(cl.strip('corr')) for cl in corr_list]\n antennas_plot = np.array(list(params['antenna_order'].values()))\n", "id": "2911635", "language": "Python", "matching_score": 1.6657880544662476, "max_stars_count": 1, "path": "tests/test_constants.py" }, { "content": "from setuptools import setup\n\nsetup(name='dsa110-calib',\n version='0.3',\n url='http://github.com/dsa110/dsa110-calib/',\n author='<NAME>',\n author_email='<EMAIL>',\n packages=['dsacalib'],\n package_data={'dsacalib':['data/*',\n 'data/template_gcal_ant/*',\n 'data/template_gcal_ant/ANTENNA/*',\n 'data/template_gcal_ant/FIELD/*',\n 'data/template_gcal_ant/HISTORY/*',\n 'data/template_gcal_ant/OBSERVATION/*',\n 'data/template_gcal_ant/SPECTRAL_WINDOW/*'\n ]},\n install_requires=['astropy',\n 'casatools',\n 'casatasks',\n 'casadata',\n 'cython',\n 'h5py',\n 'matplotlib',\n 'numba',\n 'numpy',\n 'pandas',\n 'pytest',\n 'codecov',\n 'coverage',\n 'pyyaml',\n 'scipy',\n 'etcd3',\n 'structlog',\n 'dsa110-antpos',\n 'dsa110-pyutils',\n 'dsa110-meridian-fs'\n ],\n dependency_links = [\n \"https://github.com/dsa110/dsa110-antpos/tarball/master#egg=dsa110-antpos\",\n \"https://github.com/dsa110/dsa110-pyutils/tarball/master#egg=dsa110-pyutils\",\n \"https://github.com/dsa110/dsa110-meridian-fs/tarball/main#egg=dsa110-meridian-fs\"\n ]\n)\n", "id": "788642", "language": "Python", "matching_score": 3.8034181594848633, "max_stars_count": 1, "path": "setup.py" }, { "content": "from setuptools import setup\nfrom dsautils.version import get_git_version\n\nsetup(name='dsa110-T3',\n version=get_git_version(),\n url='http://github.com/dsa110/dsa110-T3/',\n author='<NAME>',\n author_email='<EMAIL>',\n packages=['dsaT3'],\n package_data={'dsaT3':['data/*']},\n install_requires=['astropy',\n 'casatools',\n 'casatasks',\n 'casadata',\n 'matplotlib',\n 'numpy==1.19.5',\n 'pytest',\n 'codecov',\n 'coverage',\n 'pyyaml',\n 'scipy',\n 'etcd3',\n 'structlog',\n 'dsa110-pyutils',\n 'dsa110-meridian-fs',\n 'dsa110-calib',\n 'sigpyproc',\n 'slack',\n 'slackclient',\n 'tensorflow==2.5.0',\n ],\n dependency_links = [\n 
\"https://github.com/dsa110/dsa110-antpos/tarball/master#egg=dsa110-antpos\",\n \"https://github.com/dsa110/dsa110-pyutils/tarball/master#egg=dsa110-pyutils\",\n \"https://github.com/dsa110/dsa110-meridian-fs/tarball/main#egg=dsa110-meridian-fs\",\n \"https://github.com/dsa110/dsa110-calib/tarball/main#egg=dsa110-calib\",\n ]\n)\n", "id": "9599072", "language": "Python", "matching_score": 4.566139221191406, "max_stars_count": 0, "path": "setup.py" }, { "content": "from setuptools import setup\nfrom dsautils.version import get_git_version\n\nversion = get_git_version()\nassert version is not None\n\nsetup(name='dsa110-meridian-fs',\n version=version,\n url='http://github.com/dsa110/dsa110-meridian-fs/',\n author='<NAME>',\n author_email='<EMAIL>',\n packages=['dsamfs'],\n package_data={\n 'dsamfs': ['data/*.txt', 'data/*.yaml'],\n },\n install_requires=['astropy',\n 'casatools',\n 'casadata',\n 'cython',\n 'h5py',\n 'matplotlib',\n 'numba',\n 'numpy',\n 'pandas',\n 'pytest',\n 'scipy',\n 'coverage',\n 'codecov',\n 'pyyaml',\n 'etcd3',\n 'structlog',\n 'dsa110-antpos',\n 'dsa110-pyutils',\n 'dsa110-calib',\n 'pyuvdata'\n ],\n dependency_links=[\n \"https://github.com/dsa110/dsa110-antpos/tarball/master#egg=dsa110-antpos\",\n \"https://github.com/dsa110/dsa110-pyutils/tarball/master#egg=dsa110-pyutils\",\n \"https://casa-pip.nrao.edu/repository/pypi-casa-release/simple\",\n \"https://github.com/dsa110/dsa110-calib/main#egg=dsa110-calib\",\n ],\n zip_safe=False)\n\n", "id": "7291779", "language": "Python", "matching_score": 2.313441753387451, "max_stars_count": 0, "path": "setup.py" }, { "content": "from setuptools import setup, find_packages\nfrom version import get_git_version\nimport glob\n\nsetup(name='dsa110-event',\n version=get_git_version(),\n url='http://github.com/dsa110/dsa110-event',\n packages=find_packages(),\n requirements=['caltechdata_api', 'requests', 'voevent-parse', 'datacite'],\n include_package_data=True,\n# package_data={'event': ['data/*.json']},\n data_files=[('event/data', glob.glob('data/*.json'))],\n entry_points='''\n [console_scripts]\n dsaevent=event.cli:cli\n ''',\n zip_safe=False)\n", "id": "7534367", "language": "Python", "matching_score": 2.5971081256866455, "max_stars_count": 0, "path": "setup.py" }, { "content": "from setuptools import setup\n\nsetup(name='dsa110-catalogs',\n version='0.2',\n url='http://github.com/dsa110/dsa110-catalogs',\n packages=['catalogs'],\n requirements=['astropy', 'astroquery', 'numpy'],\n zip_safe=False)\n", "id": "3600243", "language": "Python", "matching_score": 0.007437769789248705, "max_stars_count": 0, "path": "setup.py" }, { "content": "from time import sleep\nfrom dask.distributed import Client\nfrom dsautils import dsa_store\nfrom dsaT3 import T3_manager\nimport glob, os, json\nfrom dsautils import dsa_functions36\n\nclient = Client('10.42.0.232:8786')\nde = dsa_store.DsaStore()\n\ndef task(a):\n\n T3dict = T3_manager.run(a)\n return T3dict\n\ndef task_nowait(a):\n\n T3dict = T3_manager.run_nowait(a)\n return T3dict\n\n\ntasks = []\ndef cb_func(dd):\n global tasks\n corrname = dd['corrname']\n trigger = dd['trigger']\n if corrname == 'corr03':\n res = client.submit(task, trigger)\n tasks.append(res)\n\n# set watch\nwid = de.add_watch('/mon/corr/1/voltage', cb_func)\n\n# clean up existing triggers\ndatestring = de.get_dict('/cnf/datestring')\ntrig_jsons = sorted(glob.glob('/data/dsa110/T2/'+datestring+'/cluster_output*.json'))\nfor fl in trig_jsons:\n f = open(fl)\n d = json.load(f)\n trigname = list(d.keys())[0]\n if not 
os.path.exists('/home/ubuntu/data/T3/'+trigname+'.png'):\n res = client.submit(task_nowait, d)\n tasks.append(res)\n \n\nwhile True:\n try:\n print(f'{len(tasks)} tasks in queue')\n for future in tasks:\n print(future)\n if future.done():\n print(future.result())\n tasks.remove(future)\n\n de.put_dict('/mon/service/T3manager',{'cadence': 5, 'time': dsa_functions36.current_mjd()})\n sleep(5)\n except KeyboardInterrupt:\n print(f'Cancelling {len(tasks)} tasks and exiting')\n for future in tasks:\n future.cancel()\n tasks.remove(future)\n break\n", "id": "10197529", "language": "Python", "matching_score": 2.709916591644287, "max_stars_count": 0, "path": "services/tasktrigger.py" }, { "content": "import dsautils.dsa_store as ds\nimport sys\nimport numpy as np\nfrom time import sleep\nfrom dsautils import dsa_functions36\nfrom pkg_resources import resource_filename\nimport os\nimport subprocess\n\n# defaults\ndatestring = 'dummy'\ndocopy = False\n\ndef datestring_func():\n def a(event):\n global datestring\n datestring=event\n return a\n \ndef docopy_func():\n def a(event):\n global docopy\n if event=='True':\n docopy=True\n if event=='False':\n docopy=False\n return a\n \n# add callbacks from etcd\nmy_ds = ds.DsaStore()\ndocopy = my_ds.get_dict('/cmd/corr/docopy') == 'True'\ndatestring = my_ds.get_dict('/cnf/datestring')\nmy_ds.add_watch('/cnf/datestring', datestring_func())\nmy_ds.add_watch('/cmd/corr/docopy', docopy_func())\n# scriptname = resource_filename('dsaT3', '../services/send_cands.bash')\nscriptname = '/home/ubuntu/dana/code/dsa110-T3/services/send_cands.bash' #hard code for now\n\nprint(datestring)\nprint(docopy)\n\nwhile True:\n if docopy:\n cmd = [scriptname, datestring]\n output = subprocess.check_output(\n cmd,\n stderr=subprocess.STDOUT\n )\n print(output)\n\n key = '/mon/service/send_cands'\n value = {'cadence': 20, 'time': dsa_functions36.current_mjd()}\n try:\n my_ds.put_dict(key, value)\n except:\n print('COULD NOT CONNECT TO ETCD')\n\n sleep(20)\n", "id": "927677", "language": "Python", "matching_score": 0.7496883869171143, "max_stars_count": 0, "path": "services/send_cands.py" }, { "content": "import string\nimport datetime\nfrom astropy import time\nfrom dsautils import dsa_store\nds = dsa_store.DsaStore()\n\n\ndef get_lastname():\n \"\"\" Look at etcd to get name of last triggered candidate\n Return of None means that the name generation should start anew.\n \"\"\"\n\n try:\n lastname, vv = ds.get_dict('/mon/corr/1/trigger').popitem()\n except:\n lastname = None\n \n return lastname\n\n\ndef increment_name(mjd, lastname=None, suffixlength=4):\n \"\"\" Use mjd to create unique name for event.\n \"\"\"\n\n dt = time.Time(mjd, format='mjd', scale='utc').to_datetime()\n if lastname is None: # generate new name for this yymmdd\n suffix = string.ascii_lowercase[0]*suffixlength\n else:\n yymmdd = lastname[:-suffixlength]\n print(f'yymmdd: {yymmdd}')\n dt0 = datetime.datetime(int('20'+yymmdd[0:2]), int(yymmdd[2:4]), int(yymmdd[4:6]))\n if dt.year > dt0.year or dt.month > dt0.month or dt.day > dt0.day:\n # new day, so name starts over\n suffix = string.ascii_lowercase[0]*suffixlength\n else:\n # same day, so increment name\n lastsuffix = lastname[-suffixlength:]\n lastnumber = suffixtonumber(lastsuffix)\n suffix = f'{numbertosuffix(lastnumber+1):a>4}' # increment name\n\n return f'{str(dt.year)[2:]}{dt.month:02d}{dt.day:02d}{suffix}'\n\n\ndef suffixtonumber(suffix):\n \"\"\" Given a set of ascii_lowercase values, get a base 26 number.\n a = 0, ... 
z = 25, aa = 26, ...\n \"\"\"\n\n # int(base=26) doesn't quite work, since first ten ints are actually ints!\n base36 = '0123456789abcdefghijklmnopqrstuvwxyz'\n return int(''.join([base36[base36.index(b)-10] for b in suffix]), 26)\n\n\ndef numbertosuffix(num, base=26, numerals=string.ascii_lowercase):\n \"\"\" Given a base=26 number, convert to ascii_lowercase-based name.\n Taken from https://stackoverflow.com/questions/60039572/how-to-increment-alphanumeric-number-in-python\n \"\"\"\n\n return ((num == 0) and numerals[0]) or (numbertosuffix(num // base, base, numerals).lstrip(numerals[0]) + numerals[num % base])\n", "id": "9114554", "language": "Python", "matching_score": 1.911874532699585, "max_stars_count": 0, "path": "event/names.py" }, { "content": "import pytest\nimport os.path\nfrom event import names\nfrom astropy import time\n\n\ndef test_lastname():\n name = names.get_lastname()\n assert name is None or type(name) == str\n\n\ndef test_newnamet():\n mjd = time.Time.now().mjd\n name = names.increment_name(mjd)\n\n assert name is not None\n\ndef test_increment():\n mjd = time.Time.now().mjd\n name = names.increment_name(mjd)\n name2 = names.increment_name(mjd, lastname=name)\n\n assert name != name2\n", "id": "2785014", "language": "Python", "matching_score": 0.3020239770412445, "max_stars_count": 0, "path": "tests/test_names.py" }, { "content": "\"\"\"\nSimple tests to ensure calibration files are created. Do not test validity of\nsolutions.\n\"\"\"\nimport numpy as np\nimport astropy.units as u\nimport dsacalib.routines as dr\nfrom dsacalib.utils import src\nimport dsacalib\nfrom astropy.time import Time\n\ndef __init__():\n return\n\ndef test_dsa10(tmpdir):\n datadir = '{0}/data/'.format(dsacalib.__path__[0])\n pt_dec = 49.033386*np.pi/180.\n fname = '{0}/J0542+4951_test.fits'.format(datadir)\n cal = src('3C147', '05h42m36.137916s', '49d51m07.233560s', 22.8796)\n msname = '{0}/{1}'.format(tmpdir, cal.name)\n badants = [1, 3, 4, 7, 10]\n antpos = '{0}/antpos_ITRF.txt'.format(datadir)\n refant = '2'\n dr.dsa10_cal(fname, msname, cal, pt_dec, antpos, refant, badants)\n\ndef test_3ant(tmpdir):\n M87 = src('M87','12h30m49.4233s','+12d23m28.043s',138.4870)\n obs_params = {'fname': '{0}/data/{1}_test.fits'.format(dsacalib.__path__[0],\n M87.name),\n 'msname': '{0}/{1}'.format(tmpdir, M87.name),\n 'cal': M87,\n 'utc_start': Time('2020-04-16T06:09:42')}\n ant_params = {'pt_dec': (12.391123*u.deg).to_value(u.rad),\n 'antenna_order': [9, 2, 6],\n 'refant': '2',\n 'antpos': '{0}/data/antpos_ITRF.txt'.format(\n dsacalib.__path__[0])}\n status, caltime = dr.triple_antenna_cal(obs_params, ant_params)\n assert status == 0\n\ndef test_3ant_sefd(tmpdir):\n M87 = src('M87', '12h30m49.4233s', '+12d23m28.043s', 138.4870)\n obs_params = {'fname': '{0}/data/{1}_test.fits'.format(\n dsacalib.__path__[0], M87.name),\n 'msname': '{0}/{1}'.format(tmpdir, M87.name),\n 'cal': M87,\n 'utc_start': Time('2020-04-16T06:09:42')}\n ant_params = {'pt_dec': (12.391123*u.deg).to_value(u.rad),\n 'antenna_order': [9, 2, 6],\n 'refant': '2',\n 'antpos': '{0}/data/antpos_ITRF.txt'.format(\n dsacalib.__path__[0])}\n status, caltime = dr.triple_antenna_cal(obs_params, ant_params, sefd=True)\n assert status == 0\n # Current file is too short to fit the SEFD\n # sefds, ant_gains, ant_transit_time = calculate_sefd(obs_params, ant_params)\n", "id": "1517829", "language": "Python", "matching_score": 4.251128196716309, "max_stars_count": 1, "path": "tests/test_end2end.py" }, { "content": "import numpy as np\nimport astropy.units 
as u\nfrom dsacalib.utils import *\nfrom dsacalib.routines import triple_antenna_cal\nimport dsautils.calstatus as cs\nfrom caltools import caltools\n\n# Parameters that will need to be passed in or saved somehow \ndatadir= '/home/dsa/data/'\ncal = src('M87','12h30m49.4233s','+12d23m28.043s',138.4870)\ncaltbl = caltools.list_calibrators(cal.ra,cal.dec,\n extent_info=True,radius=1/60)['NVSS']\ncal.pa = caltbl[0]['position_angle']\ncal.min_axis = caltbl[0]['minor_axis']\ncal.maj_axis = caltbl[0]['major_axis']\n\nobs_params = {'fname':'{0}/M87_1.fits'.format(datadir),\n 'msname':'M87_1',\n 'cal':cal,\n 'utc_start':Time('2020-04-16T06:09:42')}\n\nant_params = {'pt_dec':cal.dec.to_value(u.rad),\n 'antenna_order':[9,2,6],\n 'refant':'2',\n 'antpos':'/home/dsa/data/antpos_ITRF.txt'}\n\nptoffsets = {'dracosdec':(np.array([[0.61445538, 0.54614568], [0.23613347, 0.31217943], [0.24186434, 0.20372287]])*u.deg).to_value(u.rad),\n 'rdec':(12.39*u.deg).to_value(u.rad),\n 'ddec':(0*u.deg).to_value(u.rad)}\n\nstatus,caltime = triple_antenna_cal(obs_params,ant_params,show_plots=False,throw_exceptions=True)\n\nif status > 0:\n print('Errors during calibration: {0}'.format(cs.decode(status)))\n\n", "id": "8887465", "language": "Python", "matching_score": 0.7863237857818604, "max_stars_count": 1, "path": "scripts/triple_antenna_cal.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Tests for calfits object\n\n\"\"\"\nimport pytest\nimport os\nimport numpy as np\nfrom astropy.io import fits\n\nfrom pyuvdata import UVCal\nimport pyuvdata.tests as uvtest\nfrom pyuvdata.data import DATA_PATH\nimport pyuvdata.utils as uvutils\n\n\ndef test_readwriteread(tmp_path):\n \"\"\"\n Omnical fits loopback test.\n\n Read in calfits file, write out new calfits file, read back in and check for\n object equality.\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(testfile)\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n\n return\n\n\ndef test_readwriteread_no_freq_range(tmp_path):\n # test without freq_range parameter\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_omnical.fits\")\n\n cal_in.read_calfits(testfile)\n cal_in.freq_range = None\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n\n return\n\n\ndef test_readwriteread_delays(tmp_path):\n \"\"\"\n Read-Write-Read test with a fits calibration files containing delays.\n\n Read in uvfits file, write out new uvfits file, read back in and check for\n object equality\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\")\n write_file = str(tmp_path / \"outtest_firstcal.fits\")\n cal_in.read_calfits(testfile)\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n del cal_in\n del cal_out\n\n\ndef test_error_unknown_cal_type(tmp_path):\n \"\"\"\n Test an error is raised when writing an unknown cal type.\n \"\"\"\n cal_in = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\")\n write_file = str(tmp_path / \"outtest_firstcal.fits\")\n 
cal_in.read_calfits(testfile)\n\n cal_in._set_unknown_cal_type()\n with pytest.raises(ValueError, match=\"unknown calibration type\"):\n cal_in.write_calfits(write_file, run_check=False, clobber=True)\n\n return\n\n\[email protected](\n \"header_dict,error_msg\",\n [\n ({\"flag\": \"CDELT2\"}, \"Jones values are different in FLAGS\"),\n ({\"flag\": \"CDELT3\"}, \"Time values are different in FLAGS\"),\n ({\"flag\": \"CRVAL5\"}, \"Spectral window values are different in FLAGS\"),\n ({\"totqual\": \"CDELT1\"}, \"Jones values are different in TOTQLTY\"),\n ({\"totqual\": \"CDELT2\"}, \"Time values are different in TOTQLTY\"),\n ({\"totqual\": \"CRVAL4\"}, \"Spectral window values are different in TOTQLTY\"),\n ],\n)\ndef test_fits_header_errors_delay(tmp_path, header_dict, error_msg):\n # change values for various axes in flag and total quality hdus to not\n # match primary hdu\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\")\n write_file = str(tmp_path / \"outtest_firstcal.fits\")\n write_file2 = str(tmp_path / \"outtest_firstcal2.fits\")\n\n cal_in.read_calfits(testfile)\n\n # Create filler jones info\n cal_in.jones_array = np.array([-5, -6, -7, -8])\n cal_in.Njones = 4\n cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)\n cal_in.delay_array = np.ones(\n cal_in._delay_array.expected_shape(cal_in), dtype=np.float64\n )\n cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))\n\n # add total_quality_array so that can be tested as well\n cal_in.total_quality_array = np.zeros(\n cal_in._total_quality_array.expected_shape(cal_in)\n )\n\n # write file\n cal_in.write_calfits(write_file, clobber=True)\n\n unit = list(header_dict.keys())[0]\n keyword = header_dict[unit]\n\n fname = fits.open(write_file)\n data = fname[0].data\n primary_hdr = fname[0].header\n hdunames = uvutils._fits_indexhdus(fname)\n ant_hdu = fname[hdunames[\"ANTENNAS\"]]\n flag_hdu = fname[hdunames[\"FLAGS\"]]\n flag_hdr = flag_hdu.header\n totqualhdu = fname[hdunames[\"TOTQLTY\"]]\n totqualhdr = totqualhdu.header\n\n if unit == \"flag\":\n flag_hdr[keyword] *= 2\n elif unit == \"totqual\":\n totqualhdr[keyword] *= 2\n\n prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)\n hdulist = fits.HDUList([prihdu, ant_hdu])\n flag_hdu = fits.ImageHDU(data=flag_hdu.data, header=flag_hdr)\n hdulist.append(flag_hdu)\n totqualhdu = fits.ImageHDU(data=totqualhdu.data, header=totqualhdr)\n hdulist.append(totqualhdu)\n\n hdulist.writeto(write_file2, overwrite=True)\n hdulist.close()\n\n with pytest.raises(ValueError, match=error_msg):\n cal_out.read_calfits(write_file2)\n\n return\n\n\[email protected](\n \"header_dict,error_msg\",\n [\n ({\"totqual\": \"CDELT1\"}, \"Jones values are different in TOTQLTY\"),\n ({\"totqual\": \"CDELT2\"}, \"Time values are different in TOTQLTY\"),\n ({\"totqual\": \"CDELT3\"}, \"Frequency values are different in TOTQLTY\"),\n ({\"totqual\": \"CRVAL4\"}, \"Spectral window values are different in TOTQLTY\"),\n ],\n)\ndef test_fits_header_errors_gain(tmp_path, header_dict, error_msg):\n # repeat for gain type file\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_omnical.fits\")\n write_file2 = str(tmp_path / \"outtest_omnical2.fits\")\n cal_in.read_calfits(testfile)\n\n # Create filler jones info\n cal_in.jones_array = np.array([-5, -6, -7, -8])\n cal_in.Njones = 4\n cal_in.flag_array 
= np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)\n cal_in.gain_array = np.ones(\n cal_in._gain_array.expected_shape(cal_in), dtype=np.complex64\n )\n cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))\n\n # add total_quality_array so that can be tested as well\n cal_in.total_quality_array = np.zeros(\n cal_in._total_quality_array.expected_shape(cal_in)\n )\n\n # write file\n cal_in.write_calfits(write_file, clobber=True)\n\n unit = list(header_dict.keys())[0]\n keyword = header_dict[unit]\n\n fname = fits.open(write_file)\n data = fname[0].data\n primary_hdr = fname[0].header\n hdunames = uvutils._fits_indexhdus(fname)\n ant_hdu = fname[hdunames[\"ANTENNAS\"]]\n totqualhdu = fname[hdunames[\"TOTQLTY\"]]\n totqualhdr = totqualhdu.header\n\n if unit == \"totqual\":\n totqualhdr[keyword] *= 2\n\n prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)\n hdulist = fits.HDUList([prihdu, ant_hdu])\n totqualhdu = fits.ImageHDU(data=totqualhdu.data, header=totqualhdr)\n hdulist.append(totqualhdu)\n\n hdulist.writeto(write_file2, overwrite=True)\n hdulist.close()\n\n with pytest.raises(ValueError, match=error_msg):\n cal_out.read_calfits(write_file2)\n\n return\n\n\ndef test_extra_keywords_boolean(tmp_path):\n cal_in = UVCal()\n cal_out = UVCal()\n calfits_file = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n testfile = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(calfits_file)\n\n # check handling of boolean keywords\n cal_in.extra_keywords[\"bool\"] = True\n cal_in.extra_keywords[\"bool2\"] = False\n cal_in.write_calfits(testfile, clobber=True)\n cal_out.read_calfits(testfile)\n\n assert cal_in == cal_out\n\n return\n\n\ndef test_extra_keywords_int(tmp_path):\n cal_in = UVCal()\n cal_out = UVCal()\n calfits_file = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n testfile = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(calfits_file)\n\n # check handling of int-like keywords\n cal_in.extra_keywords[\"int1\"] = np.int(5)\n cal_in.extra_keywords[\"int2\"] = 7\n cal_in.write_calfits(testfile, clobber=True)\n cal_out.read_calfits(testfile)\n\n assert cal_in == cal_out\n\n return\n\n\ndef test_extra_keywords_float(tmp_path):\n cal_in = UVCal()\n cal_out = UVCal()\n calfits_file = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n testfile = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(calfits_file)\n\n # check handling of float-like keywords\n cal_in.extra_keywords[\"float1\"] = np.int64(5.3)\n cal_in.extra_keywords[\"float2\"] = 6.9\n cal_in.write_calfits(testfile, clobber=True)\n cal_out.read_calfits(testfile)\n\n assert cal_in == cal_out\n\n return\n\n\ndef test_extra_keywords_complex(tmp_path):\n cal_in = UVCal()\n cal_out = UVCal()\n calfits_file = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n testfile = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(calfits_file)\n\n # check handling of complex-like keywords\n cal_in.extra_keywords[\"complex1\"] = np.complex64(5.3 + 1.2j)\n cal_in.extra_keywords[\"complex2\"] = 6.9 + 4.6j\n cal_in.write_calfits(testfile, clobber=True)\n cal_out.read_calfits(testfile)\n\n assert cal_in == cal_out\n\n return\n\n\ndef test_extra_keywords_comment(tmp_path):\n cal_in = UVCal()\n cal_out = UVCal()\n calfits_file = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n testfile = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(calfits_file)\n\n # check handling of 
comment keywords\n cal_in.extra_keywords[\"comment\"] = (\n \"this is a very long comment that will \"\n \"be broken into several lines\\nif \"\n \"everything works properly.\"\n )\n cal_in.write_calfits(testfile, clobber=True)\n cal_out.read_calfits(testfile)\n\n assert cal_in == cal_out\n\n return\n\n\[email protected](\n \"ex_val,error_msg\",\n [\n ({\"testdict\": {\"testkey\": 23}}, \"Extra keyword testdict is of\"),\n ({\"testlist\": [12, 14, 90]}, \"Extra keyword testlist is of\"),\n ({\"testarr\": np.array([12, 14, 90])}, \"Extra keyword testarr is of\"),\n ],\n)\ndef test_extra_keywords_errors(tmp_path, ex_val, error_msg):\n cal_in = UVCal()\n calfits_file = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n testfile = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(calfits_file)\n\n # check for warnings & errors with extra_keywords that are dicts, lists or arrays\n keyword = list(ex_val.keys())[0]\n val = ex_val[keyword]\n cal_in.extra_keywords[keyword] = val\n with uvtest.check_warnings(\n UserWarning, f\"{keyword} in extra_keywords is a list, array or dict\"\n ):\n cal_in.check()\n with pytest.raises(TypeError, match=error_msg):\n cal_in.write_calfits(testfile, run_check=False)\n\n return\n\n\ndef test_extra_keywords_warnings(tmp_path):\n cal_in = UVCal()\n calfits_file = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n testfile = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(calfits_file)\n\n # check for warnings with extra_keywords keys that are too long\n cal_in.extra_keywords[\"test_long_key\"] = True\n with uvtest.check_warnings(\n UserWarning, \"key test_long_key in extra_keywords is longer than 8 characters\"\n ):\n cal_in.check()\n with uvtest.check_warnings(\n UserWarning, \"key test_long_key in extra_keywords is longer than 8 characters\"\n ):\n cal_in.write_calfits(testfile, run_check=False, clobber=True)\n\n return\n\n\ndef test_input_flag_array_gain(tmp_path):\n \"\"\"\n Test when data file has input flag array.\n\n Currently we do not have a testfile, so we will artifically create one\n and check for internal consistency.\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_input_flags.fits\")\n cal_in.read_calfits(testfile)\n cal_in.input_flag_array = np.zeros(\n cal_in._input_flag_array.expected_shape(cal_in), dtype=bool\n )\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n\n\ndef test_input_flag_array_delay(tmp_path):\n \"\"\"\n Test when data file has input flag array.\n\n Currently we do not have a testfile, so we will artifically create one\n and check for internal consistency.\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\")\n write_file = str(tmp_path / \"outtest_input_flags.fits\")\n cal_in.read_calfits(testfile)\n cal_in.input_flag_array = np.zeros(\n cal_in._input_flag_array.expected_shape(cal_in), dtype=bool\n )\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n\n\ndef test_jones_gain(tmp_path):\n \"\"\"\n Test when data file has more than one element in Jones matrix.\n\n Currently we do not have a testfile, so we will artifically create one\n and check for internal consistency.\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, 
\"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_jones.fits\")\n cal_in.read_calfits(testfile)\n\n # Create filler jones info\n cal_in.jones_array = np.array([-5, -6, -7, -8])\n cal_in.Njones = 4\n cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)\n cal_in.gain_array = np.ones(\n cal_in._gain_array.expected_shape(cal_in), dtype=np.complex64\n )\n cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))\n\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n\n\ndef test_jones_delay(tmp_path):\n \"\"\"\n Test when data file has more than one element in Jones matrix.\n\n Currently we do not have a testfile, so we will artifically create one\n and check for internal consistency.\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\")\n write_file = str(tmp_path / \"outtest_jones.fits\")\n cal_in.read_calfits(testfile)\n\n # Create filler jones info\n cal_in.jones_array = np.array([-5, -6, -7, -8])\n cal_in.Njones = 4\n cal_in.flag_array = np.zeros(cal_in._flag_array.expected_shape(cal_in), dtype=bool)\n cal_in.delay_array = np.ones(\n cal_in._delay_array.expected_shape(cal_in), dtype=np.float64\n )\n cal_in.quality_array = np.zeros(cal_in._quality_array.expected_shape(cal_in))\n\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n\n\ndef test_readwriteread_total_quality_array(tmp_path):\n \"\"\"\n Test when data file has a total quality array.\n\n Currently we have no such file, so we will artificially create one and\n check for internal consistency.\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_total_quality_array.fits\")\n cal_in.read_calfits(testfile)\n\n # Create filler total quality array\n cal_in.total_quality_array = np.zeros(\n cal_in._total_quality_array.expected_shape(cal_in)\n )\n\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n del cal_in\n del cal_out\n\n # also test delay-type calibrations\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\")\n write_file = str(tmp_path / \"outtest_total_quality_array_delays.fits\")\n cal_in.read_calfits(testfile)\n\n cal_in.total_quality_array = np.zeros(\n cal_in._total_quality_array.expected_shape(cal_in)\n )\n\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n del cal_in\n del cal_out\n\n\ndef test_total_quality_array_size():\n \"\"\"\n Test that total quality array defaults to the proper size\n \"\"\"\n\n cal_in = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n cal_in.read_calfits(testfile)\n\n # Create filler total quality array\n cal_in.total_quality_array = np.zeros(\n cal_in._total_quality_array.expected_shape(cal_in)\n )\n\n proper_shape = (cal_in.Nspws, cal_in.Nfreqs, cal_in.Ntimes, cal_in.Njones)\n assert cal_in.total_quality_array.shape == proper_shape\n del cal_in\n\n # also test delay-type calibrations\n cal_in = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\")\n cal_in.read_calfits(testfile)\n\n cal_in.total_quality_array = np.zeros(\n cal_in._total_quality_array.expected_shape(cal_in)\n 
)\n\n proper_shape = (cal_in.Nspws, 1, cal_in.Ntimes, cal_in.Njones)\n assert cal_in.total_quality_array.shape == proper_shape\n del cal_in\n\n\ndef test_write_time_precision(tmp_path):\n \"\"\"\n Test that times are being written to appropriate precision (see issue 311).\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(testfile)\n # overwrite time array to break old code\n dt = cal_in.integration_time / (24.0 * 60.0 * 60.0)\n cal_in.time_array = dt * np.arange(cal_in.Ntimes)\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n\n\ndef test_read_noversion_history(tmp_path):\n \"\"\"\n Test that version info gets added to the history if it's missing\n \"\"\"\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_omnical.fits\")\n write_file2 = str(tmp_path / \"outtest_omnical2.fits\")\n cal_in.read_calfits(testfile)\n\n cal_in.write_calfits(write_file, clobber=True)\n\n fname = fits.open(write_file)\n data = fname[0].data\n primary_hdr = fname[0].header\n hdunames = uvutils._fits_indexhdus(fname)\n ant_hdu = fname[hdunames[\"ANTENNAS\"]]\n\n primary_hdr[\"HISTORY\"] = \"\"\n\n prihdu = fits.PrimaryHDU(data=data, header=primary_hdr)\n hdulist = fits.HDUList([prihdu, ant_hdu])\n\n hdulist.writeto(write_file2, overwrite=True)\n hdulist.close()\n\n cal_out.read_calfits(write_file2)\n assert cal_in == cal_out\n\n\ndef test_write_freq_spacing_not_channel_width(tmp_path):\n cal_in = UVCal()\n cal_out = UVCal()\n testfile = os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\")\n write_file = str(tmp_path / \"outtest_omnical.fits\")\n cal_in.read_calfits(testfile)\n\n # select every other frequency -- then evenly spaced but doesn't match channel width\n cal_in.select(freq_chans=np.arange(0, 10, 2))\n\n cal_in.write_calfits(write_file, clobber=True)\n cal_out.read_calfits(write_file)\n assert cal_in == cal_out\n", "id": "4248955", "language": "Python", "matching_score": 6.082428455352783, "max_stars_count": 0, "path": "pyuvdata/uvcal/tests/test_calfits.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\"\"\"Class for reading and writing calibration FITS files.\"\"\"\nimport warnings\n\nimport numpy as np\nfrom astropy.io import fits\n\nfrom .uvcal import UVCal\nfrom .. 
import utils as uvutils\n\n__all__ = [\"CALFITS\"]\n\n\nclass CALFITS(UVCal):\n \"\"\"\n Defines a calfits-specific class for reading and writing calfits files.\n\n This class should not be interacted with directly, instead use the read_calfits\n and write_calfits methods on the UVCal class.\n\n \"\"\"\n\n def write_calfits(\n self,\n filename,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n clobber=False,\n ):\n \"\"\"\n Write the data to a calfits file.\n\n Parameters\n ----------\n filename : str\n The calfits file to write to.\n run_check : bool\n Option to check for the existence and proper shapes of\n parameters before writing the file.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of\n parameters before writing the file.\n clobber : bool\n Option to overwrite the filename if the file already exists.\n\n \"\"\"\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n if self.Nfreqs > 1:\n freq_spacing = self.freq_array[0, 1:] - self.freq_array[0, :-1]\n if not np.isclose(\n np.min(freq_spacing),\n np.max(freq_spacing),\n rtol=self._freq_array.tols[0],\n atol=self._freq_array.tols[1],\n ):\n raise ValueError(\n \"The frequencies are not evenly spaced (probably \"\n \"because of a select operation). The calfits format \"\n \"does not support unevenly spaced frequencies.\"\n )\n if np.isclose(freq_spacing[0], self.channel_width):\n freq_spacing = self.channel_width\n else:\n rounded_spacing = np.around(\n freq_spacing, int(np.ceil(np.log10(self._freq_array.tols[1]) * -1))\n )\n freq_spacing = rounded_spacing[0]\n else:\n freq_spacing = self.channel_width\n\n if self.Ntimes > 1:\n time_spacing = np.diff(self.time_array)\n if not np.isclose(\n np.min(time_spacing),\n np.max(time_spacing),\n rtol=self._time_array.tols[0],\n atol=self._time_array.tols[1],\n ):\n raise ValueError(\n \"The times are not evenly spaced (probably \"\n \"because of a select operation). 
The calfits format \"\n \"does not support unevenly spaced times.\"\n )\n if np.isclose(time_spacing[0], self.integration_time / (24.0 * 60.0 ** 2)):\n time_spacing = self.integration_time / (24.0 * 60.0 ** 2)\n else:\n rounded_spacing = np.around(\n time_spacing,\n int(\n np.ceil(np.log10(self._time_array.tols[1] / self.Ntimes) * -1)\n + 1\n ),\n )\n time_spacing = rounded_spacing[0]\n else:\n time_spacing = self.integration_time / (24.0 * 60.0 ** 2)\n\n if self.Njones > 1:\n jones_spacing = np.diff(self.jones_array)\n if np.min(jones_spacing) < np.max(jones_spacing):\n raise ValueError(\n \"The jones values are not evenly spaced.\"\n \"The calibration fits file format does not\"\n \" support unevenly spaced polarizations.\"\n )\n jones_spacing = jones_spacing[0]\n else:\n jones_spacing = -1\n\n prihdr = fits.Header()\n if self.total_quality_array is not None:\n totqualhdr = fits.Header()\n totqualhdr[\"EXTNAME\"] = \"TOTQLTY\"\n if self.cal_type != \"gain\":\n sechdr = fits.Header()\n sechdr[\"EXTNAME\"] = \"FLAGS\"\n # Conforming to fits format\n prihdr[\"SIMPLE\"] = True\n prihdr[\"TELESCOP\"] = self.telescope_name\n prihdr[\"GNCONVEN\"] = self.gain_convention\n prihdr[\"CALTYPE\"] = self.cal_type\n prihdr[\"CALSTYLE\"] = self.cal_style\n if self.sky_field is not None:\n prihdr[\"FIELD\"] = self.sky_field\n if self.sky_catalog is not None:\n prihdr[\"CATALOG\"] = self.sky_catalog\n if self.ref_antenna_name is not None:\n prihdr[\"REFANT\"] = self.ref_antenna_name\n if self.Nsources is not None:\n prihdr[\"NSOURCES\"] = self.Nsources\n if self.baseline_range is not None:\n prihdr[\"BL_RANGE\"] = (\n \"[\" + \", \".join([str(b) for b in self.baseline_range]) + \"]\"\n )\n if self.diffuse_model is not None:\n prihdr[\"DIFFUSE\"] = self.diffuse_model\n if self.gain_scale is not None:\n prihdr[\"GNSCALE\"] = self.gain_scale\n prihdr[\"INTTIME\"] = self.integration_time\n prihdr[\"CHWIDTH\"] = self.channel_width\n prihdr[\"XORIENT\"] = self.x_orientation\n if self.cal_type == \"delay\":\n prihdr[\"FRQRANGE\"] = \",\".join(map(str, self.freq_range))\n elif self.freq_range is not None:\n prihdr[\"FRQRANGE\"] = \",\".join(map(str, self.freq_range))\n prihdr[\"TMERANGE\"] = \",\".join(map(str, self.time_range))\n\n if self.observer:\n prihdr[\"OBSERVER\"] = self.observer\n if self.git_origin_cal:\n prihdr[\"ORIGCAL\"] = self.git_origin_cal\n if self.git_hash_cal:\n prihdr[\"HASHCAL\"] = self.git_hash_cal\n\n if self.cal_type == \"unknown\":\n raise ValueError(\n \"unknown calibration type. 
Do not know how to \" \"store parameters\"\n )\n\n # Define primary header values\n # Arrays have (column-major) dimensions of\n # [Nimages, Njones, Ntimes, Nfreqs, Nspw, Nantennas]\n # For a \"delay\"-type calibration, Nfreqs is a shallow axis\n\n # set the axis for number of arrays\n prihdr[\"CTYPE1\"] = (\"Narrays\", \"Number of image arrays.\")\n prihdr[\"CUNIT1\"] = \"Integer\"\n prihdr[\"CDELT1\"] = 1\n prihdr[\"CRPIX1\"] = 1\n prihdr[\"CRVAL1\"] = 1\n\n # Jones axis\n prihdr[\"CTYPE2\"] = (\"JONES\", \"Jones matrix array\")\n prihdr[\"CUNIT2\"] = (\"Integer\", \"representative integer for polarization.\")\n prihdr[\"CRPIX2\"] = 1\n prihdr[\"CRVAL2\"] = self.jones_array[0] # always start with first jones.\n prihdr[\"CDELT2\"] = jones_spacing\n\n # time axis\n prihdr[\"CTYPE3\"] = (\"TIME\", \"Time axis.\")\n prihdr[\"CUNIT3\"] = (\"JD\", \"Time in julian date format\")\n prihdr[\"CRPIX3\"] = 1\n prihdr[\"CRVAL3\"] = self.time_array[0]\n prihdr[\"CDELT3\"] = time_spacing\n\n # freq axis\n prihdr[\"CTYPE4\"] = (\"FREQS\", \"Frequency.\")\n prihdr[\"CUNIT4\"] = \"Hz\"\n prihdr[\"CRPIX4\"] = 1\n prihdr[\"CRVAL4\"] = self.freq_array[0][0]\n prihdr[\"CDELT4\"] = freq_spacing\n\n # spw axis: number of spectral windows\n prihdr[\"CTYPE5\"] = (\"IF\", \"Spectral window number.\")\n prihdr[\"CUNIT5\"] = \"Integer\"\n prihdr[\"CRPIX5\"] = 1\n prihdr[\"CRVAL5\"] = 1\n prihdr[\"CDELT5\"] = 1\n\n # antenna axis\n prihdr[\"CTYPE6\"] = (\"ANTAXIS\", \"See ANTARR in ANTENNA extension for values.\")\n prihdr[\"CUNIT6\"] = \"Integer\"\n prihdr[\"CRPIX6\"] = 1\n prihdr[\"CRVAL6\"] = 1\n prihdr[\"CDELT6\"] = -1\n\n # end standard keywords; begin user-defined keywords\n for key, value in self.extra_keywords.items():\n # header keywords have to be 8 characters or less\n if len(str(key)) > 8:\n warnings.warn(\n \"key {key} in extra_keywords is longer than 8 \"\n \"characters. It will be truncated to 8 as required \"\n \"by the calfits file format.\".format(key=key)\n )\n keyword = key[:8].upper()\n if isinstance(value, (dict, list, np.ndarray)):\n raise TypeError(\n \"Extra keyword {keyword} is of {keytype}. \"\n \"Only strings and numbers are \"\n \"supported in calfits.\".format(keyword=key, keytype=type(value))\n )\n\n if keyword == \"COMMENT\":\n for line in value.splitlines():\n prihdr.add_comment(line)\n else:\n prihdr[keyword] = value\n\n for line in self.history.splitlines():\n prihdr.add_history(line)\n\n # define data section based on calibration type\n if self.cal_type == \"gain\":\n if self.input_flag_array is not None:\n pridata = np.concatenate(\n [\n self.gain_array.real[:, :, :, :, :, np.newaxis],\n self.gain_array.imag[:, :, :, :, :, np.newaxis],\n self.flag_array[:, :, :, :, :, np.newaxis],\n self.input_flag_array[:, :, :, :, :, np.newaxis],\n self.quality_array[:, :, :, :, :, np.newaxis],\n ],\n axis=-1,\n )\n else:\n pridata = np.concatenate(\n [\n self.gain_array.real[:, :, :, :, :, np.newaxis],\n self.gain_array.imag[:, :, :, :, :, np.newaxis],\n self.flag_array[:, :, :, :, :, np.newaxis],\n self.quality_array[:, :, :, :, :, np.newaxis],\n ],\n axis=-1,\n )\n\n elif self.cal_type == \"delay\":\n pridata = np.concatenate(\n [\n self.delay_array[:, :, :, :, :, np.newaxis],\n self.quality_array[:, :, :, :, :, np.newaxis],\n ],\n axis=-1,\n )\n\n # Set headers for the second hdu containing the flags. 
Only in\n # cal_type=delay\n # Can't put in primary header because frequency axis is shallow there,\n # but not here\n # Header values are the same as the primary header\n sechdr[\"CTYPE1\"] = (\"Narrays\", \"Number of image arrays.\")\n sechdr[\"CUNIT1\"] = \"Integer\"\n sechdr[\"CRPIX1\"] = 1\n sechdr[\"CRVAL1\"] = 1\n sechdr[\"CDELT1\"] = 1\n\n sechdr[\"CTYPE2\"] = (\"JONES\", \"Jones matrix array\")\n sechdr[\"CUNIT2\"] = (\"Integer\", \"representative integer for polarization.\")\n sechdr[\"CRPIX2\"] = 1\n sechdr[\"CRVAL2\"] = self.jones_array[0] # always start with first jones.\n sechdr[\"CDELT2\"] = jones_spacing\n\n sechdr[\"CTYPE3\"] = (\"TIME\", \"Time axis.\")\n sechdr[\"CUNIT3\"] = (\"JD\", \"Time in julian date format\")\n sechdr[\"CRPIX3\"] = 1\n sechdr[\"CRVAL3\"] = self.time_array[0]\n sechdr[\"CDELT3\"] = time_spacing\n\n sechdr[\"CTYPE4\"] = (\"FREQS\", \"Valid frequencies to apply delay.\")\n sechdr[\"CUNIT4\"] = \"Hz\"\n sechdr[\"CRPIX4\"] = 1\n sechdr[\"CRVAL4\"] = self.freq_array[0][0]\n sechdr[\"CDELT4\"] = freq_spacing\n\n sechdr[\"CTYPE5\"] = (\"IF\", \"Spectral window number.\")\n sechdr[\"CUNIT5\"] = \"Integer\"\n sechdr[\"CRPIX5\"] = 1\n sechdr[\"CRVAL5\"] = 1\n sechdr[\"CDELT5\"] = 1\n\n sechdr[\"CTYPE6\"] = (\n \"ANTAXIS\",\n \"See ANTARR in ANTENNA extension for values.\",\n )\n\n # convert from bool to int64; undone on read\n if self.input_flag_array is not None:\n secdata = np.concatenate(\n [\n self.flag_array.astype(np.int64)[:, :, :, :, :, np.newaxis],\n self.input_flag_array.astype(np.int64)[\n :, :, :, :, :, np.newaxis\n ],\n ],\n axis=-1,\n )\n else:\n secdata = self.flag_array.astype(np.int64)[:, :, :, :, :, np.newaxis]\n\n if self.total_quality_array is not None:\n # Set headers for the hdu containing the total_quality_array\n # No antenna axis, so we have [Njones, Ntime, Nfreq, Nspws]\n totqualhdr[\"CTYPE1\"] = (\"JONES\", \"Jones matrix array\")\n totqualhdr[\"CUNIT1\"] = (\n \"Integer\",\n \"representative integer for polarization.\",\n )\n totqualhdr[\"CRPIX1\"] = 1\n totqualhdr[\"CRVAL1\"] = self.jones_array[0] # always start with first jones.\n totqualhdr[\"CDELT1\"] = jones_spacing\n\n totqualhdr[\"CTYPE2\"] = (\"TIME\", \"Time axis.\")\n totqualhdr[\"CUNIT2\"] = (\"JD\", \"Time in julian date format\")\n totqualhdr[\"CRPIX2\"] = 1\n totqualhdr[\"CRVAL2\"] = self.time_array[0]\n totqualhdr[\"CDELT2\"] = time_spacing\n\n totqualhdr[\"CTYPE3\"] = (\"FREQS\", \"Valid frequencies to apply delay.\")\n totqualhdr[\"CUNIT3\"] = \"Hz\"\n totqualhdr[\"CRPIX3\"] = 1\n totqualhdr[\"CRVAL3\"] = self.freq_array[0][0]\n totqualhdr[\"CDELT3\"] = freq_spacing\n\n # spws axis: number of spectral windows\n totqualhdr[\"CTYPE4\"] = (\"IF\", \"Spectral window number.\")\n totqualhdr[\"CUNIT4\"] = \"Integer\"\n totqualhdr[\"CRPIX4\"] = 1\n totqualhdr[\"CRVAL4\"] = 1\n totqualhdr[\"CDELT4\"] = 1\n totqualdata = self.total_quality_array\n\n # make HDUs\n prihdu = fits.PrimaryHDU(data=pridata, header=prihdr)\n\n # ant HDU\n col1 = fits.Column(name=\"ANTNAME\", format=\"8A\", array=self.antenna_names)\n col2 = fits.Column(name=\"ANTINDEX\", format=\"D\", array=self.antenna_numbers)\n if self.Nants_data == self.Nants_telescope:\n col3 = fits.Column(name=\"ANTARR\", format=\"D\", array=self.ant_array)\n else:\n # ant_array is shorter than the other columns.\n # Pad the extra rows with -1s. 
Need to undo on read.\n nants_add = self.Nants_telescope - self.Nants_data\n ant_array_use = np.append(\n self.ant_array, np.zeros(nants_add, dtype=np.int) - 1\n )\n col3 = fits.Column(name=\"ANTARR\", format=\"D\", array=ant_array_use)\n cols = fits.ColDefs([col1, col2, col3])\n ant_hdu = fits.BinTableHDU.from_columns(cols)\n ant_hdu.header[\"EXTNAME\"] = \"ANTENNAS\"\n\n hdulist = fits.HDUList([prihdu, ant_hdu])\n\n if self.cal_type != \"gain\":\n sechdu = fits.ImageHDU(data=secdata, header=sechdr)\n hdulist.append(sechdu)\n\n if self.total_quality_array is not None:\n totqualhdu = fits.ImageHDU(data=totqualdata, header=totqualhdr)\n hdulist.append(totqualhdu)\n\n hdulist.writeto(filename, overwrite=clobber)\n hdulist.close()\n\n def read_calfits(\n self, filename, run_check=True, check_extra=True, run_check_acceptability=True\n ):\n \"\"\"\n Read data from a calfits file.\n\n Parameters\n ----------\n filename : str\n The calfits file to read from.\n run_check : bool\n Option to check for the existence and proper shapes of\n parameters after reading in the file.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of\n parameters after reading in the file.\n\n \"\"\"\n with fits.open(filename) as fname:\n data = fname[0].data\n hdr = fname[0].header.copy()\n hdunames = uvutils._fits_indexhdus(fname)\n\n anthdu = fname[hdunames[\"ANTENNAS\"]]\n self.Nants_telescope = anthdu.header[\"NAXIS2\"]\n antdata = anthdu.data\n self.antenna_names = np.array(list(map(str, antdata[\"ANTNAME\"])))\n self.antenna_numbers = np.array(list(map(int, antdata[\"ANTINDEX\"])))\n self.ant_array = np.array(list(map(int, antdata[\"ANTARR\"])))\n if np.min(self.ant_array) < 0:\n # ant_array was shorter than the other columns, so it was\n # padded with -1s.\n # Remove the padded entries.\n self.ant_array = self.ant_array[np.where(self.ant_array >= 0)[0]]\n\n self.channel_width = hdr.pop(\"CHWIDTH\")\n self.integration_time = hdr.pop(\"INTTIME\")\n self.telescope_name = hdr.pop(\"TELESCOP\")\n self.history = str(hdr.get(\"HISTORY\", \"\"))\n\n if not uvutils._check_history_version(\n self.history, self.pyuvdata_version_str\n ):\n if not self.history.endswith(\"\\n\"):\n self.history += \"\\n\"\n\n self.history += self.pyuvdata_version_str\n\n self.time_range = list(map(float, hdr.pop(\"TMERANGE\").split(\",\")))\n self.gain_convention = hdr.pop(\"GNCONVEN\")\n self.gain_scale = hdr.pop(\"GNSCALE\", None)\n self.x_orientation = hdr.pop(\"XORIENT\")\n self.cal_type = hdr.pop(\"CALTYPE\")\n if self.cal_type == \"delay\":\n self.freq_range = list(map(float, hdr.pop(\"FRQRANGE\").split(\",\")))\n else:\n if \"FRQRANGE\" in hdr:\n self.freq_range = list(map(float, hdr.pop(\"FRQRANGE\").split(\",\")))\n\n self.cal_style = hdr.pop(\"CALSTYLE\")\n self.sky_field = hdr.pop(\"FIELD\", None)\n self.sky_catalog = hdr.pop(\"CATALOG\", None)\n self.ref_antenna_name = hdr.pop(\"REFANT\", None)\n self.Nsources = hdr.pop(\"NSOURCES\", None)\n bl_range_string = hdr.pop(\"BL_RANGE\", None)\n if bl_range_string is not None:\n self.baseline_range = [\n float(b) for b in bl_range_string.strip(\"[\").strip(\"]\").split(\",\")\n ]\n self.diffuse_model = hdr.pop(\"DIFFUSE\", None)\n\n self.observer = hdr.pop(\"OBSERVER\", None)\n self.git_origin_cal = hdr.pop(\"ORIGCAL\", None)\n self.git_hash_cal = hdr.pop(\"HASHCAL\", None)\n\n # generate polarization and time array for either cal_type.\n self.Njones = hdr.pop(\"NAXIS2\")\n 
self.jones_array = uvutils._fits_gethduaxis(fname[0], 2)\n self.Ntimes = hdr.pop(\"NAXIS3\")\n self.time_array = uvutils._fits_gethduaxis(fname[0], 3)\n\n self.Nspws = hdr.pop(\"NAXIS5\")\n # subtract 1 to be zero-indexed\n self.spw_array = uvutils._fits_gethduaxis(fname[0], 5) - 1\n\n # get data.\n if self.cal_type == \"gain\":\n self._set_gain()\n self.gain_array = data[:, :, :, :, :, 0] + 1j * data[:, :, :, :, :, 1]\n self.flag_array = data[:, :, :, :, :, 2].astype(\"bool\")\n if hdr.pop(\"NAXIS1\") == 5:\n self.input_flag_array = data[:, :, :, :, :, 3].astype(\"bool\")\n self.quality_array = data[:, :, :, :, :, 4]\n else:\n self.quality_array = data[:, :, :, :, :, 3]\n\n self.Nants_data = hdr.pop(\"NAXIS6\")\n\n # generate frequency array from primary data unit.\n self.Nfreqs = hdr.pop(\"NAXIS4\")\n self.freq_array = uvutils._fits_gethduaxis(fname[0], 4)\n self.freq_array.shape = (self.Nspws,) + self.freq_array.shape\n\n if self.cal_type == \"delay\":\n self._set_delay()\n self.Nants_data = hdr.pop(\"NAXIS6\")\n\n self.delay_array = data[:, :, :, :, :, 0]\n self.quality_array = data[:, :, :, :, :, 1]\n\n sechdu = fname[hdunames[\"FLAGS\"]]\n flag_data = sechdu.data\n if sechdu.header[\"NAXIS1\"] == 2:\n self.flag_array = flag_data[:, :, :, :, :, 0].astype(\"bool\")\n self.input_flag_array = flag_data[:, :, :, :, :, 1].astype(\"bool\")\n else:\n self.flag_array = flag_data[:, :, :, :, :, 0].astype(\"bool\")\n\n # generate frequency array from flag data unit\n # (no freq axis in primary).\n self.Nfreqs = sechdu.header[\"NAXIS4\"]\n self.freq_array = uvutils._fits_gethduaxis(sechdu, 4)\n self.freq_array.shape = (self.Nspws,) + self.freq_array.shape\n\n spw_array = uvutils._fits_gethduaxis(sechdu, 5) - 1\n\n if not np.allclose(spw_array, self.spw_array):\n raise ValueError(\n \"Spectral window values are different in FLAGS HDU than\"\n \" in primary HDU\"\n )\n\n time_array = uvutils._fits_gethduaxis(sechdu, 3)\n if not np.allclose(\n time_array,\n self.time_array,\n rtol=self._time_array.tols[0],\n atol=self._time_array.tols[0],\n ):\n raise ValueError(\n \"Time values are different in FLAGS HDU than in primary HDU\"\n )\n\n jones_array = uvutils._fits_gethduaxis(sechdu, 2)\n if not np.allclose(\n jones_array,\n self.jones_array,\n rtol=self._jones_array.tols[0],\n atol=self._jones_array.tols[0],\n ):\n raise ValueError(\n \"Jones values are different in FLAGS HDU than in primary HDU\"\n )\n\n self.extra_keywords = uvutils._get_fits_extra_keywords(hdr)\n\n # get total quality array if present\n if \"TOTQLTY\" in hdunames:\n totqualhdu = fname[hdunames[\"TOTQLTY\"]]\n self.total_quality_array = totqualhdu.data\n spw_array = uvutils._fits_gethduaxis(totqualhdu, 4) - 1\n if not np.allclose(spw_array, self.spw_array):\n raise ValueError(\n \"Spectral window values are different in \"\n \"TOTQLTY HDU than in primary HDU. 
primary HDU \"\n \"has {pspw}, TOTQLTY has {tspw}\".format(\n pspw=self.spw_array, tspw=spw_array\n )\n )\n\n if self.cal_type != \"delay\":\n # delay-type files won't have a freq_array\n freq_array = uvutils._fits_gethduaxis(totqualhdu, 3)\n freq_array.shape = (self.Nspws,) + freq_array.shape\n if not np.allclose(\n freq_array,\n self.freq_array,\n rtol=self._freq_array.tols[0],\n atol=self._freq_array.tols[0],\n ):\n raise ValueError(\n \"Frequency values are different in TOTQLTY HDU than\"\n \" in primary HDU\"\n )\n\n time_array = uvutils._fits_gethduaxis(totqualhdu, 2)\n if not np.allclose(\n time_array,\n self.time_array,\n rtol=self._time_array.tols[0],\n atol=self._time_array.tols[0],\n ):\n raise ValueError(\n \"Time values are different in TOTQLTY HDU than in primary HDU\"\n )\n\n jones_array = uvutils._fits_gethduaxis(totqualhdu, 1)\n if not np.allclose(\n jones_array,\n self.jones_array,\n rtol=self._jones_array.tols[0],\n atol=self._jones_array.tols[0],\n ):\n raise ValueError(\n \"Jones values are different in TOTQLTY HDU than in primary HDU\"\n )\n\n else:\n self.total_quality_array = None\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n", "id": "11328284", "language": "Python", "matching_score": 4.96517276763916, "max_stars_count": 0, "path": "pyuvdata/uvcal/calfits.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2019 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Primary container for radio interferometer flag manipulation.\"\"\"\nimport numpy as np\nimport os\nimport warnings\nimport h5py\nimport pathlib\n\nfrom ..uvbase import UVBase\nfrom .. import parameter as uvp\nfrom ..uvdata import UVData\nfrom ..uvcal import UVCal\nfrom .. import utils as uvutils\nfrom .. 
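# Illustrative sketch: the read_calfits method above, and the writer whose
# body appears earlier in this file (assumed here to be named write_calfits,
# since only its body is shown), are normally reached through the public
# UVCal object.  "example.calfits" and "copy.calfits" are placeholder names.
from pyuvdata import UVCal

cal = UVCal()
cal.read_calfits("example.calfits", run_check=True)
print(cal.cal_type, cal.Njones, cal.Nfreqs)
cal.write_calfits("copy.calfits", clobber=True)  # clobber maps to the writer's overwrite option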
import telescopes as uvtel\n\n__all__ = [\"UVFlag\", \"flags2waterfall\", \"and_rows_cols\", \"lst_from_uv\"]\n\n\ndef and_rows_cols(waterfall):\n \"\"\"Perform logical and over rows and cols of a waterfall.\n\n For a 2D flag waterfall, flag pixels only if fully flagged along\n time and/or frequency\n\n Parameters\n ----------\n waterfall : 2D boolean array of shape (Ntimes, Nfreqs)\n\n Returns\n -------\n wf : 2D array\n A 2D array (size same as input) where only times/integrations\n that were fully flagged are flagged.\n\n \"\"\"\n wf = np.zeros_like(waterfall, dtype=np.bool)\n Ntimes, Nfreqs = waterfall.shape\n wf[:, (np.sum(waterfall, axis=0) / Ntimes) == 1] = True\n wf[(np.sum(waterfall, axis=1) / Nfreqs) == 1] = True\n return wf\n\n\ndef lst_from_uv(uv):\n \"\"\"Calculate the lst_array for a UVData or UVCal object.\n\n Parameters\n ----------\n uv : a UVData or UVCal object.\n Object from which lsts are calculated\n\n Returns\n -------\n lst_array: array of float\n lst_array corresponding to time_array and at telescope location.\n Units are radian.\n\n \"\"\"\n if not isinstance(uv, (UVCal, UVData)):\n raise ValueError(\n \"Function lst_from_uv can only operate on \" \"UVCal or UVData object.\"\n )\n\n tel = uvtel.get_telescope(uv.telescope_name)\n lat, lon, alt = tel.telescope_location_lat_lon_alt_degrees\n lst_array = uvutils.get_lst_for_time(uv.time_array, lat, lon, alt)\n return lst_array\n\n\ndef flags2waterfall(uv, flag_array=None, keep_pol=False):\n \"\"\"Convert a flag array to a 2D waterfall of dimensions (Ntimes, Nfreqs).\n\n Averages over baselines and polarizations (in the case of visibility data),\n or antennas and jones parameters (in case of calibrationd data).\n\n Parameters\n ----------\n uv : A UVData or UVCal object\n Object defines the times and frequencies, and supplies the\n flag_array to convert (if flag_array not specified)\n flag_array : Optional,\n flag array to convert instead of uv.flag_array.\n Must have same dimensions as uv.flag_array.\n keep_pol : bool\n Option to keep the polarization axis intact.\n\n Returns\n -------\n waterfall : 2D array or 3D array\n Waterfall of averaged flags, for example fraction of baselines\n which are flagged for every time and frequency (in case of UVData input)\n Size is (Ntimes, Nfreqs) or (Ntimes, Nfreqs, Npols).\n\n \"\"\"\n if not isinstance(uv, (UVData, UVCal)):\n raise ValueError(\n \"flags2waterfall() requires a UVData or UVCal object as \"\n \"the first argument.\"\n )\n if flag_array is None:\n flag_array = uv.flag_array\n if uv.flag_array.shape != flag_array.shape:\n raise ValueError(\"Flag array must align with UVData or UVCal object.\")\n\n if isinstance(uv, UVCal):\n if keep_pol:\n waterfall = np.swapaxes(np.mean(flag_array, axis=(0, 1)), 0, 1)\n else:\n waterfall = np.mean(flag_array, axis=(0, 1, 4)).T\n else:\n if keep_pol:\n waterfall = np.zeros((uv.Ntimes, uv.Nfreqs, uv.Npols))\n for i, t in enumerate(np.unique(uv.time_array)):\n waterfall[i, :] = np.mean(\n flag_array[uv.time_array == t, 0, :, :], axis=0\n )\n else:\n waterfall = np.zeros((uv.Ntimes, uv.Nfreqs))\n for i, t in enumerate(np.unique(uv.time_array)):\n waterfall[i, :] = np.mean(\n flag_array[uv.time_array == t, 0, :, :], axis=(0, 2)\n )\n\n return waterfall\n\n\nclass UVFlag(UVBase):\n \"\"\"Object to handle flag arrays and waterfalls for interferometric datasets.\n\n Supports reading/writing, and stores all relevant information to combine\n flags and apply to data.\n Initialization of the UVFlag object requires some parameters. 
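# Illustrative sketch: and_rows_cols (defined above) on a toy waterfall.
# Only the fully flagged time row and the fully flagged frequency column
# survive; the isolated flag at [0, 0] is dropped.
import numpy as np

toy = np.zeros((3, 4), dtype=bool)
toy[1, :] = True   # one fully flagged time
toy[:, 2] = True   # one fully flagged frequency channel
toy[0, 0] = True   # isolated flag, not fully flagged along either axis

wf = and_rows_cols(toy)
assert wf[1, :].all() and wf[:, 2].all() and not wf[0, 0]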
Metadata is\n copied from indata object. If indata is subclass of UVData or UVCal,\n the weights_array will be set to all ones.\n Lists or tuples are iterated through, treating each entry with an\n individual UVFlag init.\n\n Parameters\n ----------\n indata : UVData, UVCal, str, pathlib.Path, list of compatible combination\n Input to initialize UVFlag object. If str, assumed to be path to previously\n saved UVFlag object. UVData and UVCal objects cannot be directly combined,\n unless waterfall is True.\n mode : {\"metric\", \"flag\"}, optional\n The mode determines whether the object has a floating point metric_array\n or a boolean flag_array.\n copy_flags : bool, optional\n Whether to copy flags from indata to new UVFlag object\n waterfall : bool, optional\n Whether to immediately initialize as a waterfall object, with flag/metric\n axes: time, frequency, polarization.\n history : str, optional\n History string to attach to object.\n label: str, optional\n String used for labeling the object (e.g. 'FM').\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after creating UVFlag object.\n check_extra : bool\n Option to check optional parameters as well as required ones (the\n default is True, meaning the optional parameters will be checked).\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n creating UVFlag object.\n\n Attributes\n ----------\n UVParameter objects :\n For full list see the UVFlag Parameters Documentation.\n (https://pyuvdata.readthedocs.io/en/latest/uvflag_parameters.html)\n Some are always required, some are required for certain phase_types\n and others are always optional.\n\n\n \"\"\"\n\n def __init__(\n self,\n indata=None,\n mode=\"metric\",\n copy_flags=False,\n waterfall=False,\n history=\"\",\n label=\"\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Initialize the object.\"\"\"\n # standard angle tolerance: 10 mas in radians.\n # Should perhaps be decreased to 1 mas in the future\n radian_tol = 10 * 2 * np.pi * 1e-3 / (60.0 * 60.0 * 360.0)\n\n desc = (\n \"The mode determines whether the object has a \"\n \"floating point metric_array or a boolean flag_array. \"\n 'Options: {\"metric\", \"flag\"}. Default is \"metric\".'\n )\n self._mode = uvp.UVParameter(\n \"mode\",\n description=desc,\n form=\"str\",\n expected_type=str,\n acceptable_vals=[\"metric\", \"flag\"],\n )\n\n desc = (\n \"String used for labeling the object (e.g. 'FM'). \"\n \"Default is empty string.\"\n )\n self._label = uvp.UVParameter(\n \"label\", description=desc, form=\"str\", expected_type=str\n )\n\n desc = (\n \"The type of object defines the form of some arrays \"\n \" and also how metrics/flags are combined. \"\n 'Accepted types:\"waterfall\", \"baseline\", \"antenna\"'\n )\n self._type = uvp.UVParameter(\n \"type\",\n description=desc,\n form=\"str\",\n expected_type=str,\n acceptable_vals=[\"antenna\", \"baseline\", \"waterfall\"],\n )\n\n self._Ntimes = uvp.UVParameter(\n \"Ntimes\", description=\"Number of times\", expected_type=int\n )\n desc = \"Number of baselines. \" 'Only Required for \"baseline\" type objects.'\n self._Nbls = uvp.UVParameter(\n \"Nbls\", description=desc, expected_type=int, required=False\n )\n self._Nblts = uvp.UVParameter(\n \"Nblts\",\n description=\"Number of baseline-times \"\n \"(i.e. number of spectra). 
Not necessarily \"\n \"equal to Nbls * Ntimes\",\n expected_type=int,\n )\n self._Nspws = uvp.UVParameter(\n \"Nspws\",\n description=\"Number of spectral windows \"\n \"(ie non-contiguous spectral chunks). \"\n \"More than one spectral window is not \"\n \"currently supported.\",\n expected_type=int,\n required=False,\n )\n self._Nfreqs = uvp.UVParameter(\n \"Nfreqs\", description=\"Number of frequency channels\", expected_type=int\n )\n self._Npols = uvp.UVParameter(\n \"Npols\", description=\"Number of polarizations\", expected_type=int\n )\n\n desc = (\n \"Floating point metric information, only availble in metric mode. \"\n \"shape (Nblts, Nspws, Nfreq, Npols).\"\n )\n self._metric_array = uvp.UVParameter(\n \"metric_array\",\n description=desc,\n form=(\"Nblts\", \"Nspws\", \"Nfreqs\", \"Npols\"),\n expected_type=np.float,\n required=False,\n )\n\n desc = (\n \"Boolean flag, True is flagged, only availble in flag mode. \"\n \"shape (Nblts, Nspws, Nfreq, Npols).\"\n )\n self._flag_array = uvp.UVParameter(\n \"flag_array\",\n description=desc,\n form=(\"Nblts\", \"Nspws\", \"Nfreqs\", \"Npols\"),\n expected_type=np.bool,\n required=False,\n )\n\n desc = \"Floating point weight information, shape (Nblts, Nspws, Nfreq, Npols).\"\n self._weights_array = uvp.UVParameter(\n \"weights_array\",\n description=desc,\n form=(\"Nblts\", \"Nspws\", \"Nfreqs\", \"Npols\"),\n expected_type=np.float,\n )\n\n desc = (\n \"Floating point weight information about sum of squares of weights\"\n \" when weighted data converted from baseline to waterfall mode.\"\n )\n self._weights_square_array = uvp.UVParameter(\n \"weights_square_array\",\n description=desc,\n form=(\"Nblts\", \"Nspws\", \"Nfreqs\", \"Npols\"),\n expected_type=np.float,\n required=False,\n )\n\n desc = (\n \"Array of times, center of integration, shape (Nblts), \" \"units Julian Date\"\n )\n self._time_array = uvp.UVParameter(\n \"time_array\",\n description=desc,\n form=(\"Nblts\",),\n expected_type=np.float,\n tols=1e-3 / (60.0 * 60.0 * 24.0),\n ) # 1 ms in days\n\n desc = \"Array of lsts, center of integration, shape (Nblts), \" \"units radians\"\n self._lst_array = uvp.UVParameter(\n \"lst_array\",\n description=desc,\n form=(\"Nblts\",),\n expected_type=np.float,\n tols=radian_tol,\n )\n\n desc = (\n \"Array of first antenna indices, shape (Nblts). \"\n 'Only available for \"baseline\" type objects. '\n \"type = int, 0 indexed\"\n )\n self._ant_1_array = uvp.UVParameter(\n \"ant_1_array\", description=desc, expected_type=int, form=(\"Nblts\",)\n )\n desc = (\n \"Array of second antenna indices, shape (Nblts). \"\n 'Only available for \"baseline\" type objects. '\n \"type = int, 0 indexed\"\n )\n self._ant_2_array = uvp.UVParameter(\n \"ant_2_array\", description=desc, expected_type=int, form=(\"Nblts\",)\n )\n\n desc = (\n \"Array of antenna numbers, shape (Nants_data), \"\n 'Only available for \"antenna\" type objects. '\n \"type = int, 0 indexed\"\n )\n self._ant_array = uvp.UVParameter(\n \"ant_array\", description=desc, expected_type=int, form=(\"Nants_data\",)\n )\n\n desc = (\n \"Array of baseline indices, shape (Nblts). \"\n 'Only available for \"baseline\" type objects. 
'\n \"type = int; baseline = 2048 * (ant1+1) + (ant2+1) + 2^16\"\n )\n self._baseline_array = uvp.UVParameter(\n \"baseline_array\", description=desc, expected_type=int, form=(\"Nblts\",)\n )\n\n desc = (\n \"Array of frequencies, center of the channel, \"\n \"shape (Nspws, Nfreqs), units Hz\"\n )\n self._freq_array = uvp.UVParameter(\n \"freq_array\",\n description=desc,\n form=(\"Nspws\", \"Nfreqs\"),\n expected_type=np.float,\n tols=1e-3,\n ) # mHz\n\n desc = (\n \"Array of polarization integers, shape (Npols). \"\n \"AIPS Memo 117 says: pseudo-stokes 1:4 (pI, pQ, pU, pV); \"\n \"circular -1:-4 (RR, LL, RL, LR); linear -5:-8 (XX, YY, XY, YX). \"\n \"NOTE: AIPS Memo 117 actually calls the pseudo-Stokes polarizations \"\n '\"Stokes\", but this is inaccurate as visibilities cannot be in '\n \"true Stokes polarizations for physical antennas. We adopt the \"\n \"term pseudo-Stokes to refer to linear combinations of instrumental \"\n \"visibility polarizations (e.g. pI = xx + yy).\"\n )\n self._polarization_array = uvp.UVParameter(\n \"polarization_array\",\n description=desc,\n expected_type=int,\n acceptable_vals=list(np.arange(-8, 0)) + list(np.arange(1, 5)),\n form=(\"Npols\",),\n )\n\n self._history = uvp.UVParameter(\n \"history\",\n description=\"String of history, units English\",\n form=\"str\",\n expected_type=str,\n )\n\n # ---antenna information ---\n desc = (\n \"Number of antennas in the array. \"\n 'Only available for \"baseline\" type objects. '\n \"May be larger than the number of antennas with data.\"\n )\n self._Nants_telescope = uvp.UVParameter(\n \"Nants_telescope\", description=desc, expected_type=int, required=False\n )\n desc = (\n \"Number of antennas with data present. \"\n 'Only available for \"baseline\" or \"antenna\" type objects.'\n \"May be smaller than the number of antennas in the array\"\n )\n self._Nants_data = uvp.UVParameter(\n \"Nants_data\", description=desc, expected_type=int, required=True\n )\n # --extra information ---\n desc = (\n \"Orientation of the physical dipole corresponding to what is \"\n 'labelled as the x polarization. 
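# Illustrative sketch: the baseline encoding quoted in the description above,
# baseline = 2048 * (ant1 + 1) + (ant2 + 1) + 2**16, worked for one antenna
# pair.  This is the same convention that baseline_to_antnums (further down)
# inverts via the uvutils baseline helpers.
ant1, ant2 = 0, 3
baseline = 2048 * (ant1 + 1) + (ant2 + 1) + 2 ** 16
# 2048 * 1 + 4 + 65536 == 67588
assert baseline == 67588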
Options are \"east\" '\n '(indicating east/west orientation) and \"north\" (indicating '\n \"north/south orientation)\"\n )\n self._x_orientation = uvp.UVParameter(\n \"x_orientation\",\n description=desc,\n required=False,\n expected_type=str,\n acceptable_vals=[\"east\", \"north\"],\n )\n\n # initialize the underlying UVBase properties\n super(UVFlag, self).__init__()\n\n self.history = \"\" # Added to at the end\n\n self.label = \"\" # Added to at the end\n if isinstance(indata, (list, tuple)):\n self.__init__(\n indata[0],\n mode=mode,\n copy_flags=copy_flags,\n waterfall=waterfall,\n history=history,\n label=label,\n run_check=run_check,\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n )\n if len(indata) > 1:\n for i in indata[1:]:\n fobj = UVFlag(\n i,\n mode=mode,\n copy_flags=copy_flags,\n waterfall=waterfall,\n history=history,\n run_check=run_check,\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n )\n self.__add__(\n fobj,\n run_check=run_check,\n inplace=True,\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n )\n del fobj\n\n elif issubclass(indata.__class__, (str, pathlib.Path)):\n # Given a path, read indata\n self.read(\n indata,\n history,\n run_check=run_check,\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n )\n elif issubclass(indata.__class__, UVData):\n self.from_uvdata(\n indata,\n mode=mode,\n copy_flags=copy_flags,\n waterfall=waterfall,\n history=history,\n label=label,\n run_check=run_check,\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n )\n\n elif issubclass(indata.__class__, UVCal):\n self.from_uvcal(\n indata,\n mode=mode,\n copy_flags=copy_flags,\n waterfall=waterfall,\n history=history,\n label=label,\n run_check=run_check,\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n )\n\n elif indata is not None:\n raise ValueError(\n \"input to UVFlag.__init__ must be one of: \"\n \"list, tuple, string, pathlib.Path, UVData, or UVCal.\"\n )\n\n @property\n def _data_params(self):\n \"\"\"List of strings giving the data-like parameters.\"\"\"\n if not hasattr(self, \"mode\") or self.mode is None:\n return None\n elif self.mode == \"flag\":\n return [\"flag_array\"]\n elif self.mode == \"metric\":\n if self.weights_square_array is None:\n return [\"metric_array\", \"weights_array\"]\n else:\n return [\"metric_array\", \"weights_array\", \"weights_square_array\"]\n else:\n raise ValueError(\n \"Invalid mode. 
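# Illustrative sketch: the constructor dispatch above in use.  "data.uvh5" and
# "flags.h5" are placeholder file names; any UVData-readable file would do.
from pyuvdata import UVData, UVFlag

uvd = UVData()
uvd.read("data.uvh5")

uvf_bl = UVFlag(uvd, mode="flag", copy_flags=True)      # baseline-type flags
uvf_wf = UVFlag(uvd, mode="metric", waterfall=True)     # waterfall-type metrics
uvf_old = UVFlag("flags.h5")                            # reload a previously saved UVFlag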
Mode must be one of \"\n + \", \".join([\"{}\"] * len(self._mode.acceptable_vals)).format(\n *self._mode.acceptable_vals\n )\n )\n\n @property\n def data_like_parameters(self):\n \"\"\"Return iterator of defined parameters which are data-like.\"\"\"\n for key in self._data_params:\n if hasattr(self, key):\n yield getattr(self, key)\n\n @property\n def pol_collapsed(self):\n \"\"\"Determine if this object has had pols collapsed.\"\"\"\n if not hasattr(self, \"polarization_array\") or self.polarization_array is None:\n return False\n elif isinstance(self.polarization_array.item(0), str):\n return True\n else:\n return False\n\n def _check_pol_state(self):\n if self.pol_collapsed:\n # collapsed pol objects have a different type for\n # the polarization array.\n self._polarization_array.expected_type = str\n self._polarization_array.acceptable_vals = None\n else:\n self._polarization_array.expected_type = int\n self._polarization_array.acceptable_vals = list(np.arange(-8, 0)) + list(\n np.arange(1, 5)\n )\n\n def _set_mode_flag(self):\n \"\"\"Set the mode and required parameters consistent with a flag object.\"\"\"\n self.mode = \"flag\"\n self._flag_array.required = True\n self._metric_array.required = False\n self._weights_array.required = False\n if self.weights_square_array is not None:\n self.weights_square_array = None\n\n return\n\n def _set_mode_metric(self):\n \"\"\"Set the mode and required parameters consistent with a metric object.\"\"\"\n self.mode = \"metric\"\n self._flag_array.required = False\n self._metric_array.required = True\n self._weights_array.required = True\n\n if self.weights_array is None and self.metric_array is not None:\n self.weights_array = np.ones_like(self.metric_array, dtype=float)\n\n return\n\n def _set_type_antenna(self):\n \"\"\"Set the type and required propertis consistent with an antenna object.\"\"\"\n self.type = \"antenna\"\n self._ant_array.required = True\n self._baseline_array.required = False\n self._ant_1_array.required = False\n self._ant_2_array.required = False\n self._Nants_telescope.required = False\n self._Nants_data.required = True\n self._Nbls.required = False\n self._Nspws.required = True\n self._Nblts.required = False\n\n desc = (\n \"Floating point metric information, \"\n \"has shape (Nants_data, Nspws, Nfreqs, Ntimes, Npols).\"\n )\n self._metric_array.desc = desc\n self._metric_array.form = (\"Nants_data\", \"Nspws\", \"Nfreqs\", \"Ntimes\", \"Npols\")\n\n desc = (\n \"Boolean flag, True is flagged, \"\n \"has shape (Nants_data, Nspws, Nfreqs, Ntimes, Npols).\"\n )\n self._flag_array.desc = desc\n self._flag_array.form = (\"Nants_data\", \"Nspws\", \"Nfreqs\", \"Ntimes\", \"Npols\")\n\n desc = (\n \"Floating point weight information, \"\n \"has shape (Nants_data, Nspws, Nfreqs, Ntimes, Npols).\"\n )\n self._weights_array.desc = desc\n self._weights_array.form = (\"Nants_data\", \"Nspws\", \"Nfreqs\", \"Ntimes\", \"Npols\")\n\n desc = (\n \"Array of unique times, center of integration, shape (Ntimes), \"\n \"units Julian Date\"\n )\n self._time_array.form = (\"Ntimes\",)\n\n desc = (\n \"Array of unique lsts, center of integration, shape (Ntimes), \"\n \"units radians\"\n )\n self._lst_array.form = (\"Ntimes\",)\n\n desc = (\n \"Array of frequencies, center of the channel, \"\n \"shape (Nspws, Nfreqs), units Hz\"\n )\n self._freq_array.form = (\"Nspws\", \"Nfreqs\")\n\n def _set_type_baseline(self):\n \"\"\"Set the type and required propertis consistent with a baseline object.\"\"\"\n self.type = \"baseline\"\n 
self._ant_array.required = False\n self._baseline_array.required = True\n self._ant_1_array.required = True\n self._ant_2_array.required = True\n self._Nants_telescope.required = True\n self._Nants_data.required = True\n self._Nbls.required = True\n self._Nblts.required = True\n self._Nspws.required = True\n\n if self.time_array is not None:\n self.Nblts = len(self.time_array)\n\n desc = \"Floating point metric information, shape (Nblts, Nspws, Nfreqs, Npols).\"\n self._metric_array.desc = desc\n self._metric_array.form = (\"Nblts\", \"Nspws\", \"Nfreqs\", \"Npols\")\n\n desc = \"Boolean flag, True is flagged, shape (Nblts, Nfreqs, Npols)\"\n self._flag_array.desc = desc\n self._flag_array.form = (\"Nblts\", \"Nspws\", \"Nfreqs\", \"Npols\")\n\n desc = \"Floating point weight information, has shape (Nblts, Nfreqs, Npols).\"\n self._weights_array.desc = desc\n self._weights_array.form = (\"Nblts\", \"Nspws\", \"Nfreqs\", \"Npols\")\n\n desc = (\n \"Array of unique times, center of integration, shape (Ntimes), \"\n \"units Julian Date\"\n )\n self._time_array.form = (\"Nblts\",)\n\n desc = (\n \"Array of unique lsts, center of integration, shape (Ntimes), \"\n \"units radians\"\n )\n self._lst_array.form = (\"Nblts\",)\n\n desc = (\n \"Array of frequencies, center of the channel, \"\n \"shape (Nspws, Nfreqs), units Hz\"\n )\n self._freq_array.form = (\"Nspws\", \"Nfreqs\")\n\n def _set_type_waterfall(self):\n \"\"\"Set the type and required propertis consistent with a waterfall object.\"\"\"\n self.type = \"waterfall\"\n self._ant_array.required = False\n self._baseline_array.required = False\n self._ant_1_array.required = False\n self._ant_2_array.required = False\n self._Nants_telescope.required = False\n self._Nants_data.required = False\n self._Nbls.required = False\n self._Nspws.required = False\n self._Nblts.required = False\n\n desc = \"Floating point metric information, shape (Ntimes, Nfreqs, Npols).\"\n self._metric_array.desc = desc\n self._metric_array.form = (\"Ntimes\", \"Nfreqs\", \"Npols\")\n\n desc = \"Boolean flag, True is flagged, shape (Ntimes, Nfreqs, Npols)\"\n self._flag_array.desc = desc\n self._flag_array.form = (\"Ntimes\", \"Nfreqs\", \"Npols\")\n\n desc = \"Floating point weight information, has shape (Ntimes, Nfreqs, Npols).\"\n self._weights_array.desc = desc\n self._weights_array.form = (\"Ntimes\", \"Nfreqs\", \"Npols\")\n\n desc = (\n \"Floating point weight information about sum of squares of weights\"\n \" when weighted data converted from baseline to waterfall mode.\"\n \" Has shape (Ntimes, Nfreqs, Npols).\"\n )\n self._weights_square_array.desc = desc\n self._weights_square_array.form = (\"Ntimes\", \"Nfreqs\", \"Npols\")\n\n desc = (\n \"Array of unique times, center of integration, shape (Ntimes), \"\n \"units Julian Date\"\n )\n self._time_array.form = (\"Ntimes\",)\n\n desc = (\n \"Array of unique lsts, center of integration, shape (Ntimes), \"\n \"units radians\"\n )\n self._lst_array.form = (\"Ntimes\",)\n\n desc = (\n \"Array of frequencies, center of the channel, \" \"shape (Nfreqs), units Hz\"\n )\n self._freq_array.form = (\"Nfreqs\",)\n\n def clear_unused_attributes(self):\n \"\"\"Remove unused attributes.\n\n Useful when changing type or mode or to save memory.\n Will set all non-required attributes to None, except x_orientation and\n weights_square_array.\n\n \"\"\"\n for p in self:\n attr = getattr(self, p)\n if (\n not attr.required\n and attr.value is not None\n and attr.name != \"x_orientation\"\n and attr.name != 
\"weights_square_array\"\n ):\n attr.value = None\n setattr(self, p, attr)\n\n def __eq__(self, other, check_history=True, check_extra=True):\n \"\"\"Check Equality of two UVFlag objects.\n\n Parameters\n ----------\n other: UVFlag\n object to check against\n check_history : bool\n Include the history keyword when comparing UVFlag objects.\n check_extra : bool\n Include non-required parameters when comparing UVFlag objects.\n\n \"\"\"\n if check_history:\n return super(UVFlag, self).__eq__(other, check_extra=check_extra)\n\n else:\n # initial check that the classes are the same\n # then strip the histories\n if isinstance(other, self.__class__):\n _h1 = self.history\n self.history = None\n\n _h2 = other.history\n other.history = None\n\n truth = super(UVFlag, self).__eq__(other, check_extra=check_extra)\n\n self.history = _h1\n other.history = _h2\n\n return truth\n else:\n print(\"Classes do not match\")\n return False\n\n def __ne__(self, other, check_history=True, check_extra=True):\n \"\"\"Not Equal.\"\"\"\n return not self.__eq__(\n other, check_history=check_history, check_extra=check_extra\n )\n\n def antpair2ind(self, ant1, ant2):\n \"\"\"Get blt indices for given (ordered) antenna pair.\n\n Parameters\n ----------\n ant1 : int or array_like of int\n Number of the first antenna\n ant2 : int or array_like of int\n Number of the second antenna\n\n Returns\n -------\n int or array_like of int\n baseline number(s) corresponding to the input antenna number\n\n \"\"\"\n if self.type != \"baseline\":\n raise ValueError(\n \"UVFlag object of type \" + self.type + \" does not \"\n \"contain antenna pairs to index.\"\n )\n return np.where((self.ant_1_array == ant1) & (self.ant_2_array == ant2))[0]\n\n def baseline_to_antnums(self, baseline):\n \"\"\"Get the antenna numbers corresponding to a given baseline number.\n\n Parameters\n ----------\n baseline : int\n baseline number\n\n Returns\n -------\n tuple\n Antenna numbers corresponding to baseline.\n\n \"\"\"\n assert self.type == \"baseline\", 'Must be \"baseline\" type UVFlag object.'\n return uvutils.baseline_to_antnums(baseline, self.Nants_telescope)\n\n def get_baseline_nums(self):\n \"\"\"Return numpy array of unique baseline numbers in data.\"\"\"\n assert self.type == \"baseline\", 'Must be \"baseline\" type UVFlag object.'\n return np.unique(self.baseline_array)\n\n def get_antpairs(self):\n \"\"\"Return list of unique antpair tuples (ant1, ant2) in data.\"\"\"\n assert self.type == \"baseline\", 'Must be \"baseline\" type UVFlag object.'\n return [self.baseline_to_antnums(bl) for bl in self.get_baseline_nums()]\n\n def get_ants(self):\n \"\"\"\n Get the unique antennas that have data associated with them.\n\n Returns\n -------\n ndarray of int\n Array of unique antennas with data associated with them.\n \"\"\"\n if self.type == \"baseline\":\n return np.unique(np.append(self.ant_1_array, self.ant_2_array))\n elif self.type == \"antenna\":\n return np.unique(self.ant_array)\n elif self.type == \"waterfall\":\n raise ValueError(\"A waterfall type UVFlag object has no sense of antennas.\")\n\n def get_pols(self):\n \"\"\"\n Get the polarizations in the data.\n\n Returns\n -------\n list of str\n list of polarizations (as strings) in the data.\n \"\"\"\n return uvutils.polnum2str(\n self.polarization_array, x_orientation=self.x_orientation\n )\n\n def parse_ants(self, ant_str, print_toggle=False):\n \"\"\"\n Get antpair and polarization from parsing an aipy-style ant string.\n\n Used to support the the select function.\n This function 
is only useable when the UVFlag type is 'baseline'.\n Generates two lists of antenna pair tuples and polarization indices based\n on parsing of the string ant_str. If no valid polarizations (pseudo-Stokes\n params, or combinations of [lr] or [xy]) or antenna numbers are found in\n ant_str, ant_pairs_nums and polarizations are returned as None.\n\n Parameters\n ----------\n ant_str : str\n String containing antenna information to parse. Can be 'all',\n 'auto', 'cross', or combinations of antenna numbers and polarization\n indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used\n in front of an antenna number or baseline to exclude it from being\n output in ant_pairs_nums. If ant_str has a minus sign as the first\n character, 'all,' will be appended to the beginning of the string.\n See the tutorial for examples of valid strings and their behavior.\n print_toggle : bool\n Boolean for printing parsed baselines for a visual user check.\n\n Returns\n -------\n ant_pairs_nums : list of tuples of int or None\n List of tuples containing the parsed pairs of antenna numbers, or\n None if ant_str is 'all' or a pseudo-Stokes polarizations.\n polarizations : list of int or None\n List of desired polarizations or None if ant_str does not contain a\n polarization specification.\n\n \"\"\"\n if self.type != \"baseline\":\n raise ValueError(\n \"UVFlag objects can only call 'parse_ants' function \"\n \"if type is 'baseline'.\"\n )\n return uvutils.parse_ants(\n self,\n ant_str=ant_str,\n print_toggle=print_toggle,\n x_orientation=self.x_orientation,\n )\n\n def collapse_pol(\n self,\n method=\"quadmean\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Collapse the polarization axis using a given method.\n\n If the original UVFlag object has more than one polarization,\n the resulting polarization_array will be a single element array with a\n comma separated string encoding the original polarizations.\n\n Parameters\n ----------\n method : str, {\"quadmean\", \"absmean\", \"mean\", \"or\", \"and\"}\n How to collapse the dimension(s).\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after collapsing polarizations.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n collapsing polarizations.\n\n \"\"\"\n method = method.lower()\n if self.mode == \"flag\":\n darr = self.flag_array\n else:\n darr = self.metric_array\n if len(self.polarization_array) > 1:\n if self.mode == \"metric\":\n _weights = self.weights_array\n else:\n _weights = np.ones_like(darr)\n # Collapse pol dimension. But note we retain a polarization axis.\n d, w = uvutils.collapse(\n darr, method, axis=-1, weights=_weights, return_weights=True\n )\n darr = np.expand_dims(d, axis=d.ndim)\n\n if self.mode == \"metric\":\n self.weights_array = np.expand_dims(w, axis=w.ndim)\n\n self.polarization_array = np.array(\n [\",\".join(map(str, self.polarization_array))], dtype=np.str_\n )\n\n self.Npols = len(self.polarization_array)\n self._check_pol_state()\n else:\n warnings.warn(\n \"Cannot collapse polarization axis when only one pol present.\"\n )\n return\n if ((method == \"or\") or (method == \"and\")) and (self.mode == \"flag\"):\n self.flag_array = darr\n else:\n self.metric_array = darr\n self._set_mode_metric()\n self.clear_unused_attributes()\n self.history += \"Pol axis collapse. 
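# Illustrative sketch: the indexing helpers defined above on a baseline-type
# object.  uvf_bl is assumed to be a baseline-type UVFlag (for example built
# from a UVData object as in the earlier sketch); antpair2ind returns an
# empty array if the ordered pair has no baseline-times.
ants = uvf_bl.get_ants()                      # unique antennas with data
pairs = uvf_bl.get_antpairs()                 # unique (ant1, ant2) tuples
blt_inds = uvf_bl.antpair2ind(*pairs[0])      # blt rows for one ordered pair
ant1, ant2 = uvf_bl.baseline_to_antnums(uvf_bl.get_baseline_nums()[0])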
\"\n\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n def to_waterfall(\n self,\n method=\"quadmean\",\n keep_pol=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n return_weights_square=False,\n ):\n \"\"\"Convert an 'antenna' or 'baseline' type object to waterfall.\n\n Parameters\n ----------\n method : str, {\"quadmean\", \"absmean\", \"mean\", \"or\", \"and\"}\n How to collapse the dimension(s).\n keep_pol : bool\n Whether to also collapse the polarization dimension\n If keep_pol is False, and the original UVFlag object has more\n than one polarization, the resulting polarization_array\n will be a single element array with a comma separated string\n encoding the original polarizations.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after converting to waterfall type.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n converting to waterfall type.\n return_weights_square: bool\n Option to compute the sum of the squares of the weights when\n collapsing baseline object to waterfall. Not used if type is not\n baseline to begin with. Fills an optional parameter if so.\n\n \"\"\"\n method = method.lower()\n if self.type == \"waterfall\" and (\n keep_pol or (len(self.polarization_array) == 1)\n ):\n warnings.warn(\"This object is already a waterfall. Nothing to change.\")\n return\n if (not keep_pol) and (len(self.polarization_array) > 1):\n self.collapse_pol(method)\n\n if self.mode == \"flag\":\n darr = self.flag_array\n else:\n darr = self.metric_array\n\n if self.type == \"antenna\":\n d, w = uvutils.collapse(\n darr,\n method,\n axis=(0, 1),\n weights=self.weights_array,\n return_weights=True,\n )\n darr = np.swapaxes(d, 0, 1)\n if self.mode == \"metric\":\n self.weights_array = np.swapaxes(w, 0, 1)\n elif self.type == \"baseline\":\n Nt = len(np.unique(self.time_array))\n Nf = len(self.freq_array[0, :])\n Np = len(self.polarization_array)\n d = np.zeros((Nt, Nf, Np))\n w = np.zeros((Nt, Nf, Np))\n if return_weights_square:\n ws = np.zeros((Nt, Nf, Np))\n for i, t in enumerate(np.unique(self.time_array)):\n ind = self.time_array == t\n if self.mode == \"metric\":\n _weights = self.weights_array[ind, :, :]\n else:\n _weights = np.ones_like(darr[ind, :, :], dtype=float)\n if return_weights_square:\n d[i, :, :], w[i, :, :], ws[i, :, :] = uvutils.collapse(\n darr[ind, :, :],\n method,\n axis=0,\n weights=_weights,\n return_weights=True,\n return_weights_square=return_weights_square,\n )\n else:\n d[i, :, :], w[i, :, :] = uvutils.collapse(\n darr[ind, :, :],\n method,\n axis=0,\n weights=_weights,\n return_weights=True,\n return_weights_square=return_weights_square,\n )\n darr = d\n if self.mode == \"metric\":\n self.weights_array = w\n if return_weights_square:\n self.weights_square_array = ws\n self.time_array, ri = np.unique(self.time_array, return_index=True)\n self.lst_array = self.lst_array[ri]\n if ((method == \"or\") or (method == \"and\")) and (self.mode == \"flag\"):\n # If using a boolean operation (AND/OR) and in flag mode, stay in flag\n # flags should be bool, but somehow it is cast as float64\n # is reacasting to bool like this best?\n self.flag_array = darr.astype(bool)\n else:\n # 
Otherwise change to (or stay in) metric\n self.metric_array = darr\n self._set_mode_metric()\n self.freq_array = self.freq_array.flatten()\n self.Nspws = None\n self._set_type_waterfall()\n self.history += 'Collapsed to type \"waterfall\". ' # + self.pyuvdata_version_str\n\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n self.clear_unused_attributes()\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n def to_baseline(\n self,\n uv,\n force_pol=False,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Convert a UVFlag object of type \"waterfall\" to type \"baseline\".\n\n Broadcasts the flag array to all baselines.\n This function does NOT apply flags to uv.\n\n Parameters\n ----------\n uv : UVData or UVFlag object\n Objcet with type baseline to match.\n force_pol : bool\n If True, will use 1 pol to broadcast to any other pol.\n Otherwise, will require polarizations match.\n For example, this keyword is useful if one flags on all\n pols combined, and wants to broadcast back to individual pols.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after converting to baseline type.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n converting to baseline type.\n\n \"\"\"\n if self.type == \"baseline\":\n return\n if not (\n issubclass(uv.__class__, UVData)\n or (isinstance(uv, UVFlag) and uv.type == \"baseline\")\n ):\n raise ValueError(\n \"Must pass in UVData object or UVFlag object of type \"\n '\"baseline\" to match.'\n )\n\n # Deal with polarization\n if force_pol and self.polarization_array.size == 1:\n # Use single pol for all pols, regardless\n self.polarization_array = uv.polarization_array\n # Broadcast arrays\n if self.mode == \"flag\":\n self.flag_array = self.flag_array.repeat(\n self.polarization_array.size, axis=-1\n )\n else:\n self.metric_array = self.metric_array.repeat(\n self.polarization_array.size, axis=-1\n )\n self.weights_array = self.weights_array.repeat(\n self.polarization_array.size, axis=-1\n )\n self.Npols = len(self.polarization_array)\n self._check_pol_state()\n\n # Now the pol axes should match regardless of force_pol.\n if not np.array_equal(uv.polarization_array, self.polarization_array):\n if self.polarization_array.size == 1:\n raise ValueError(\n \"Polarizations do not match. 
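# Illustrative sketch: collapsing a baseline-type, metric-mode object to a
# waterfall with the method above.  uvf_bl is assumed to be such an object;
# after the call its arrays have shape (Ntimes, Nfreqs, Npols).
uvf_bl.to_waterfall(method="quadmean", keep_pol=True, return_weights_square=True)
print(uvf_bl.type, uvf_bl.metric_array.shape)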
Try keyword force_pol\"\n + \" if you wish to broadcast to all polarizations.\"\n )\n else:\n raise ValueError(\"Polarizations could not be made to match.\")\n if self.type == \"waterfall\":\n # Populate arrays\n if self.mode == \"flag\":\n arr = np.zeros_like(uv.flag_array)\n sarr = self.flag_array\n elif self.mode == \"metric\":\n arr = np.zeros_like(uv.flag_array, dtype=float)\n warr = np.zeros_like(uv.flag_array, dtype=np.float)\n sarr = self.metric_array\n for i, t in enumerate(np.unique(self.time_array)):\n ti = np.where(uv.time_array == t)\n arr[ti, :, :, :] = sarr[i, :, :][np.newaxis, np.newaxis, :, :]\n if self.mode == \"metric\":\n warr[ti, :, :, :] = self.weights_array[i, :, :][\n np.newaxis, np.newaxis, :, :\n ]\n if self.mode == \"flag\":\n self.flag_array = arr\n elif self.mode == \"metric\":\n self.metric_array = arr\n self.weights_array = warr\n\n elif self.type == \"antenna\":\n if self.mode == \"metric\":\n raise NotImplementedError(\n \"Cannot currently convert from \"\n \"antenna type, metric mode to \"\n \"baseline type UVFlag object.\"\n )\n else:\n ants_data = np.unique(uv.ant_1_array.tolist() + uv.ant_2_array.tolist())\n new_ants = np.setdiff1d(ants_data, self.ant_array)\n if new_ants.size > 0:\n self.ant_array = np.append(self.ant_array, new_ants).tolist()\n # make new flags of the same shape but with first axis the\n # size of the new ants\n flag_shape = list(self.flag_array.shape)\n flag_shape[0] = new_ants.size\n new_flags = np.full(flag_shape, True, dtype=bool)\n self.flag_array = np.append(self.flag_array, new_flags, axis=0)\n\n baseline_flags = np.full(\n (uv.Nblts, self.Nspws, self.Nfreqs, self.Npols), True, dtype=bool\n )\n\n for t_index, bl in enumerate(uv.baseline_array):\n uvf_t_index = np.nonzero(uv.time_array[t_index] == self.time_array)[\n 0\n ]\n if uvf_t_index.size > 0:\n # if the time is found in the array\n # input the or'ed data from each antenna\n ant1, ant2 = uv.baseline_to_antnums(bl)\n ant1_index = np.nonzero(np.array(self.ant_array) == ant1)\n ant2_index = np.nonzero(np.array(self.ant_array) == ant2)\n or_flag = np.logical_or(\n self.flag_array[ant1_index, :, :, uvf_t_index, :],\n self.flag_array[ant2_index, :, :, uvf_t_index, :],\n )\n\n baseline_flags[t_index, :, :, :] = or_flag.copy()\n\n self.flag_array = baseline_flags\n\n # Check the frequency array for Nspws, otherwise broadcast to 1,Nfreqs\n self.freq_array = np.atleast_2d(self.freq_array)\n self.Nspws = self.freq_array.shape[0]\n\n self.baseline_array = uv.baseline_array\n self.Nbls = np.unique(self.baseline_array).size\n self.ant_1_array = uv.ant_1_array\n self.ant_2_array = uv.ant_2_array\n self.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)\n\n self.time_array = uv.time_array\n self.lst_array = uv.lst_array\n self.Nblts = self.time_array.size\n\n self.Nants_telescope = int(uv.Nants_telescope)\n self._set_type_baseline()\n self.clear_unused_attributes()\n self.history += 'Broadcast to type \"baseline\". 
'\n\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n def to_antenna(\n self,\n uv,\n force_pol=False,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Convert a UVFlag object of type \"waterfall\" to type \"antenna\".\n\n Broadcasts the flag array to all antennas.\n This function does NOT apply flags to uv.\n\n Parameters\n ----------\n uv : UVCal or UVFlag object\n object of type antenna to match.\n force_pol : bool\n If True, will use 1 pol to broadcast to any other pol.\n Otherwise, will require polarizations match.\n For example, this keyword is useful if one flags on all\n pols combined, and wants to broadcast back to individual pols.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after converting to antenna type.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n converting to antenna type.\n\n \"\"\"\n if self.type == \"antenna\":\n return\n if not (\n issubclass(uv.__class__, UVCal)\n or (isinstance(uv, UVFlag) and uv.type == \"antenna\")\n ):\n raise ValueError(\n \"Must pass in UVCal object or UVFlag object of type \"\n '\"antenna\" to match.'\n )\n if self.type != \"waterfall\":\n raise ValueError(\n 'Cannot convert from type \"' + self.type + '\" to \"antenna\".'\n )\n # Deal with polarization\n if issubclass(uv.__class__, UVCal):\n polarr = uv.jones_array\n else:\n polarr = uv.polarization_array\n if force_pol and self.polarization_array.size == 1:\n # Use single pol for all pols, regardless\n self.polarization_array = polarr\n # Broadcast arrays\n if self.mode == \"flag\":\n self.flag_array = self.flag_array.repeat(\n self.polarization_array.size, axis=-1\n )\n else:\n self.metric_array = self.metric_array.repeat(\n self.polarization_array.size, axis=-1\n )\n self.weights_array = self.weights_array.repeat(\n self.polarization_array.size, axis=-1\n )\n self.Npols = len(self.polarization_array)\n self._check_pol_state()\n\n # Now the pol axes should match regardless of force_pol.\n if not np.array_equal(polarr, self.polarization_array):\n if self.polarization_array.size == 1:\n raise ValueError(\n \"Polarizations do not match. Try keyword force_pol\"\n + \"if you wish to broadcast to all polarizations.\"\n )\n else:\n raise ValueError(\"Polarizations could not be made to match.\")\n # Populate arrays\n if self.mode == \"flag\":\n self.flag_array = np.swapaxes(self.flag_array, 0, 1)[\n np.newaxis, np.newaxis, :, :, :\n ]\n self.flag_array = self.flag_array.repeat(len(uv.ant_array), axis=0)\n elif self.mode == \"metric\":\n self.metric_array = np.swapaxes(self.metric_array, 0, 1)[\n np.newaxis, np.newaxis, :, :, :\n ]\n self.metric_array = self.metric_array.repeat(len(uv.ant_array), axis=0)\n self.weights_array = np.swapaxes(self.weights_array, 0, 1)[\n np.newaxis, np.newaxis, :, :, :\n ]\n self.weights_array = self.weights_array.repeat(len(uv.ant_array), axis=0)\n self.ant_array = uv.ant_array\n self.Nants_data = len(uv.ant_array)\n # Check the frequency array for Nspws, otherwise broadcast to 1,Nfreqs\n self.freq_array = np.atleast_2d(self.freq_array)\n self.Nspws = self.freq_array.shape[0]\n\n self._set_type_antenna()\n self.history += 'Broadcast to type \"antenna\". 
'\n\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n def to_flag(\n self,\n threshold=np.inf,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Convert to flag mode.\n\n This function is NOT SMART. Removes metric_array and creates a\n flag_array from a simple threshold on the metric values.\n\n Parameters\n ----------\n threshold : float\n Metric value over which the corresponding flag is\n set to True. Default is np.inf, which results in flags of all False.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after converting to flag mode.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n converting to flag mode.\n\n \"\"\"\n if self.mode == \"flag\":\n return\n elif self.mode == \"metric\":\n self.flag_array = np.where(self.metric_array >= threshold, True, False)\n self._set_mode_flag()\n else:\n raise ValueError(\n \"Unknown UVFlag mode: \" + self.mode + \". Cannot convert to flag.\"\n )\n self.history += 'Converted to mode \"flag\". '\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n self.clear_unused_attributes()\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n def to_metric(\n self,\n convert_wgts=False,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Convert to metric mode.\n\n This function is NOT SMART. Simply recasts flag_array as float\n and uses this as the metric array.\n\n Parameters\n ----------\n convert_wgts : bool\n if True convert self.weights_array to ones\n unless a column or row is completely flagged, in which case\n convert those pixels to zero. This is used when reinterpretting\n flags as metrics to calculate flag fraction. 
Zero weighting\n completely flagged rows/columns prevents those from counting\n against a threshold along the other dimension.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after converting to metric mode.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n converting to metric mode.\n\n \"\"\"\n if self.mode == \"metric\":\n return\n elif self.mode == \"flag\":\n self.metric_array = self.flag_array.astype(np.float)\n self._set_mode_metric()\n\n if convert_wgts:\n self.weights_array = np.ones_like(self.weights_array)\n if self.type == \"waterfall\":\n for i, pol in enumerate(self.polarization_array):\n self.weights_array[:, :, i] *= ~and_rows_cols(\n self.flag_array[:, :, i]\n )\n elif self.type == \"baseline\":\n for i, pol in enumerate(self.polarization_array):\n for j, ap in enumerate(self.get_antpairs()):\n inds = self.antpair2ind(*ap)\n self.weights_array[inds, 0, :, i] *= ~and_rows_cols(\n self.flag_array[inds, 0, :, i]\n )\n elif self.type == \"antenna\":\n for i, pol in enumerate(self.polarization_array):\n for j in range(self.weights_array.shape[0]):\n self.weights_array[j, 0, :, :, i] *= ~and_rows_cols(\n self.flag_array[j, 0, :, :, i]\n )\n else:\n raise ValueError(\n \"Unknown UVFlag mode: \" + self.mode + \". Cannot convert to metric.\"\n )\n self.history += 'Converted to mode \"metric\". '\n\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n self.clear_unused_attributes()\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n def __add__(\n self,\n other,\n inplace=False,\n axis=\"time\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Add two UVFlag objects together along a given axis.\n\n Parameters\n ----------\n other : UVFlag\n object to combine with self.\n axis : str\n Axis along which to combine UVFlag objects.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after combining two objects.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n combining two objects.\n inplace : bool\n Option to perform the select directly on self or return a new UVData\n object with just the selected data.\n\n Returns\n -------\n uvf : UVFlag\n If inplace==False, return new UVFlag object.\n\n \"\"\"\n # Handle in place\n if inplace:\n this = self\n else:\n this = self.copy()\n\n # Check that objects are compatible\n if not isinstance(other, this.__class__):\n raise ValueError(\"Only UVFlag objects can be added to a UVFlag object\")\n if this.type != other.type:\n raise ValueError(\n \"UVFlag object of type \" + other.type + \" cannot be \"\n \"added to object of type \" + this.type + \".\"\n )\n if this.mode != other.mode:\n raise ValueError(\n \"UVFlag object of mode \" + other.mode + \" cannot be \"\n \"added to object of mode \" + this.type + \".\"\n )\n\n # Simplify axis referencing\n axis = axis.lower()\n type_nums = {\"waterfall\": 0, \"baseline\": 1, \"antenna\": 2}\n axis_nums = {\n \"time\": [0, 0, 3],\n \"baseline\": [None, 0, None],\n \"antenna\": [None, None, 0],\n \"frequency\": [1, 2, 2],\n \"polarization\": [2, 3, 4],\n \"pol\": [2, 3, 4],\n \"jones\": [2, 
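# Illustrative sketch: the simple thresholding done by to_flag, and the round
# trip back to metric mode.  uvf is assumed to be a metric-mode UVFlag and
# 5.0 is an arbitrary example threshold.
uvf.to_flag(threshold=5.0)         # metric_array >= 5.0 becomes a True flag
uvf.to_metric(convert_wgts=True)   # flags recast as 0./1. metrics; weights
                                   # zeroed on fully flagged rows/columns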
3, 4],\n }\n ax = axis_nums[axis][type_nums[self.type]]\n if axis == \"time\":\n this.time_array = np.concatenate([this.time_array, other.time_array])\n this.lst_array = np.concatenate([this.lst_array, other.lst_array])\n if this.type == \"baseline\":\n this.baseline_array = np.concatenate(\n [this.baseline_array, other.baseline_array]\n )\n this.ant_1_array = np.concatenate([this.ant_1_array, other.ant_1_array])\n this.ant_2_array = np.concatenate([this.ant_2_array, other.ant_2_array])\n this.Nants_data = int(\n np.union1d(this.ant_1_array, this.ant_2_array).size\n )\n\n this.Ntimes = np.unique(this.time_array).size\n this.Nblts = len(this.time_array)\n\n elif axis == \"baseline\":\n if self.type != \"baseline\":\n raise ValueError(\n \"Flag object of type \" + self.type + \" cannot be \"\n \"concatenated along baseline axis.\"\n )\n this.time_array = np.concatenate([this.time_array, other.time_array])\n this.lst_array = np.concatenate([this.lst_array, other.lst_array])\n this.baseline_array = np.concatenate(\n [this.baseline_array, other.baseline_array]\n )\n this.ant_1_array = np.concatenate([this.ant_1_array, other.ant_1_array])\n this.ant_2_array = np.concatenate([this.ant_2_array, other.ant_2_array])\n this.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)\n\n this.Nbls = np.unique(this.baseline_array).size\n this.Nblts = len(this.baseline_array)\n\n elif axis == \"antenna\":\n if self.type != \"antenna\":\n raise ValueError(\n \"Flag object of type \" + self.type + \" cannot be \"\n \"concatenated along antenna axis.\"\n )\n this.ant_array = np.concatenate([this.ant_array, other.ant_array])\n this.Nants_data = len(this.ant_array)\n elif axis == \"frequency\":\n this.freq_array = np.concatenate(\n [this.freq_array, other.freq_array], axis=-1\n )\n this.Nfreqs = np.unique(this.freq_array.flatten()).size\n elif axis in [\"polarization\", \"pol\", \"jones\"]:\n if this.pol_collapsed:\n raise NotImplementedError(\n \"Two UVFlag objects with their \"\n \"polarizations collapsed cannot be \"\n \"added along the polarization axis \"\n \"at this time.\"\n )\n this.polarization_array = np.concatenate(\n [this.polarization_array, other.polarization_array]\n )\n this.Npols = len(this.polarization_array)\n\n for attr in this._data_params:\n # Check that 'other' also has the attribute filled\n if getattr(other, attr) is not None:\n setattr(\n this,\n attr,\n np.concatenate(\n [getattr(this, attr), getattr(other, attr)], axis=ax\n ),\n )\n # May 21, 2020 - should only happen for weights_square_array attr\n else:\n raise ValueError(\n f\"{attr} optional parameter is missing from second UVFlag\"\n f\" object. To concatenate two {this.mode} objects, they\"\n \" must both contain the same optional parameters set.\"\n )\n\n this.history += \"Data combined along \" + axis + \" axis. 
\"\n if not uvutils._check_history_version(this.history, this.pyuvdata_version_str):\n this.history += this.pyuvdata_version_str\n\n this.Ntimes = np.unique(this.time_array).size\n\n if run_check:\n this.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n if not inplace:\n return this\n\n def __iadd__(\n self,\n other,\n axis=\"time\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"In place add.\n\n Parameters\n ----------\n other : UVFlag\n object to combine with self.\n axis : str\n Axis along which to combine UVFlag objects.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after combining two objects.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n combining two objects.\n\n \"\"\"\n self.__add__(\n other,\n inplace=True,\n axis=axis,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n )\n return self\n\n def __or__(\n self,\n other,\n inplace=False,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Combine two UVFlag objects in \"flag\" mode by \"OR\"-ing their flags.\n\n Parameters\n ----------\n other : UVFlag\n object to combine with self.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after combining two objects.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n combining two objects.\n inplace : bool\n Option to perform the select directly on self or return a new UVData\n object with just the selected data.\n\n Returns\n -------\n uvf : UVFlag\n If inplace==False, return new UVFlag object.\n\n \"\"\"\n if (self.mode != \"flag\") or (other.mode != \"flag\"):\n raise ValueError(\n 'UVFlag object must be in \"flag\" mode to use \"or\" function.'\n )\n\n # Handle in place\n if inplace:\n this = self\n else:\n this = self.copy()\n this.flag_array += other.flag_array\n if other.history not in this.history:\n this.history += \"Flags OR'd with: \" + other.history\n\n if not uvutils._check_history_version(this.history, this.pyuvdata_version_str):\n this.history += this.pyuvdata_version_str\n\n if run_check:\n this.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n if not inplace:\n return this\n\n def __ior__(\n self, other, run_check=True, check_extra=True, run_check_acceptability=True\n ):\n \"\"\"Perform an inplace logical or.\n\n Parameters\n ----------\n other : UVFlag\n object to combine with self.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after combining two objects.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n combining two objects.\n\n \"\"\"\n self.__or__(\n other,\n inplace=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n )\n return self\n\n def combine_metrics(\n self,\n others,\n method=\"quadmean\",\n inplace=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Combine metric arrays between different UVFlag objects together.\n\n Parameters\n ----------\n others : UVFlag or list of UVFlags\n Other UVFlag objects 
to combine metrics with this one.\n method : str, {\"quadmean\", \"absmean\", \"mean\", \"or\", \"and\"}\n Method to combine metrics.\n inplace : bool, optional\n Perform combination in place.\n\n Returns\n -------\n uvf : UVFlag\n If inplace==False, return new UVFlag object with combined metrics.\n\n \"\"\"\n # Ensure others is iterable (in case of single UVFlag object)\n # cannot use uvutils._get_iterable because the object itself is iterable\n if not isinstance(others, (list, tuple, np.ndarray)):\n others = [others]\n\n if np.any([not isinstance(other, UVFlag) for other in others]):\n raise ValueError('\"others\" must be UVFlag or list of UVFlag objects')\n if (self.mode != \"metric\") or np.any(\n [other.mode != \"metric\" for other in others]\n ):\n raise ValueError(\n 'UVFlag object and \"others\" must be in \"metric\" mode '\n 'to use \"add_metrics\" function.'\n )\n if inplace:\n this = self\n else:\n this = self.copy()\n method = method.lower()\n darray = np.expand_dims(this.metric_array, 0)\n warray = np.expand_dims(this.weights_array, 0)\n for other in others:\n if this.metric_array.shape != other.metric_array.shape:\n raise ValueError(\"UVFlag metric array shapes do not match.\")\n darray = np.vstack([darray, np.expand_dims(other.metric_array, 0)])\n warray = np.vstack([warray, np.expand_dims(other.weights_array, 0)])\n darray, warray = uvutils.collapse(\n darray, method, weights=warray, axis=0, return_weights=True\n )\n this.metric_array = darray\n this.weights_array = warray\n this.history += \"Combined metric arrays. \"\n\n if not uvutils._check_history_version(this.history, this.pyuvdata_version_str):\n this.history += this.pyuvdata_version_str\n\n if run_check:\n this.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n if not inplace:\n return this\n\n def _select_preprocess(\n self,\n antenna_nums,\n ant_str,\n bls,\n frequencies,\n freq_chans,\n times,\n polarizations,\n blt_inds,\n ant_inds,\n ):\n \"\"\"Build up blt_inds, freq_inds, pol_inds and history_update_string for select.\n\n Parameters\n ----------\n antenna_nums : array_like of int, optional\n The antennas numbers to keep in the object (antenna positions and\n names for the removed antennas will be retained unless\n `keep_all_metadata` is False).\n bls : list of tuple, optional\n A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of\n baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines\n to keep in the object. For length-2 tuples, the ordering of the numbers\n within the tuple does not matter. For length-3 tuples, the polarization\n string is in the order of the two antennas. If length-3 tuples are\n provided, `polarizations` must be None.\n ant_str : str, optional\n A string containing information about what antenna numbers\n and polarizations to keep in the object. Can be 'auto', 'cross', 'all',\n or combinations of antenna numbers and polarizations (e.g. '1',\n '1_2', '1x_2y'). 
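# --- Editor's illustrative sketch (not part of the original source) ---
# combine_metrics(), defined above, collapses the metric arrays of several
# "metric"-mode UVFlag objects into one via uvutils.collapse, using one of
# "quadmean", "absmean", "mean", "or" or "and".  The file names below are
# placeholders, not files shipped with pyuvdata.
from pyuvdata import UVFlag

uvf = UVFlag("metrics_night1.h5")                       # placeholder path
others = [UVFlag("metrics_night2.h5"),                  # placeholder path
          UVFlag("metrics_night3.h5")]                  # placeholder path

# quadrature mean across the three objects; inplace=False returns a new object
combined = uvf.combine_metrics(others, method="quadmean", inplace=False)

# the weights arrays are combined too, and the history records the operation
print(combined.metric_array.shape)
print("Combined metric arrays." in combined.history)
# ----------------------------------------------------------------------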
See tutorial for more examples of valid strings and\n the behavior of different forms for ant_str.\n If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will\n be kept for both baselines (1, 2) and (2, 3) to return a valid\n pyuvdata object.\n An ant_str cannot be passed in addition to any of `antenna_nums`,\n `bls` args or the `polarizations` parameters,\n if it is a ValueError will be raised.\n frequencies : array_like of float, optional\n The frequencies to keep in the object, each value passed here should\n exist in the freq_array.\n freq_chans : array_like of int, optional\n The frequency channel numbers to keep in the object.\n times : array_like of float, optional\n The times to keep in the object, each value passed here should\n exist in the time_array.\n polarizations : array_like of int, optional\n The polarizations numbers to keep in the object, each value passed\n here should exist in the polarization_array.\n blt_inds : array_like of int, optional\n The baseline-time indices to keep in the object. This is\n not commonly used.\n ant_inds : array_like of int, optional\n The antenna indices to keep in the object. This is\n not commonly used.\n\n Returns\n -------\n blt_inds : list of int\n list of baseline-time indices to keep. Can be None (to keep everything).\n ant_inds : list of int\n list of antenna number indices to keep. Can be None\n (keep all; only valid for \"antenna\" mode).\n freq_inds : list of int\n list of frequency indices to keep. Can be None (to keep everything).\n pol_inds : list of int\n list of polarization indices to keep. Can be None (to keep everything).\n history_update_string : str\n string to append to the end of the history.\n\n \"\"\"\n # build up history string as we go\n history_update_string = \" Downselected to specific \"\n n_selects = 0\n\n if self.type == \"waterfall\":\n if antenna_nums is not None:\n raise ValueError(\n \"Cannot select on antenna_nums with waterfall type \"\n \"UVFlag objects.\"\n )\n if bls is not None:\n raise ValueError(\n \"Cannot select on bls with waterfall type \" \"UVFlag objects.\"\n )\n\n if ant_str is not None:\n if not (antenna_nums is None and bls is None and polarizations is None):\n raise ValueError(\n \"Cannot provide ant_str with antenna_nums, bls, or polarizations.\"\n )\n else:\n bls, polarizations = self.parse_ants(ant_str)\n if bls is not None and len(bls) == 0:\n raise ValueError(\n f\"There is no data matching ant_str={ant_str} in this object.\"\n )\n\n # Antennas, times and blt_inds all need to be combined into a set of\n # blts indices to keep.\n\n # test for blt_inds presence before adding inds from antennas & times\n if blt_inds is not None:\n blt_inds = uvutils._get_iterable(blt_inds)\n if np.array(blt_inds).ndim > 1:\n blt_inds = np.array(blt_inds).flatten()\n if self.type == \"baseline\":\n history_update_string += \"baseline-times\"\n else:\n history_update_string += \"times\"\n n_selects += 1\n\n if antenna_nums is not None:\n antenna_nums = uvutils._get_iterable(antenna_nums)\n if np.array(antenna_nums).ndim > 1:\n antenna_nums = np.array(antenna_nums).flatten()\n if n_selects > 0:\n history_update_string += \", antennas\"\n else:\n history_update_string += \"antennas\"\n n_selects += 1\n\n if self.type == \"baseline\":\n inds1 = np.zeros(0, dtype=np.int)\n inds2 = np.zeros(0, dtype=np.int)\n for ant in antenna_nums:\n if ant in self.ant_1_array or ant in self.ant_2_array:\n wh1 = np.where(self.ant_1_array == ant)[0]\n wh2 = np.where(self.ant_2_array == ant)[0]\n if len(wh1) > 0:\n 
inds1 = np.append(inds1, list(wh1))\n if len(wh2) > 0:\n inds2 = np.append(inds2, list(wh2))\n else:\n raise ValueError(\n \"Antenna number {a} is not present in the \"\n \"ant_1_array or ant_2_array\".format(a=ant)\n )\n ant_blt_inds = set(inds1).intersection(inds2)\n\n if self.type == \"antenna\":\n ant_blt_inds = None\n ant_inds = np.zeros(0, dtype=np.int)\n for ant in antenna_nums:\n if ant in self.ant_array:\n wh = np.nonzero(self.ant_array == ant)[0]\n if len(wh) > 0:\n ant_inds = np.append(ant_inds, list(wh))\n else:\n raise ValueError(\n \"Antenna number {a} is not present in the \"\n \"ant_array\".format(a=ant)\n )\n\n else:\n ant_blt_inds = None\n\n if bls is not None:\n if self.type != \"baseline\":\n raise ValueError(\n 'Only \"baseline\" mode UVFlag objects may select'\n \" along the baseline axis\"\n )\n if isinstance(bls, tuple) and (len(bls) == 2 or len(bls) == 3):\n bls = [bls]\n if not all(isinstance(item, tuple) for item in bls):\n raise ValueError(\n \"bls must be a list of tuples of antenna numbers \"\n \"(optionally with polarization).\"\n )\n if not all(\n [isinstance(item[0], (int, np.integer,)) for item in bls]\n + [isinstance(item[1], (int, np.integer,)) for item in bls]\n ):\n raise ValueError(\n \"bls must be a list of tuples of integer antenna numbers \"\n \"(optionally with polarization).\"\n )\n if all(len(item) == 3 for item in bls):\n if polarizations is not None:\n raise ValueError(\n \"Cannot provide length-3 tuples and also specify polarizations.\"\n )\n if not all(isinstance(item[2], str) for item in bls):\n raise ValueError(\n \"The third element in each bl must be a polarization string\"\n )\n\n if n_selects > 0:\n history_update_string += \", baselines\"\n else:\n history_update_string += \"baselines\"\n\n n_selects += 1\n bls_blt_inds = np.zeros(0, dtype=np.int)\n bl_pols = set()\n for bl in bls:\n if not (bl[0] in self.ant_1_array or bl[0] in self.ant_2_array):\n raise ValueError(\n \"Antenna number {a} is not present in the \"\n \"ant_1_array or ant_2_array\".format(a=bl[0])\n )\n if not (bl[1] in self.ant_1_array or bl[1] in self.ant_2_array):\n raise ValueError(\n \"Antenna number {a} is not present in the \"\n \"ant_1_array or ant_2_array\".format(a=bl[1])\n )\n wh1 = np.where(\n np.logical_and(self.ant_1_array == bl[0], self.ant_2_array == bl[1])\n )[0]\n wh2 = np.where(\n np.logical_and(self.ant_1_array == bl[1], self.ant_2_array == bl[0])\n )[0]\n if len(wh1) > 0:\n bls_blt_inds = np.append(bls_blt_inds, list(wh1))\n if len(bl) == 3:\n bl_pols.add(bl[2])\n elif len(wh2) > 0:\n bls_blt_inds = np.append(bls_blt_inds, list(wh2))\n if len(bl) == 3:\n bl_pols.add(uvutils.conj_pol(bl[2]))\n else:\n raise ValueError(\n \"Antenna pair {p} does not have any data \"\n \"associated with it.\".format(p=bl)\n )\n if len(bl_pols) > 0:\n polarizations = list(bl_pols)\n\n if ant_blt_inds is not None:\n # Use intersection (and) to join antenna_names/nums & ant_pairs_nums\n ant_blt_inds = set(ant_blt_inds).intersection(bls_blt_inds)\n else:\n ant_blt_inds = bls_blt_inds\n\n if ant_blt_inds is not None:\n if blt_inds is not None:\n # Use intersection (and) to join\n # antenna_names/nums/ant_pairs_nums with blt_inds\n blt_inds = set(blt_inds).intersection(ant_blt_inds)\n else:\n blt_inds = ant_blt_inds\n\n if times is not None:\n times = uvutils._get_iterable(times)\n if np.array(times).ndim > 1:\n times = np.array(times).flatten()\n\n if n_selects > 0:\n if (\n self.type != \"baseline\" and \"times\" not in history_update_string\n ) or self.type == 
\"baseline\":\n\n history_update_string += \", times\"\n else:\n history_update_string += \"times\"\n\n n_selects += 1\n\n time_blt_inds = np.zeros(0, dtype=np.int)\n for jd in times:\n if jd in self.time_array:\n time_blt_inds = np.append(\n time_blt_inds, np.where(self.time_array == jd)[0]\n )\n else:\n raise ValueError(\n \"Time {t} is not present in the time_array\".format(t=jd)\n )\n\n if blt_inds is not None:\n # Use intesection (and) to join\n # antenna_names/nums/ant_pairs_nums/blt_inds with times\n blt_inds = set(blt_inds).intersection(time_blt_inds)\n else:\n blt_inds = time_blt_inds\n\n if blt_inds is not None:\n if len(blt_inds) == 0:\n raise ValueError(\"No baseline-times were found that match criteria\")\n\n if self.type == \"baseline\":\n compare_length = self.Nblts\n else:\n compare_length = self.Ntimes\n\n if max(blt_inds) >= compare_length:\n raise ValueError(\"blt_inds contains indices that are too large\")\n if min(blt_inds) < 0:\n raise ValueError(\"blt_inds contains indices that are negative\")\n\n blt_inds = sorted(set(blt_inds))\n\n if freq_chans is not None:\n freq_chans = uvutils._get_iterable(freq_chans)\n if np.array(freq_chans).ndim > 1:\n freq_chans = np.array(freq_chans).flatten()\n if frequencies is None:\n if self.type != \"waterfall\":\n frequencies = self.freq_array[0, freq_chans]\n else:\n frequencies = self.freq_array[freq_chans]\n\n else:\n frequencies = uvutils._get_iterable(frequencies)\n if self.type != \"waterfall\":\n frequencies = np.sort(\n list(set(frequencies) | set(self.freq_array[0, freq_chans]))\n )\n else:\n frequencies = np.sort(\n list(set(frequencies) | set(self.freq_array[freq_chans]))\n )\n\n if frequencies is not None:\n frequencies = uvutils._get_iterable(frequencies)\n if np.array(frequencies).ndim > 1:\n frequencies = np.array(frequencies).flatten()\n if n_selects > 0:\n history_update_string += \", frequencies\"\n else:\n history_update_string += \"frequencies\"\n n_selects += 1\n\n freq_inds = np.zeros(0, dtype=np.int)\n # this works because we only allow one SPW. 
This will have to be\n # reworked when we support more.\n if self.type != \"waterfall\":\n freq_arr_use = self.freq_array[0, :]\n else:\n freq_arr_use = self.freq_array[:]\n for f in frequencies:\n if f in freq_arr_use:\n freq_inds = np.append(freq_inds, np.where(freq_arr_use == f)[0])\n else:\n raise ValueError(\n \"Frequency {f} is not present in the freq_array\".format(f=f)\n )\n\n freq_inds = sorted(set(freq_inds))\n else:\n freq_inds = None\n\n if polarizations is not None:\n polarizations = uvutils._get_iterable(polarizations)\n if np.array(polarizations).ndim > 1:\n polarizations = np.array(polarizations).flatten()\n if n_selects > 0:\n history_update_string += \", polarizations\"\n else:\n history_update_string += \"polarizations\"\n n_selects += 1\n\n pol_inds = np.zeros(0, dtype=np.int)\n for p in polarizations:\n if isinstance(p, str):\n p_num = uvutils.polstr2num(p, x_orientation=self.x_orientation)\n else:\n p_num = p\n if p_num in self.polarization_array:\n pol_inds = np.append(\n pol_inds, np.where(self.polarization_array == p_num)[0]\n )\n else:\n raise ValueError(\n \"Polarization {p} is not present in the \"\n \"polarization_array\".format(p=p)\n )\n\n pol_inds = sorted(set(pol_inds))\n else:\n pol_inds = None\n\n history_update_string += \" using pyuvdata.\"\n\n return blt_inds, ant_inds, freq_inds, pol_inds, history_update_string\n\n def _select_metadata(\n self, blt_inds, ant_inds, freq_inds, pol_inds, history_update_string\n ):\n \"\"\"Perform select on everything except the data-sized arrays.\n\n Parameters\n ----------\n blt_inds : list of int\n list of baseline-time indices to keep. Can be None (to keep everything).\n freq_inds : list of int\n list of frequency indices to keep. Can be None (to keep everything).\n pol_inds : list of int\n list of polarization indices to keep. 
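# --- Editor's illustrative sketch (not part of the original source) ---
# The polarization branch above accepts either AIPS integers or strings;
# strings are converted with uvutils.polstr2num (honouring x_orientation)
# before being matched against polarization_array.  A small stand-alone
# version of that lookup on toy data:
import numpy as np
from pyuvdata import utils as uvutils

polarization_array = np.array([-5, -6, -7, -8])   # xx, yy, xy, yx

requested = ["ee", "nn"]          # east/north feed strings, allowed when
x_orientation = "east"            # x_orientation is set

pol_inds = np.zeros(0, dtype=np.int64)
for p in requested:
    if isinstance(p, str):
        p_num = uvutils.polstr2num(p, x_orientation=x_orientation)
    else:
        p_num = p
    pol_inds = np.append(pol_inds, np.where(polarization_array == p_num)[0])

print(sorted(int(i) for i in set(pol_inds)))   # -> [0, 1], the xx and yy slots
# ----------------------------------------------------------------------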
Can be None (to keep everything).\n history_update_string : str\n string to append to the end of the history.\n keep_all_metadata : bool\n Option to keep metadata for antennas that are no longer in the dataset.\n\n \"\"\"\n if blt_inds is not None:\n if self.type == \"baseline\":\n self.Nblts = len(blt_inds)\n self.baseline_array = self.baseline_array[blt_inds]\n self.Nbls = len(np.unique(self.baseline_array))\n self.ant_1_array = self.ant_1_array[blt_inds]\n self.ant_2_array = self.ant_2_array[blt_inds]\n self.Nants_data = int(\n np.union1d(self.ant_1_array, self.ant_2_array).size\n )\n\n self.time_array = self.time_array[blt_inds]\n self.lst_array = self.lst_array[blt_inds]\n self.Ntimes = len(np.unique(self.time_array))\n\n if self.type == \"antenna\":\n if ant_inds is not None:\n self.ant_array = self.ant_array[ant_inds]\n self.Nants_data = int(len(self.ant_array))\n\n if freq_inds is not None:\n self.Nfreqs = len(freq_inds)\n if self.type != \"waterfall\":\n self.freq_array = self.freq_array[:, freq_inds]\n else:\n self.freq_array = self.freq_array[freq_inds]\n\n if pol_inds is not None:\n self.Npols = len(pol_inds)\n self.polarization_array = self.polarization_array[pol_inds]\n\n self.history = self.history + history_update_string\n\n def select(\n self,\n antenna_nums=None,\n ant_inds=None,\n bls=None,\n ant_str=None,\n frequencies=None,\n freq_chans=None,\n times=None,\n polarizations=None,\n blt_inds=None,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n inplace=True,\n ):\n \"\"\"\n Downselect data to keep on the object along various axes.\n\n Axes that can be selected along depend on the current type of the object.\n However some axis may always be selected upon, these include frequencies,\n times and polarizations.\n In \"baseline\" and \"antenna\" modes, antenna numbers may be selected.\n In \"baseline\" mode, antenna pairs may be selected.\n Specific baseline-time indices can also be selected in \"baseline\" mode,\n but this is not commonly used.\n The history attribute on the object will be updated to identify the\n operations performed.\n\n Parameters\n ----------\n antenna_nums : array_like of int, optional\n The antennas numbers to keep in the object (antenna positions and\n names for the removed antennas will be retained unless\n `keep_all_metadata` is False). This cannot be provided if\n `antenna_names` is also provided.\n bls : list of tuple, optional\n A list of antenna number tuples (e.g. [(0,1), (3,2)]) or a list of\n baseline 3-tuples (e.g. [(0,1,'xx'), (2,3,'yy')]) specifying baselines\n to keep in the object. For length-2 tuples, the ordering of the numbers\n within the tuple does not matter. For length-3 tuples, the polarization\n string is in the order of the two antennas. If length-3 tuples are\n provided, `polarizations` must be None.\n ant_str : str, optional\n A string containing information about what antenna numbers\n and polarizations to keep in the object. Can be 'auto', 'cross', 'all',\n or combinations of antenna numbers and polarizations (e.g. '1',\n '1_2', '1x_2y'). 
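# --- Editor's illustrative sketch (not part of the original source) ---
# _select_metadata(), shown above, only touches the metadata-sized arrays:
# each kept-index list is applied by fancy indexing and the matching N*
# counters are recomputed.  The same pattern on toy arrays:
import numpy as np

time_array = np.array([2457698.1, 2457698.1, 2457698.2, 2457698.2])
baseline_array = np.array([69633, 69634, 69633, 69634])

blt_inds = [0, 2]                       # indices chosen by _select_preprocess

time_array = time_array[blt_inds]
baseline_array = baseline_array[blt_inds]

Nblts = len(blt_inds)                   # 2
Ntimes = np.unique(time_array).size     # 2
Nbls = np.unique(baseline_array).size   # 1
print(Nblts, Ntimes, Nbls)
# ----------------------------------------------------------------------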
See tutorial for more examples of valid strings and\n the behavior of different forms for ant_str.\n If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will\n be kept for both baselines (1, 2) and (2, 3) to return a valid\n pyuvdata object.\n An ant_str cannot be passed in addition to any of `antenna_nums`,\n `antenna_names`, `bls` args or the `polarizations` parameters,\n if it is a ValueError will be raised.\n frequencies : array_like of float, optional\n The frequencies to keep in the object, each value passed here should\n exist in the freq_array.\n freq_chans : array_like of int, optional\n The frequency channel numbers to keep in the object.\n times : array_like of float, optional\n The times to keep in the object, each value passed here should\n exist in the time_array.\n polarizations : array_like of int, optional\n The polarizations numbers to keep in the object, each value passed\n here should exist in the polarization_array.\n blt_inds : array_like of int, optional\n The baseline-time indices to keep in the object. This is\n not commonly used.\n ant_inds : array_like of int, optional\n The antenna indices to keep in the object. This is\n not commonly used.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after downselecting data on this object.\n check_extra : bool\n Option to check optional parameters as well as required ones (the\n default is True, meaning the optional parameters will be checked).\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n downselecting data on this object.\n inplace : bool\n Option to perform the select directly on self or return a new UVData\n object with just the selected data.\n\n Returns\n -------\n UVData object or None\n None is returned if inplace is True, otherwise a new UVData object\n with just the selected data is returned\n\n Raises\n ------\n ValueError\n If any of the parameters are set to inappropriate values.\n\n \"\"\"\n if inplace:\n uv_object = self\n else:\n uv_object = self.copy()\n\n (\n blt_inds,\n ant_inds,\n freq_inds,\n pol_inds,\n history_update_string,\n ) = uv_object._select_preprocess(\n antenna_nums=antenna_nums,\n ant_str=ant_str,\n bls=bls,\n frequencies=frequencies,\n freq_chans=freq_chans,\n times=times,\n polarizations=polarizations,\n blt_inds=blt_inds,\n ant_inds=ant_inds,\n )\n\n # do select operations on everything except data_array, flag_array\n # and nsample_array\n uv_object._select_metadata(\n blt_inds, ant_inds, freq_inds, pol_inds, history_update_string\n )\n\n if blt_inds is not None:\n if self.type == \"baseline\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[blt_inds, :, :, :])\n if self.type == \"waterfall\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[blt_inds, :, :])\n if self.type == \"antenna\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[:, :, :, blt_inds, :])\n\n if ant_inds is not None and self.type == \"antenna\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[ant_inds, :, :, :])\n\n if freq_inds is not None:\n if self.type == \"baseline\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, 
param[:, :, freq_inds, :])\n if self.type == \"waterfall\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[:, freq_inds, :])\n if self.type == \"antenna\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[:, :, freq_inds, :, :])\n\n if pol_inds is not None:\n if self.type == \"baseline\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[:, :, :, pol_inds])\n if self.type == \"waterfall\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[:, :, pol_inds])\n if self.type == \"antenna\":\n for param_name, param in zip(\n self._data_params, uv_object.data_like_parameters\n ):\n setattr(uv_object, param_name, param[:, :, :, :, pol_inds])\n\n # check if object is uv_object-consistent\n if run_check:\n uv_object.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n if not inplace:\n return uv_object\n\n def read(\n self,\n filename,\n history=\"\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Read in flag/metric data from a HDF5 file.\n\n Parameters\n ----------\n filename : str or pathlib.Path\n The file name to read.\n history : str\n History string to append to UVFlag history attribute.\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after reading data.\n check_extra : bool\n Option to check optional parameters as well as required ones.\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n reading data.\n\n \"\"\"\n if isinstance(filename, (tuple, list)):\n self.read(filename[0])\n if len(filename) > 1:\n for f in filename[1:]:\n f2 = UVFlag(f, history=history)\n self += f2\n del f2\n\n else:\n if not os.path.exists(filename):\n raise IOError(filename + \" not found.\")\n\n # Open file for reading\n with h5py.File(filename, \"r\") as f:\n header = f[\"/Header\"]\n\n self.type = header[\"type\"][()].decode(\"utf8\")\n if self.type == \"antenna\":\n self._set_type_antenna()\n elif self.type == \"baseline\":\n self._set_type_baseline()\n elif self.type == \"waterfall\":\n self._set_type_waterfall()\n else:\n raise ValueError(\n \"File cannot be read. Received type \"\n \"parameter: {receive} but \"\n \"must be within acceptable values: \"\n \"{expect}\".format(\n receive=self.type,\n expect=(\", \").join(self._type.acceptable_vals),\n )\n )\n\n self.mode = header[\"mode\"][()].decode(\"utf8\")\n\n if self.mode == \"metric\":\n self._set_mode_metric()\n elif self.mode == \"flag\":\n self._set_mode_flag()\n else:\n raise ValueError(\n \"File cannot be read. 
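# --- Editor's illustrative sketch (not part of the original source) ---
# Example use of select() as implemented above.  The file name is a
# placeholder; values passed via `frequencies`, `times` or `polarizations`
# must already exist in the corresponding arrays or a ValueError is raised.
import numpy as np
from pyuvdata import UVFlag

uvf = UVFlag("flags_waterfall.h5")                  # placeholder path

# keep the first 64 channels and only the 'xx' polarization,
# returning a new object instead of modifying uvf in place
uvf_sub = uvf.select(
    freq_chans=np.arange(64),
    polarizations=["xx"],
    inplace=False,
)
print(uvf_sub.Nfreqs, uvf_sub.Npols)                # -> e.g. 64 1
print("Downselected to specific" in uvf_sub.history)
# ----------------------------------------------------------------------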
Received mode \"\n \"parameter: {receive} but \"\n \"must be within acceptable values: \"\n \"{expect}\".format(\n receive=self.mode,\n expect=(\", \").join(self._mode.acceptable_vals),\n )\n )\n\n if \"x_orientation\" in header.keys():\n self.x_orientation = header[\"x_orientation\"][()].decode(\"utf8\")\n\n self.time_array = header[\"time_array\"][()]\n if \"Ntimes\" in header.keys():\n self.Ntimes = int(header[\"Ntimes\"][()])\n else:\n self.Ntimes = np.unique(self.time_array).size\n\n self.lst_array = header[\"lst_array\"][()]\n\n self.freq_array = header[\"freq_array\"][()]\n # older save files will not have this spws axis\n # at least_2d will preserve shape of 2d arrays and\n # promote 1D to (1, Nfreqs)\n if self.type != \"waterfall\":\n self.freq_array = np.atleast_2d(self.freq_array)\n\n if \"Nfreqs\" in header.keys():\n self.Nfreqs = int(header[\"Nfreqs\"][()])\n else:\n self.Nfreqs = np.unique(self.freq_array).size\n\n self.history = header[\"history\"][()].decode(\"utf8\")\n\n self.history += history\n\n if not uvutils._check_history_version(\n self.history, self.pyuvdata_version_str\n ):\n self.history += self.pyuvdata_version_str\n\n if \"label\" in header.keys():\n self.label = header[\"label\"][()].decode(\"utf8\")\n\n polarization_array = header[\"polarization_array\"][()]\n if isinstance(polarization_array[0], np.string_):\n polarization_array = np.asarray(polarization_array, dtype=np.str_)\n self.polarization_array = polarization_array\n self._check_pol_state()\n\n if \"Npols\" in header.keys():\n self.Npols = int(header[\"Npols\"][()])\n else:\n self.Npols = len(self.polarization_array)\n\n if self.type == \"baseline\":\n\n self.baseline_array = header[\"baseline_array\"][()]\n\n if \"Nblts\" in header.keys():\n self.Nblts = int(header[\"Nblts\"][()])\n else:\n self.Nblts = len(self.baseline_array)\n\n if \"Nbls\" in header.keys():\n self.Nbls = int(header[\"Nbls\"][()])\n else:\n self.Nbls = np.unique(self.baseline_array).size\n\n self.ant_1_array = header[\"ant_1_array\"][()]\n self.ant_2_array = header[\"ant_2_array\"][()]\n\n try:\n self.Nants_telescope = int(header[\"Nants_telescope\"][()])\n except KeyError:\n warnings.warn(\n \"Nants_telescope not available in file, \" \"assuming < 2048.\"\n )\n self.Nants_telescope = 2047\n\n if \"Nants_data\" in header.keys():\n self.Nants_data = int(header[\"Nants_data\"][()])\n else:\n self.Nants_data = int(\n np.unique(\n np.union1d(self.ant_1_array, self.ant_2_array)\n ).size\n )\n\n if \"Nspws\" in header.keys():\n self.Nspws = int(header[\"Nspws\"][()])\n else:\n self.Nspws = np.shape(self.freq_array)[0]\n\n elif self.type == \"antenna\":\n self.ant_array = header[\"ant_array\"][()]\n try:\n self.Nants_data = int(header[\"Nants_data\"][()])\n except KeyError:\n warnings.warn(\n \"Nants_data not available in file, \"\n \"attempting to calculate from ant_array.\"\n )\n self.Nants_data = len(self.ant_array)\n\n if \"Nspws\" in header.keys():\n self.Nspws = int(header[\"Nspws\"][()])\n else:\n self.Nspws = np.shape(self.freq_array)[0]\n\n dgrp = f[\"/Data\"]\n if self.mode == \"metric\":\n self.metric_array = dgrp[\"metric_array\"][()]\n self.weights_array = dgrp[\"weights_array\"][()]\n if \"weights_square_array\" in dgrp:\n self.weights_square_array = dgrp[\"weights_square_array\"][()]\n elif self.mode == \"flag\":\n self.flag_array = dgrp[\"flag_array\"][()]\n\n self.clear_unused_attributes()\n\n if run_check:\n self.check(\n check_extra=check_extra,\n run_check_acceptability=run_check_acceptability,\n )\n\n def write(self, 
filename, clobber=False, data_compression=\"lzf\"):\n \"\"\"Write a UVFlag object to a hdf5 file.\n\n Parameters\n ----------\n filename : str\n The file to write to.\n clobber : bool\n Option to overwrite the file if it already exists.\n data_compression : str\n HDF5 filter to apply when writing the data_array.\n If no compression is wanted, set to None.\n\n \"\"\"\n if os.path.exists(filename):\n if clobber:\n print(\"File \" + filename + \" exists; clobbering\")\n else:\n raise ValueError(\"File \" + filename + \" exists; skipping\")\n\n with h5py.File(filename, \"w\") as f:\n header = f.create_group(\"Header\")\n\n # write out metadata\n header[\"type\"] = np.string_(self.type)\n header[\"mode\"] = np.string_(self.mode)\n\n header[\"Ntimes\"] = self.Ntimes\n header[\"time_array\"] = self.time_array\n header[\"lst_array\"] = self.lst_array\n\n header[\"freq_array\"] = self.freq_array\n header[\"Nfreqs\"] = self.Nfreqs\n\n header[\"Npols\"] = self.Npols\n\n if self.x_orientation is not None:\n header[\"x_orientation\"] = np.string_(self.x_orientation)\n\n if isinstance(self.polarization_array.item(0), str):\n polarization_array = np.asarray(\n self.polarization_array, dtype=np.string_\n )\n else:\n polarization_array = self.polarization_array\n header[\"polarization_array\"] = polarization_array\n\n if not uvutils._check_history_version(\n self.history, self.pyuvdata_version_str\n ):\n self.history += self.pyuvdata_version_str\n\n header[\"history\"] = np.string_(self.history)\n\n header[\"label\"] = np.string_(self.label)\n\n if self.type == \"baseline\":\n header[\"baseline_array\"] = self.baseline_array\n header[\"Nbls\"] = self.Nbls\n header[\"Nblts\"] = self.Nblts\n header[\"ant_1_array\"] = self.ant_1_array\n header[\"ant_2_array\"] = self.ant_2_array\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nants_telescope\"] = self.Nants_telescope\n header[\"Nspws\"] = self.Nspws\n\n elif self.type == \"antenna\":\n header[\"ant_array\"] = self.ant_array\n header[\"Nants_data\"] = self.Nants_data\n header[\"Nspws\"] = self.Nspws\n\n dgrp = f.create_group(\"Data\")\n if self.mode == \"metric\":\n dgrp.create_dataset(\n \"metric_array\",\n chunks=True,\n data=self.metric_array,\n compression=data_compression,\n )\n dgrp.create_dataset(\n \"weights_array\",\n chunks=True,\n data=self.weights_array,\n compression=data_compression,\n )\n if self.weights_square_array is not None:\n dgrp.create_dataset(\n \"weights_square_array\",\n chunks=True,\n data=self.weights_square_array,\n compression=data_compression,\n )\n elif self.mode == \"flag\":\n dgrp.create_dataset(\n \"flag_array\",\n chunks=True,\n data=self.flag_array,\n compression=data_compression,\n )\n\n def from_uvdata(\n self,\n indata,\n mode=\"metric\",\n copy_flags=False,\n waterfall=False,\n history=\"\",\n label=\"\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Construct a UVFlag object from a UVData object.\n\n Parameters\n ----------\n indata : UVData\n Input to initialize UVFlag object.\n mode : {\"metric\", \"flag\"}, optional\n The mode determines whether the object has a floating point metric_array\n or a boolean flag_array.\n copy_flags : bool, optional\n Whether to copy flags from indata to new UVFlag object\n waterfall : bool, optional\n Whether to immediately initialize as a waterfall object, with flag/metric\n axes: time, frequency, polarization.\n history : str, optional\n History string to attach to object.\n label: str, optional\n String used for labeling the object (e.g. 
'FM').\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after creating UVFlag object.\n check_extra : bool\n Option to check optional parameters as well as required ones (the\n default is True, meaning the optional parameters will be checked).\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n creating UVFlag object.\n\n \"\"\"\n if not issubclass(indata.__class__, UVData):\n raise ValueError(\n \"from_uvdata can only initialize a UVFlag object from an input \"\n \"UVData object or a subclass of a UVData object.\"\n )\n\n if mode.lower() == \"metric\":\n self._set_mode_metric()\n elif mode.lower() == \"flag\":\n self._set_mode_flag()\n else:\n raise ValueError(\n \"Input mode must be within acceptable values: \"\n \"{}\".format((\", \").join(self._mode.acceptable_vals))\n )\n\n if waterfall:\n self._set_type_waterfall()\n self.history += 'Flag object with type \"waterfall\" created. '\n if not uvutils._check_history_version(\n self.history, self.pyuvdata_version_str\n ):\n self.history += self.pyuvdata_version_str\n\n self.time_array, ri = np.unique(indata.time_array, return_index=True)\n self.Ntimes = len(self.time_array)\n self.freq_array = indata.freq_array[0, :]\n self.Nspws = None\n self.Nfreqs = len(self.freq_array)\n self.Nblts = len(self.time_array)\n self.polarization_array = indata.polarization_array\n self.Npols = len(self.polarization_array)\n self.lst_array = indata.lst_array[ri]\n if copy_flags:\n raise NotImplementedError(\n \"Cannot copy flags when initializing waterfall UVFlag from \"\n \"UVData or UVCal.\"\n )\n else:\n if self.mode == \"flag\":\n self.flag_array = np.zeros(\n (\n len(self.time_array),\n len(self.freq_array),\n len(self.polarization_array),\n ),\n np.bool,\n )\n elif self.mode == \"metric\":\n self.metric_array = np.zeros(\n (\n len(self.time_array),\n len(self.freq_array),\n len(self.polarization_array),\n )\n )\n\n else:\n self._set_type_baseline()\n self.history += 'Flag object with type \"baseline\" created. 
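# --- Editor's illustrative sketch (not part of the original source) ---
# Round trip through the HDF5 read()/write() methods defined above.  The
# file names are placeholders; clobber=True allows overwriting an existing
# output file and data_compression defaults to "lzf".
from pyuvdata import UVFlag

uvf = UVFlag("input_flags.h5")                      # placeholder path
uvf.write("output_flags.h5", clobber=True)          # HDF5 output, lzf-compressed

# the constructor accepts a saved UVFlag file, as used in read() above
uvf2 = UVFlag("output_flags.h5", history="round-trip test. ")
print(uvf2.mode, uvf2.type, uvf2.Ntimes)
# ----------------------------------------------------------------------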
'\n if not uvutils._check_history_version(\n self.history, self.pyuvdata_version_str\n ):\n self.history += self.pyuvdata_version_str\n\n self.baseline_array = indata.baseline_array\n self.Nbls = np.unique(self.baseline_array).size\n self.Nblts = len(self.baseline_array)\n self.ant_1_array = indata.ant_1_array\n self.ant_2_array = indata.ant_2_array\n self.Nants_data = indata.Nants_data\n\n self.time_array = indata.time_array\n self.lst_array = indata.lst_array\n self.Ntimes = np.unique(self.time_array).size\n\n self.freq_array = indata.freq_array\n self.Nfreqs = np.unique(self.freq_array).size\n self.Nspws = indata.Nspws\n\n self.polarization_array = indata.polarization_array\n self.Npols = len(self.polarization_array)\n self.Nants_telescope = indata.Nants_telescope\n if copy_flags:\n self.flag_array = indata.flag_array\n self.history += (\n \" Flags copied from \" + str(indata.__class__) + \" object.\"\n )\n if self.mode == \"metric\":\n warnings.warn(\n 'Copying flags to type==\"baseline\" ' 'results in mode==\"flag\".'\n )\n self._set_mode_flag()\n else:\n if self.mode == \"flag\":\n self.flag_array = np.zeros_like(indata.flag_array)\n elif self.mode == \"metric\":\n self.metric_array = np.zeros_like(indata.flag_array).astype(\n np.float\n )\n\n if indata.x_orientation is not None:\n self.x_orientation = indata.x_orientation\n\n if self.mode == \"metric\":\n self.weights_array = np.ones(self.metric_array.shape)\n\n if history not in self.history:\n self.history += history\n self.label += label\n\n self.clear_unused_attributes()\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n return\n\n def from_uvcal(\n self,\n indata,\n mode=\"metric\",\n copy_flags=False,\n waterfall=False,\n history=\"\",\n label=\"\",\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n \"\"\"Construct a UVFlag object from a UVCal object.\n\n Parameters\n ----------\n indata : UVData\n Input to initialize UVFlag object.\n mode : {\"metric\", \"flag\"}, optional\n The mode determines whether the object has a floating point metric_array\n or a boolean flag_array.\n copy_flags : bool, optional\n Whether to copy flags from indata to new UVFlag object\n waterfall : bool, optional\n Whether to immediately initialize as a waterfall object, with flag/metric\n axes: time, frequency, polarization.\n history : str, optional\n History string to attach to object.\n label: str, optional\n String used for labeling the object (e.g. 'FM').\n run_check : bool\n Option to check for the existence and proper shapes of parameters\n after creating UVFlag object.\n check_extra : bool\n Option to check optional parameters as well as required ones (the\n default is True, meaning the optional parameters will be checked).\n run_check_acceptability : bool\n Option to check acceptable range of the values of parameters after\n creating UVFlag object.\n\n \"\"\"\n if not issubclass(indata.__class__, UVCal):\n raise ValueError(\n \"from_uvcal can only initialize a UVFlag object from an input \"\n \"UVCal object or a subclass of a UVCal object.\"\n )\n\n if mode.lower() == \"metric\":\n self._set_mode_metric()\n elif mode.lower() == \"flag\":\n self._set_mode_flag()\n else:\n raise ValueError(\n \"Input mode must be within acceptable values: \"\n \"{}\".format((\", \").join(self._mode.acceptable_vals))\n )\n\n if waterfall:\n self._set_type_waterfall()\n self.history += 'Flag object with type \"waterfall\" created. 
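# --- Editor's illustrative sketch (not part of the original source) ---
# from_uvdata(), defined above, seeds a UVFlag object from a UVData object:
# type "baseline" by default, or "waterfall" (time, frequency, polarization
# axes) when waterfall=True.  The data file is a placeholder, and passing
# the UVData object to the UVFlag constructor (which dispatches to this
# method in pyuvdata) is assumed here.
from pyuvdata import UVData, UVFlag

uvd = UVData()
uvd.read("observation.uvh5")                        # placeholder path

uvf_bl = UVFlag(uvd, mode="flag")                   # baseline-type flags
uvf_wf = UVFlag(uvd, mode="metric", waterfall=True)  # waterfall-type metrics

print(uvf_bl.type, uvf_bl.flag_array.shape)   # (Nblts, 1, Nfreqs, Npols) for single-spw data
print(uvf_wf.type, uvf_wf.metric_array.shape)  # (Ntimes, Nfreqs, Npols)
# ----------------------------------------------------------------------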
'\n if not uvutils._check_history_version(\n self.history, self.pyuvdata_version_str\n ):\n self.history += self.pyuvdata_version_str\n\n self.time_array, ri = np.unique(indata.time_array, return_index=True)\n self.Ntimes = len(self.time_array)\n self.freq_array = indata.freq_array[0, :]\n self.Nspws = None\n self.Nfreqs = len(self.freq_array)\n self.Nblts = len(self.time_array)\n self.polarization_array = indata.jones_array\n self.Npols = len(self.polarization_array)\n self.lst_array = lst_from_uv(indata)[ri]\n if copy_flags:\n raise NotImplementedError(\n \"Cannot copy flags when \"\n \"initializing waterfall UVFlag \"\n \"from UVData or UVCal.\"\n )\n else:\n if self.mode == \"flag\":\n self.flag_array = np.zeros(\n (\n len(self.time_array),\n len(self.freq_array),\n len(self.polarization_array),\n ),\n np.bool,\n )\n elif self.mode == \"metric\":\n self.metric_array = np.zeros(\n (\n len(self.time_array),\n len(self.freq_array),\n len(self.polarization_array),\n )\n )\n\n else:\n self._set_type_antenna()\n self.history += 'Flag object with type \"antenna\" created. '\n if not uvutils._check_history_version(\n self.history, self.pyuvdata_version_str\n ):\n self.history += self.pyuvdata_version_str\n self.ant_array = indata.ant_array\n self.Nants_data = len(self.ant_array)\n\n self.time_array = indata.time_array\n self.lst_array = lst_from_uv(indata)\n self.Ntimes = np.unique(self.time_array).size\n self.Nblts = self.Ntimes\n\n self.freq_array = indata.freq_array\n self.Nspws = indata.Nspws\n self.Nfreqs = np.unique(self.freq_array).size\n\n self.polarization_array = indata.jones_array\n self.Npols = len(self.polarization_array)\n if copy_flags:\n self.flag_array = indata.flag_array\n self.history += (\n \" Flags copied from \" + str(indata.__class__) + \" object.\"\n )\n if self.mode == \"metric\":\n warnings.warn(\n 'Copying flags to type==\"antenna\" ' 'results in mode==\"flag\".'\n )\n self._set_mode_flag()\n else:\n if self.mode == \"flag\":\n self.flag_array = np.zeros_like(indata.flag_array)\n elif self.mode == \"metric\":\n self.metric_array = np.zeros_like(indata.flag_array).astype(\n np.float\n )\n if self.mode == \"metric\":\n self.weights_array = np.ones(self.metric_array.shape)\n\n if indata.x_orientation is not None:\n self.x_orientation = indata.x_orientation\n\n if history not in self.history:\n self.history += history\n self.label += label\n\n self.clear_unused_attributes()\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n return\n", "id": "11721200", "language": "Python", "matching_score": 8.836396217346191, "max_stars_count": 0, "path": "pyuvdata/uvflag/uvflag.py" }, { "content": "# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Commonly used utility functions.\"\"\"\nimport re\nimport copy\nimport warnings\nfrom collections.abc import Iterable\n\nimport numpy as np\nfrom scipy.spatial.distance import pdist, squareform\nfrom astropy.time import Time\nfrom astropy.coordinates import Angle\nfrom astropy.utils import iers\n\nfrom . import _utils\n\n\ndef _str_to_bytes(s):\n warnings.warn(\n \"_str_to_bytes is deprecated and will be removed in pyuvdata version 2.2. \"\n \"For an input string s, this function is a thin wrapper on s.encode('utf8'). 
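# --- Editor's illustrative sketch (not part of the original source) ---
# from_uvcal(), defined above, works the same way but produces an
# "antenna"-type object (or a waterfall) whose polarization_array is taken
# from the UVCal jones_array.  The calfits file name is a placeholder, and
# constructor dispatch from a UVCal object is assumed as for UVData.
from pyuvdata import UVCal, UVFlag

uvc = UVCal()
uvc.read_calfits("calibration.calfits")             # placeholder path

uvf_ant = UVFlag(uvc, mode="flag")                  # antenna-type flags
print(uvf_ant.type)                                 # -> "antenna"
# flag_array matches the UVCal flag_array shape:
# (Nants_data, 1, Nfreqs, Ntimes, Njones) for single-spw solutions
print(uvf_ant.flag_array.shape)
# ----------------------------------------------------------------------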
\"\n \"The use of encode is preferred over calling this function.\",\n DeprecationWarning,\n )\n return s.encode(\"utf8\")\n\n\ndef _bytes_to_str(b):\n warnings.warn(\n \"_bytes_to_str is deprecated and will be removed in pyuvdata version 2.2. \"\n \"For an input string s, this function is a thin wrapper on s.decode('utf8'). \"\n \"The use of decode is preferred over calling this function.\",\n DeprecationWarning,\n )\n return b.decode(\"utf8\")\n\n\n__all__ = [\n \"POL_STR2NUM_DICT\",\n \"POL_NUM2STR_DICT\",\n \"CONJ_POL_DICT\",\n \"JONES_STR2NUM_DICT\",\n \"JONES_NUM2STR_DICT\",\n \"LatLonAlt_from_XYZ\",\n \"XYZ_from_LatLonAlt\",\n \"rotECEF_from_ECEF\",\n \"ECEF_from_rotECEF\",\n \"ENU_from_ECEF\",\n \"ECEF_from_ENU\",\n \"phase_uvw\",\n \"unphase_uvw\",\n \"uvcalibrate\",\n \"apply_uvflag\",\n \"get_lst_for_time\",\n \"polstr2num\",\n \"polnum2str\",\n \"jstr2num\",\n \"jnum2str\",\n \"parse_polstr\",\n \"parse_jpolstr\",\n \"conj_pol\",\n \"reorder_conj_pols\",\n \"baseline_to_antnums\",\n \"antnums_to_baseline\",\n \"baseline_index_flip\",\n \"get_baseline_redundancies\",\n \"get_antenna_redundancies\",\n \"collapse\",\n \"mean_collapse\",\n \"absmean_collapse\",\n \"quadmean_collapse\",\n \"or_collapse\",\n \"and_collapse\",\n]\n\n# fmt: off\n# polarization constants\n# maps polarization strings to polarization integers\nPOL_STR2NUM_DICT = {\"pI\": 1, \"pQ\": 2, \"pU\": 3, \"pV\": 4,\n \"I\": 1, \"Q\": 2, \"U\": 3, \"V\": 4, # support straight stokes names\n \"rr\": -1, \"ll\": -2, \"rl\": -3, \"lr\": -4,\n \"xx\": -5, \"yy\": -6, \"xy\": -7, \"yx\": -8}\n# maps polarization integers to polarization strings\nPOL_NUM2STR_DICT = {1: \"pI\", 2: \"pQ\", 3: \"pU\", 4: \"pV\",\n -1: \"rr\", -2: \"ll\", -3: \"rl\", -4: \"lr\",\n -5: \"xx\", -6: \"yy\", -7: \"xy\", -8: \"yx\"}\n\n# maps how polarizations change when antennas are swapped\nCONJ_POL_DICT = {\"xx\": \"xx\", \"yy\": \"yy\", \"xy\": \"yx\", \"yx\": \"xy\",\n \"ee\": \"ee\", \"nn\": \"nn\", \"en\": \"ne\", \"ne\": \"en\",\n \"rr\": \"rr\", \"ll\": \"ll\", \"rl\": \"lr\", \"lr\": \"rl\",\n \"I\": \"I\", \"Q\": \"Q\", \"U\": \"U\", \"V\": \"V\",\n \"pI\": \"pI\", \"pQ\": \"pQ\", \"pU\": \"pU\", \"pV\": \"pV\"}\n\n# maps jones matrix element strings to jones integers\n# Add entries that don't start with \"J\" to allow shorthand versions\nJONES_STR2NUM_DICT = {\"Jxx\": -5, \"Jyy\": -6, \"Jxy\": -7, \"Jyx\": -8,\n \"xx\": -5, \"x\": -5, \"yy\": -6, \"y\": -6, \"xy\": -7, \"yx\": -8,\n \"Jrr\": -1, \"Jll\": -2, \"Jrl\": -3, \"Jlr\": -4,\n \"rr\": -1, \"r\": -1, \"ll\": -2, \"l\": -2, \"rl\": -3, \"lr\": -4}\n# maps jones integers to jones matrix element strings\nJONES_NUM2STR_DICT = {-1: \"Jrr\", -2: \"Jll\", -3: \"Jrl\", -4: \"Jlr\",\n -5: \"Jxx\", -6: \"Jyy\", -7: \"Jxy\", -8: \"Jyx\"}\n\n# maps uvdata pols to input feed polarizations\nPOL_TO_FEED_DICT = {\"xx\": [\"x\", \"x\"], \"yy\": [\"y\", \"y\"],\n \"xy\": [\"x\", \"y\"], \"yx\": [\"y\", \"x\"],\n \"ee\": [\"e\", \"e\"], \"nn\": [\"n\", \"n\"],\n \"en\": [\"e\", \"n\"], \"ne\": [\"n\", \"e\"],\n \"rr\": [\"r\", \"r\"], \"ll\": [\"l\", \"l\"],\n \"rl\": [\"r\", \"l\"], \"lr\": [\"l\", \"r\"]}\n\n# fmt: on\n\n\ndef _get_iterable(x):\n \"\"\"Return iterable version of input.\"\"\"\n if isinstance(x, Iterable):\n return x\n else:\n return (x,)\n\n\ndef _fits_gethduaxis(hdu, axis):\n \"\"\"\n Make axis arrays for fits files.\n\n Parameters\n ----------\n hdu : astropy.io.fits HDU object\n The HDU to make an axis array for.\n axis : int\n The axis number of interest (1-based).\n\n 
Returns\n -------\n ndarray of float\n Array of values for the specified axis.\n\n \"\"\"\n ax = str(axis)\n axis_num = hdu.header[\"NAXIS\" + ax]\n val = hdu.header[\"CRVAL\" + ax]\n delta = hdu.header[\"CDELT\" + ax]\n index = hdu.header[\"CRPIX\" + ax] - 1\n\n return delta * (np.arange(axis_num) - index) + val\n\n\ndef _fits_indexhdus(hdulist):\n \"\"\"\n Get a dict of table names and HDU numbers from a FITS HDU list.\n\n Parameters\n ----------\n hdulist : list of astropy.io.fits HDU objects\n List of HDUs to get names for\n\n Returns\n -------\n dict\n dictionary with table names as keys and HDU number as values.\n\n \"\"\"\n tablenames = {}\n for i in range(len(hdulist)):\n try:\n tablenames[hdulist[i].header[\"EXTNAME\"]] = i\n except (KeyError):\n continue\n return tablenames\n\n\ndef _get_fits_extra_keywords(header, keywords_to_skip=None):\n \"\"\"\n Get any extra keywords and return as dict.\n\n Parameters\n ----------\n header : FITS header object\n header object to get extra_keywords from.\n keywords_to_skip : list of str\n list of keywords to not include in extra keywords in addition to standard\n FITS keywords.\n\n Returns\n -------\n dict\n dict of extra keywords.\n \"\"\"\n # List standard FITS header items that are still should not be included in\n # extra_keywords\n # These are the beginnings of FITS keywords to ignore, the actual keywords\n # often include integers following these names (e.g. NAXIS1, CTYPE3)\n std_fits_substrings = [\n \"HISTORY\",\n \"SIMPLE\",\n \"BITPIX\",\n \"EXTEND\",\n \"BLOCKED\",\n \"GROUPS\",\n \"PCOUNT\",\n \"BSCALE\",\n \"BZERO\",\n \"NAXIS\",\n \"PTYPE\",\n \"PSCAL\",\n \"PZERO\",\n \"CTYPE\",\n \"CRVAL\",\n \"CRPIX\",\n \"CDELT\",\n \"CROTA\",\n \"CUNIT\",\n ]\n\n if keywords_to_skip is not None:\n std_fits_substrings.extend(keywords_to_skip)\n\n extra_keywords = {}\n # find all the other header items and keep them as extra_keywords\n for key in header:\n # check if key contains any of the standard FITS substrings\n if np.any([sub in key for sub in std_fits_substrings]):\n continue\n if key == \"COMMENT\":\n extra_keywords[key] = str(header.get(key))\n elif key != \"\":\n extra_keywords[key] = header.get(key)\n\n return extra_keywords\n\n\ndef _check_history_version(history, version_string):\n \"\"\"Check if version_string is present in history string.\"\"\"\n if version_string.replace(\" \", \"\") in history.replace(\"\\n\", \"\").replace(\" \", \"\"):\n return True\n else:\n return False\n\n\ndef _check_histories(history1, history2):\n \"\"\"Check if two histories are the same.\"\"\"\n if history1.replace(\"\\n\", \"\").replace(\" \", \"\") == history2.replace(\n \"\\n\", \"\"\n ).replace(\" \", \"\"):\n return True\n else:\n return False\n\n\ndef _combine_histories(history1, history2):\n \"\"\"Combine histories with minimal repeats.\"\"\"\n hist2_words = history2.split(\" \")\n add_hist = \"\"\n test_hist1 = \" \" + history1 + \" \"\n for i, word in enumerate(hist2_words):\n if \" \" + word + \" \" not in test_hist1:\n add_hist += \" \" + word\n keep_going = i + 1 < len(hist2_words)\n while keep_going:\n if (hist2_words[i + 1] == \" \") or (\n \" \" + hist2_words[i + 1] + \" \" not in test_hist1\n ):\n add_hist += \" \" + hist2_words[i + 1]\n del hist2_words[i + 1]\n keep_going = i + 1 < len(hist2_words)\n else:\n keep_going = False\n\n return history1 + add_hist\n\n\ndef baseline_to_antnums(baseline, Nants_telescope):\n \"\"\"\n Get the antenna numbers corresponding to a given baseline number.\n\n Parameters\n ----------\n baseline : int 
or array_like of ints\n baseline number\n Nants_telescope : int\n number of antennas\n\n Returns\n -------\n int or array_like of int\n first antenna number(s)\n int or array_like of int\n second antenna number(s)\n\n \"\"\"\n if Nants_telescope > 2048:\n raise Exception(\n \"error Nants={Nants}>2048 not supported\".format(Nants=Nants_telescope)\n )\n\n return_array = isinstance(baseline, (np.ndarray, list, tuple))\n ant1, ant2 = _utils.baseline_to_antnums(\n np.ascontiguousarray(baseline, dtype=np.int64)\n )\n if return_array:\n return ant1, ant2\n else:\n return ant1.item(0), ant2.item(0)\n\n\ndef antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False):\n \"\"\"\n Get the baseline number corresponding to two given antenna numbers.\n\n Parameters\n ----------\n ant1 : int or array_like of int\n first antenna number\n ant2 : int or array_like of int\n second antenna number\n Nants_telescope : int\n number of antennas\n attempt256 : bool\n Option to try to use the older 256 standard used in\n many uvfits files (will use 2048 standard if there are more\n than 256 antennas). Default is False.\n\n Returns\n -------\n int or array of int\n baseline number corresponding to the two antenna numbers.\n\n \"\"\"\n if Nants_telescope is not None and Nants_telescope > 2048:\n raise Exception(\n \"cannot convert ant1, ant2 to a baseline index \"\n \"with Nants={Nants}>2048.\".format(Nants=Nants_telescope)\n )\n\n return_array = isinstance(ant1, (np.ndarray, list, tuple))\n baseline = _utils.antnums_to_baseline(\n np.ascontiguousarray(ant1, dtype=np.int64),\n np.ascontiguousarray(ant2, dtype=np.int64),\n attempt256=attempt256,\n )\n if return_array:\n return baseline\n else:\n return baseline.item(0)\n\n\ndef baseline_index_flip(baseline, Nants_telescope):\n \"\"\"Change baseline number to reverse antenna order.\"\"\"\n ant1, ant2 = baseline_to_antnums(baseline, Nants_telescope)\n return antnums_to_baseline(ant2, ant1, Nants_telescope)\n\n\ndef _x_orientation_rep_dict(x_orientation):\n \"\"\"Create replacement dict based on x_orientation.\"\"\"\n if x_orientation.lower() == \"east\" or x_orientation.lower() == \"e\":\n return {\"x\": \"e\", \"y\": \"n\"}\n elif x_orientation.lower() == \"north\" or x_orientation.lower() == \"n\":\n return {\"x\": \"n\", \"y\": \"e\"}\n else:\n raise ValueError(\"x_orientation not recognized.\")\n\n\ndef polstr2num(pol, x_orientation=None):\n \"\"\"\n Convert polarization str to number according to AIPS Memo 117.\n\n Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,\n not true Stokes, but also supports 'I', 'Q', 'U', 'V'.\n\n Parameters\n ----------\n pol : str\n polarization string\n x_orientation : str, optional\n Orientation of the physical dipole corresponding to what is\n labelled as the x polarization (\"east\" or \"north\") to allow for\n converting from E/N strings. 
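# --- Editor's illustrative sketch (not part of the original source) ---
# baseline_to_antnums()/antnums_to_baseline(), defined above, implement the
# 2048-antenna baseline numbering (with an optional attempt at the older
# 256-antenna uvfits convention) and accept scalars or arrays.
import numpy as np
from pyuvdata import utils as uvutils

bl = uvutils.antnums_to_baseline(0, 1, Nants_telescope=128)
ant1, ant2 = uvutils.baseline_to_antnums(bl, Nants_telescope=128)
print(bl, ant1, ant2)                        # round trip back to antennas 0 and 1

# flipping the antenna order gives the conjugate baseline number
print(uvutils.baseline_index_flip(bl, Nants_telescope=128))

# array input returns arrays
bls = uvutils.antnums_to_baseline(np.array([0, 1]), np.array([2, 3]),
                                  Nants_telescope=128)
print(bls)
# ----------------------------------------------------------------------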
See corresonding parameter on UVData\n for more details.\n\n Returns\n -------\n int\n Number corresponding to string\n\n Raises\n ------\n ValueError\n If the pol string cannot be converted to a polarization number.\n\n Warns\n -----\n UserWarning\n If the x_orientation not recognized.\n\n \"\"\"\n dict_use = copy.deepcopy(POL_STR2NUM_DICT)\n if x_orientation is not None:\n try:\n rep_dict = _x_orientation_rep_dict(x_orientation)\n for key, value in POL_STR2NUM_DICT.items():\n new_key = key.replace(\"x\", rep_dict[\"x\"]).replace(\"y\", rep_dict[\"y\"])\n dict_use[new_key] = value\n except ValueError:\n warnings.warn(\"x_orientation not recognized.\")\n\n poldict = {k.lower(): v for k, v in dict_use.items()}\n if isinstance(pol, str):\n out = poldict[pol.lower()]\n elif isinstance(pol, Iterable):\n out = [poldict[key.lower()] for key in pol]\n else:\n raise ValueError(\n \"Polarization {p} cannot be converted to a polarization number.\".format(\n p=pol\n )\n )\n return out\n\n\ndef polnum2str(num, x_orientation=None):\n \"\"\"\n Convert polarization number to str according to AIPS Memo 117.\n\n Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,\n not true Stokes\n\n Parameters\n ----------\n num : int\n polarization number\n x_orientation : str, optional\n Orientation of the physical dipole corresponding to what is\n labelled as the x polarization (\"east\" or \"north\") to convert to\n E/N strings. See corresonding parameter on UVData for more details.\n\n Returns\n -------\n str\n String corresponding to polarization number\n\n Raises\n ------\n ValueError\n If the polarization number cannot be converted to a polarization string.\n\n Warns\n -----\n UserWarning\n If the x_orientation not recognized.\n\n \"\"\"\n dict_use = copy.deepcopy(POL_NUM2STR_DICT)\n if x_orientation is not None:\n try:\n rep_dict = _x_orientation_rep_dict(x_orientation)\n for key, value in POL_NUM2STR_DICT.items():\n new_val = value.replace(\"x\", rep_dict[\"x\"]).replace(\"y\", rep_dict[\"y\"])\n dict_use[key] = new_val\n except ValueError:\n warnings.warn(\"x_orientation not recognized.\")\n\n if isinstance(num, (int, np.int32, np.int64)):\n out = dict_use[num]\n elif isinstance(num, Iterable):\n out = [dict_use[i] for i in num]\n else:\n raise ValueError(\n \"Polarization {p} cannot be converted to string.\".format(p=num)\n )\n return out\n\n\ndef jstr2num(jstr, x_orientation=None):\n \"\"\"\n Convert jones polarization str to number according to calfits memo.\n\n Parameters\n ----------\n jstr : str\n antenna (jones) polarization string\n x_orientation : str, optional\n Orientation of the physical dipole corresponding to what is\n labelled as the x polarization (\"east\" or \"north\") to allow for\n converting from E/N strings. 
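# --- Editor's illustrative sketch (not part of the original source) ---
# polstr2num()/polnum2str(), defined above, convert between polarization
# strings and AIPS Memo 117 integers; with x_orientation set, 'e'/'n' feed
# labels are also accepted and produced.
from pyuvdata import utils as uvutils

print(uvutils.polstr2num("xx"))                          # -> -5
print(uvutils.polstr2num(["pI", "yx"]))                  # -> [1, -8]
print(uvutils.polnum2str(-7))                            # -> "xy"

# with an east-oriented x dipole, 'en' is understood as 'xy'
print(uvutils.polstr2num("en", x_orientation="east"))    # -> -7
print(uvutils.polnum2str(-7, x_orientation="east"))      # -> "en"
# ----------------------------------------------------------------------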
See corresonding parameter on UVData\n for more details.\n\n Returns\n -------\n int\n antenna (jones) polarization number corresponding to string\n\n Raises\n ------\n ValueError\n If the jones string cannot be converted to a polarization number.\n\n Warns\n -----\n UserWarning\n If the x_orientation not recognized.\n\n \"\"\"\n dict_use = copy.deepcopy(JONES_STR2NUM_DICT)\n if x_orientation is not None:\n try:\n rep_dict = _x_orientation_rep_dict(x_orientation)\n for key, value in JONES_STR2NUM_DICT.items():\n new_key = key.replace(\"x\", rep_dict[\"x\"]).replace(\"y\", rep_dict[\"y\"])\n dict_use[new_key] = value\n except ValueError:\n warnings.warn(\"x_orientation not recognized.\")\n\n jdict = {k.lower(): v for k, v in dict_use.items()}\n if isinstance(jstr, str):\n out = jdict[jstr.lower()]\n elif isinstance(jstr, Iterable):\n out = [jdict[key.lower()] for key in jstr]\n else:\n raise ValueError(\n \"Jones polarization {j} cannot be converted to index.\".format(j=jstr)\n )\n return out\n\n\ndef jnum2str(jnum, x_orientation=None):\n \"\"\"\n Convert jones polarization number to str according to calfits memo.\n\n Parameters\n ----------\n num : int\n antenna (jones) polarization number\n x_orientation : str, optional\n Orientation of the physical dipole corresponding to what is\n labelled as the x polarization (\"east\" or \"north\") to convert to\n E/N strings. See corresonding parameter on UVData for more details.\n\n Returns\n -------\n str\n antenna (jones) polarization string corresponding to number\n\n Raises\n ------\n ValueError\n If the jones polarization number cannot be converted to a jones\n polarization string.\n\n Warns\n -----\n UserWarning\n If the x_orientation not recognized.\n\n \"\"\"\n dict_use = copy.deepcopy(JONES_NUM2STR_DICT)\n if x_orientation is not None:\n try:\n rep_dict = _x_orientation_rep_dict(x_orientation)\n for key, value in JONES_NUM2STR_DICT.items():\n new_val = value.replace(\"x\", rep_dict[\"x\"]).replace(\"y\", rep_dict[\"y\"])\n dict_use[key] = new_val\n except ValueError:\n warnings.warn(\"x_orientation not recognized.\")\n\n if isinstance(jnum, (int, np.int32, np.int64)):\n out = dict_use[jnum]\n elif isinstance(jnum, Iterable):\n out = [dict_use[i] for i in jnum]\n else:\n raise ValueError(\n \"Jones polarization {j} cannot be converted to string.\".format(j=jnum)\n )\n return out\n\n\ndef parse_polstr(polstr, x_orientation=None):\n \"\"\"\n Parse a polarization string and return pyuvdata standard polarization string.\n\n See utils.POL_STR2NUM_DICT for options.\n\n Parameters\n ----------\n polstr : str\n polarization string\n x_orientation : str, optional\n Orientation of the physical dipole corresponding to what is\n labelled as the x polarization (\"east\" or \"north\") to allow for\n converting from E/N strings. 
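# --- Editor's illustrative sketch (not part of the original source) ---
# jstr2num()/jnum2str(), defined above, are the calfits analogues for Jones
# terms; shorthand forms without the leading "J" are accepted on input.
from pyuvdata import utils as uvutils

print(uvutils.jstr2num("Jxx"))          # -> -5
print(uvutils.jstr2num("y"))            # -> -6  (single-feed shorthand)
print(uvutils.jnum2str([-1, -2]))       # -> ["Jrr", "Jll"]

# x_orientation works the same way as for the visibility polarizations
print(uvutils.jstr2num("Jnn", x_orientation="east"))   # -> -6
# ----------------------------------------------------------------------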
See corresonding parameter on UVData\n for more details.\n\n Returns\n -------\n str\n AIPS Memo 117 standard string\n\n Raises\n ------\n ValueError\n If the pol string cannot be converted to a polarization number.\n\n Warns\n -----\n UserWarning\n If the x_orientation not recognized.\n\n \"\"\"\n return polnum2str(\n polstr2num(polstr, x_orientation=x_orientation), x_orientation=x_orientation\n )\n\n\ndef parse_jpolstr(jpolstr, x_orientation=None):\n \"\"\"\n Parse a Jones polarization string and return pyuvdata standard jones string.\n\n See utils.JONES_STR2NUM_DICT for options.\n\n Parameters\n ----------\n jpolstr : str\n Jones polarization string\n\n Returns\n -------\n str\n calfits memo standard string\n\n Raises\n ------\n ValueError\n If the jones string cannot be converted to a polarization number.\n\n Warns\n -----\n UserWarning\n If the x_orientation not recognized.\n\n \"\"\"\n return jnum2str(\n jstr2num(jpolstr, x_orientation=x_orientation), x_orientation=x_orientation\n )\n\n\ndef conj_pol(pol):\n \"\"\"\n Return the polarization for the conjugate baseline.\n\n For example, (1, 2, 'xy') = conj(2, 1, 'yx').\n The returned polarization is determined by assuming the antenna pair is\n reversed in the data, and finding the correct polarization correlation\n which will yield the requested baseline when conjugated. Note this means\n changing the polarization for linear cross-pols, but keeping auto-pol\n (e.g. xx) and Stokes the same.\n\n Parameters\n ----------\n pol : str or int\n Polarization string or integer.\n\n Returns\n -------\n cpol : str or int\n Polarization as if antennas are swapped (type matches input)\n\n \"\"\"\n cpol_dict = {k.lower(): v for k, v in CONJ_POL_DICT.items()}\n\n if isinstance(pol, str):\n cpol = cpol_dict[pol.lower()]\n elif isinstance(pol, Iterable):\n cpol = [conj_pol(p) for p in pol]\n elif isinstance(pol, (int, np.int32, np.int64)):\n cpol = polstr2num(cpol_dict[polnum2str(pol).lower()])\n else:\n raise ValueError(\"Polarization not recognized, cannot be conjugated.\")\n return cpol\n\n\ndef reorder_conj_pols(pols):\n \"\"\"\n Reorder multiple pols, swapping pols that are conjugates of one another.\n\n For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')\n This is useful for the _key2inds function in the case where an antenna\n pair is specified but the conjugate pair exists in the data. 
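# --- Editor's illustrative sketch (not part of the original source) ---
# conj_pol(), defined above, returns the polarization of the conjugate
# baseline (antenna order swapped): linear cross-pols flip, auto-pols and
# pseudo-Stokes are unchanged.  parse_polstr() normalises arbitrary input
# strings to the pyuvdata standard form.
from pyuvdata import utils as uvutils

print(uvutils.conj_pol("xy"))            # -> "yx"
print(uvutils.conj_pol("xx"))            # -> "xx"
print(uvutils.conj_pol(-7))              # -> -8   (integer form of xy -> yx)
print(uvutils.conj_pol(["rl", "pI"]))    # -> ["lr", "pI"]

print(uvutils.parse_polstr("XX"))        # -> "xx"
print(uvutils.parse_polstr("i"))         # -> "pI"
# ----------------------------------------------------------------------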
The conjugated\n data should be returned in the order of the polarization axis, so after\n conjugating the data, the pols need to be reordered.\n For example, if a file contains antpair (0, 1) and pols 'xy' and 'yx', but\n the user requests antpair (1, 0), they should get:\n [(1x, 0y), (1y, 0x)] = [conj(0y, 1x), conj(0x, 1y)]\n\n Parameters\n ----------\n pols : array_like of str or int\n Polarization array (strings or ints).\n\n Returns\n -------\n conj_order : ndarray of int\n Indices to reorder polarization array.\n \"\"\"\n if not isinstance(pols, Iterable):\n raise ValueError(\"reorder_conj_pols must be given an array of polarizations.\")\n cpols = np.array([conj_pol(p) for p in pols]) # Array needed for np.where\n conj_order = [np.where(cpols == p)[0][0] if p in cpols else -1 for p in pols]\n if -1 in conj_order:\n raise ValueError(\n \"Not all conjugate pols exist in the polarization array provided.\"\n )\n return conj_order\n\n\ndef LatLonAlt_from_XYZ(xyz, check_acceptability=True):\n \"\"\"\n Calculate lat/lon/alt from ECEF x,y,z.\n\n Parameters\n ----------\n xyz : ndarray of float\n numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.\n check_acceptability : bool\n Flag to check XYZ coordinates are reasonable.\n\n Returns\n -------\n latitude : ndarray or float\n latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians\n longitude : ndarray or float\n longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians\n altitude : ndarray or float\n altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters\n\n \"\"\"\n # convert to a numpy array\n xyz = np.array(xyz)\n if xyz.ndim > 1 and xyz.shape[1] != 3:\n raise ValueError(\"The expected shape of ECEF xyz array is (Npts, 3).\")\n\n else:\n xyz_use = xyz\n\n if xyz_use.ndim == 1:\n xyz_use = xyz_use[np.newaxis, :]\n\n # checking for acceptable values\n if check_acceptability:\n if np.any(np.linalg.norm(xyz_use, axis=1) < 6.35e6) or np.any(\n np.linalg.norm(xyz_use, axis=1) > 6.39e6\n ):\n raise ValueError(\"xyz values should be ECEF x, y, z coordinates in meters\")\n\n latitude, longitude, altitude = _utils._latlonalt_from_xyz(\n np.ascontiguousarray(xyz_use, dtype=np.float64)\n )\n\n if xyz.ndim == 1:\n longitude = longitude[0]\n latitude = latitude[0]\n altitude = altitude[0]\n return latitude, longitude, altitude\n\n\ndef XYZ_from_LatLonAlt(latitude, longitude, altitude):\n \"\"\"\n Calculate ECEF x,y,z from lat/lon/alt values.\n\n Parameters\n ----------\n latitude : ndarray or float\n latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians\n longitude : ndarray or float\n longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians\n altitude : ndarray or float\n altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters\n\n Returns\n -------\n xyz : ndarray of float\n numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.\n\n \"\"\"\n latitude = np.ascontiguousarray(latitude, dtype=np.float64)\n longitude = np.ascontiguousarray(longitude, dtype=np.float64)\n altitude = np.ascontiguousarray(altitude, dtype=np.float64)\n n_pts = latitude.size\n if longitude.size != n_pts:\n raise ValueError(\n \"latitude, longitude and altitude must all have the same length\"\n )\n if altitude.size != n_pts:\n raise ValueError(\n \"latitude, longitude and altitude must all have the same length\"\n )\n\n return _utils._xyz_from_latlonalt(latitude, longitude, altitude)\n\n\ndef rotECEF_from_ECEF(xyz, longitude):\n \"\"\"\n Get rotated ECEF positions such that the x-axis 
goes through the longitude.\n\n Miriad and uvfits expect antenna positions in this frame\n (with longitude of the array center/telescope location)\n\n Parameters\n ----------\n xyz : ndarray of float\n numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.\n longitude : float\n longitude in radians to rotate coordinates to\n (usually the array center/telescope location).\n\n Returns\n -------\n ndarray of float\n Rotated ECEF coordinates, shape (Npts, 3).\n\n \"\"\"\n angle = -1 * longitude\n rot_matrix = np.array(\n [\n [np.cos(angle), -1 * np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1],\n ]\n )\n return rot_matrix.dot(xyz.T).T\n\n\ndef ECEF_from_rotECEF(xyz, longitude):\n \"\"\"\n Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).\n\n Parameters\n ----------\n xyz : ndarray of float\n numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.\n longitude : float\n longitude in radians giving the x direction of the rotated coordinates\n (usually the array center/telescope location).\n\n Returns\n -------\n ndarray of float\n ECEF coordinates, shape (Npts, 3).\n\n \"\"\"\n angle = longitude\n rot_matrix = np.array(\n [\n [np.cos(angle), -1 * np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1],\n ]\n )\n return rot_matrix.dot(xyz.T).T\n\n\ndef ENU_from_ECEF(xyz, latitude, longitude, altitude):\n \"\"\"\n Calculate local ENU (east, north, up) coordinates from ECEF coordinates.\n\n Parameters\n ----------\n xyz : ndarray of float\n numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.\n latitude : float\n Latitude of center of ENU coordinates in radians.\n longitude : float\n Longitude of center of ENU coordinates in radians.\n altitude : float\n Altitude of center of ENU coordinates in meters.\n\n Returns\n -------\n ndarray of float\n numpy array, shape (Npts, 3), with local ENU coordinates\n\n \"\"\"\n xyz = np.array(xyz)\n if xyz.ndim > 1 and xyz.shape[1] != 3:\n raise ValueError(\"The expected shape of ECEF xyz array is (Npts, 3).\")\n\n xyz_in = xyz\n\n if xyz_in.ndim == 1:\n xyz_in = xyz_in[np.newaxis, :]\n\n # check that these are sensible ECEF values -- their magnitudes need to be\n # on the order of Earth's radius\n ecef_magnitudes = np.linalg.norm(xyz_in, axis=1)\n sensible_radius_range = (6.35e6, 6.39e6)\n if np.any(ecef_magnitudes <= sensible_radius_range[0]) or np.any(\n ecef_magnitudes >= sensible_radius_range[1]\n ):\n raise ValueError(\n \"ECEF vector magnitudes must be on the order of the radius of the earth\"\n )\n\n enu = _utils._ENU_from_ECEF(\n np.ascontiguousarray(xyz_in, dtype=np.float64),\n np.ascontiguousarray(latitude, dtype=np.float64),\n np.ascontiguousarray(longitude, dtype=np.float64),\n np.ascontiguousarray(altitude, dtype=np.float64),\n )\n if len(xyz.shape) == 1:\n enu = np.squeeze(enu)\n\n return enu\n\n\ndef ECEF_from_ENU(enu, latitude, longitude, altitude):\n \"\"\"\n Calculate ECEF coordinates from local ENU (east, north, up) coordinates.\n\n Parameters\n ----------\n enu : ndarray of float\n numpy array, shape (Npts, 3), with local ENU coordinates.\n latitude : float\n Latitude of center of ENU coordinates in radians.\n longitude : float\n Longitude of center of ENU coordinates in radians.\n altitude : float\n Altitude of center of ENU coordinates in meters.\n\n\n Returns\n -------\n xyz : ndarray of float\n numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.\n\n \"\"\"\n enu = np.array(enu)\n if enu.ndim > 1 and enu.shape[1] != 3:\n raise ValueError(\"The expected shape of 
the ENU array is (Npts, 3).\")\n\n enu_use = enu\n\n if enu_use.ndim == 1:\n enu_use = enu_use[np.newaxis, :]\n xyz = _utils._ECEF_from_ENU(\n np.ascontiguousarray(enu_use, dtype=np.float64),\n np.ascontiguousarray(latitude, dtype=np.float64),\n np.ascontiguousarray(longitude, dtype=np.float64),\n np.ascontiguousarray(altitude, dtype=np.float64),\n )\n\n if len(enu.shape) == 1:\n xyz = np.squeeze(xyz)\n\n return xyz\n\n\ndef phase_uvw(ra, dec, initial_uvw):\n \"\"\"\n Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame.\n\n This code expects input uvws or positions relative to the telescope\n location in the same frame that ra/dec are in (e.g. icrs or gcrs) and\n returns phased ones in the same frame.\n\n Note that this code is nearly identical to ENU_from_ECEF, except that it\n uses an arbitrary phasing center rather than a coordinate center.\n\n Parameters\n ----------\n ra : float\n Right ascension of phase center.\n dec : float\n Declination of phase center.\n initial_uvw : ndarray of float\n Unphased uvws or positions relative to the array center,\n shape (Nlocs, 3).\n\n Returns\n -------\n uvw : ndarray of float\n uvw array in the same frame as initial_uvws, ra and dec.\n\n \"\"\"\n if initial_uvw.ndim == 1:\n initial_uvw = initial_uvw[np.newaxis, :]\n\n return _utils._phase_uvw(\n np.float64(ra),\n np.float64(dec),\n np.ascontiguousarray(initial_uvw, dtype=np.float64),\n )\n\n\ndef unphase_uvw(ra, dec, uvw):\n \"\"\"\n Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.\n\n This code expects phased uvws or positions in the same frame that ra/dec\n are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.\n\n Parameters\n ----------\n ra : float\n Right ascension of phase center.\n dec : float\n Declination of phase center.\n uvw : ndarray of float\n Phased uvws or positions relative to the array center,\n shape (Nlocs, 3).\n\n Returns\n -------\n unphased_uvws : ndarray of float\n Unphased uvws or positions relative to the array center,\n shape (Nlocs, 3).\n\n \"\"\"\n if uvw.ndim == 1:\n uvw = uvw[np.newaxis, :]\n\n return _utils._unphase_uvw(\n np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw, dtype=np.float64),\n )\n\n\ndef get_lst_for_time(jd_array, latitude, longitude, altitude):\n \"\"\"\n Get the lsts for a set of jd times at an earth location.\n\n Parameters\n ----------\n jd_array : ndarray of float\n JD times to get lsts for.\n latitude : float\n Latitude of location to get lst for in degrees.\n longitude : float\n Longitude of location to get lst for in degrees.\n altitude : float\n Altitude of location to get lst for in meters.\n\n Returns\n -------\n ndarray of float\n LSTs in radians corresponding to the jd_array.\n\n \"\"\"\n lst_array = np.zeros_like(jd_array)\n jd, reverse_inds = np.unique(jd_array, return_inverse=True)\n times = Time(\n jd,\n format=\"jd\",\n location=(Angle(longitude, unit=\"deg\"), Angle(latitude, unit=\"deg\")),\n )\n if iers.conf.auto_max_age is None: # pragma: no cover\n delta, status = times.get_delta_ut1_utc(return_status=True)\n if np.any(\n np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))\n ):\n warnings.warn(\n \"time is out of IERS range, setting delta ut1 utc to \"\n \"extrapolated value\"\n )\n times.delta_ut1_utc = delta\n lst_array = times.sidereal_time(\"apparent\").radian[reverse_inds]\n\n return lst_array\n\n\ndef find_clusters(location_ids, location_vectors, tol):\n \"\"\"\n Find clusters of vectors (e.g. 
redundant baselines, times).\n\n Parameters\n ----------\n location_ids : array_like of int\n ID labels for locations.\n location_vectors : array_like of float\n location vectors, can be multidimensional\n tol : float\n tolerance for clusters\n\n Returns\n -------\n list of list of location_ids\n\n \"\"\"\n location_vectors = np.asarray(location_vectors)\n location_ids = np.asarray(location_ids)\n if location_vectors.ndim == 1:\n location_vectors = location_vectors[:, np.newaxis]\n\n # For each baseline, list all others that are within the tolerance distance.\n adj_triu_mat = pdist(location_vectors) < tol\n adj = {} # Adjacency dictionary\n\n for bi, col in enumerate(squareform(adj_triu_mat)):\n col[bi] = True\n adj[location_ids[bi]] = location_ids[col]\n\n # The adjacency list defines a set of graph edges.\n # For each location b0, loop over its adjacency list ai \\in adj[b0]\n # If adj[b0] is a subset of adj[ai], then ai is in a redundant group with b0\n loc_gps = []\n for k in adj.keys():\n a0 = adj[k]\n group = [k]\n for a in a0:\n if set(a0).issubset(adj[a]) and a not in group:\n group.append(a)\n group.sort()\n loc_gps.append(group)\n\n # Groups can be different lengths, but we need to take a unique over an axis\n # to properly identify unique groups\n # Pad out all the sub-lists to be the same length\n pad = len(max(loc_gps, key=len))\n loc_gps = np.array([i + [-1] * (pad - len(i)) for i in loc_gps])\n # We end up with multiple copies of each redundant group, so remove duplicates\n loc_gps = np.unique(loc_gps, axis=0).tolist()\n # remove the dummy pad baselines from each list\n loc_gps = [[bl for bl in gp if bl != -1] for gp in loc_gps]\n\n return loc_gps\n\n\ndef get_baseline_redundancies(baselines, baseline_vecs, tol=1.0, with_conjugates=False):\n \"\"\"\n Find redundant baseline groups.\n\n Parameters\n ----------\n baselines : array_like of int\n Baseline numbers, shape (Nbls,)\n baseline_vecs : array_like of float\n Baseline vectors in meters, shape (Nbls, 3)\n tol : float\n Absolute tolerance of redundancy, in meters.\n with_conjugates : bool\n Option to include baselines that are redundant when flipped.\n\n Returns\n -------\n baseline_groups : list of lists of int\n list of lists of redundant baseline numbers\n vec_bin_centers : list of array_like of float\n List of vectors describing redundant group centers\n lengths : list of float\n List of redundant group baseline lengths in meters\n baseline_ind_conj : list of int\n List of baselines that are redundant when reversed. 
Only returned if\n with_conjugates is True\n\n \"\"\"\n Nbls = baselines.shape[0]\n\n if not baseline_vecs.shape == (Nbls, 3):\n raise ValueError(\"Baseline vectors must be shape (Nbls, 3)\")\n\n baseline_vecs = copy.copy(baseline_vecs) # Protect the vectors passed in.\n\n if with_conjugates:\n conjugates = []\n for bv in baseline_vecs:\n uneg = bv[0] < -tol\n uzer = np.isclose(bv[0], 0.0, atol=tol)\n vneg = bv[1] < -tol\n vzer = np.isclose(bv[1], 0.0, atol=tol)\n wneg = bv[2] < -tol\n conjugates.append(uneg or (uzer and vneg) or (uzer and vzer and wneg))\n\n conjugates = np.array(conjugates, dtype=bool)\n baseline_vecs[conjugates] *= -1\n baseline_ind_conj = baselines[conjugates]\n bl_gps, vec_bin_centers, lens = get_baseline_redundancies(\n baselines, baseline_vecs, tol=tol, with_conjugates=False\n )\n return bl_gps, vec_bin_centers, lens, baseline_ind_conj\n\n bl_gps = find_clusters(baselines, baseline_vecs, tol)\n\n n_unique = len(bl_gps)\n vec_bin_centers = np.zeros((n_unique, 3))\n for gi, gp in enumerate(bl_gps):\n inds = [np.where(i == baselines)[0] for i in gp]\n vec_bin_centers[gi] = np.mean(baseline_vecs[inds, :], axis=0)\n\n lens = np.sqrt(np.sum(vec_bin_centers ** 2, axis=1))\n if np.sum([len(bg) for bg in bl_gps]) > Nbls:\n raise ValueError(\n \"Some baselines are falling into multiple\"\n \" redundant groups. Lower the tolerance to resolve ambiguity.\"\n )\n\n return bl_gps, vec_bin_centers, lens\n\n\ndef get_antenna_redundancies(\n antenna_numbers, antenna_positions, tol=1.0, include_autos=False\n):\n \"\"\"\n Find redundant baseline groups based on antenna positions.\n\n Parameters\n ----------\n antenna_numbers : array_like of int\n Antenna numbers, shape (Nants,).\n antenna_positions : array_like of float\n Antenna position vectors in the ENU (topocentric) frame in meters,\n shape (Nants, 3).\n tol : float\n Redundancy tolerance in meters.\n include_autos : bool\n Option to include autocorrelations.\n\n Returns\n -------\n baseline_groups : list of lists of int\n list of lists of redundant baseline numbers\n vec_bin_centers : list of array_like of float\n List of vectors describing redundant group centers\n lengths : list of float\n List of redundant group baseline lengths in meters\n\n Notes\n -----\n The baseline numbers refer to antenna pairs (a1, a2) such that\n the baseline vector formed from ENU antenna positions,\n blvec = enu[a1] - enu[a2]\n is close to the other baselines in the group.\n\n This is achieved by putting baselines in a form of the u>0\n convention, but with a tolerance in defining the signs of\n vector components.\n\n To guarantee that the same baseline numbers are present in a UVData\n object, ``UVData.conjugate_bls('u>0', uvw_tol=tol)``, where `tol` is\n the tolerance used here.\n\n \"\"\"\n Nants = antenna_numbers.size\n\n bls = []\n bl_vecs = []\n\n for aj in range(Nants):\n mini = aj + 1\n if include_autos:\n mini = aj\n for ai in range(mini, Nants):\n anti, antj = antenna_numbers[ai], antenna_numbers[aj]\n bidx = antnums_to_baseline(antj, anti, Nants)\n bv = antenna_positions[ai] - antenna_positions[aj]\n bl_vecs.append(bv)\n bls.append(bidx)\n bls = np.array(bls)\n bl_vecs = np.array(bl_vecs)\n gps, vecs, lens, conjs = get_baseline_redundancies(\n bls, bl_vecs, tol=tol, with_conjugates=True\n )\n # Flip the baselines in the groups.\n for gi, gp in enumerate(gps):\n for bi, bl in enumerate(gp):\n if bl in conjs:\n gps[gi][bi] = baseline_index_flip(bl, Nants)\n\n return gps, vecs, lens\n\n\ndef mean_collapse(\n arr, weights=None, axis=None, 
return_weights=False, return_weights_square=False\n):\n \"\"\"\n Collapse by averaging data.\n\n This is similar to np.average, except it handles infs (by giving them\n zero weight) and zero weight axes (by forcing result to be inf with zero\n output weight).\n\n Parameters\n ----------\n arr : array\n Input array to process.\n weights: ndarray, optional\n weights for average. If none, will default to equal weight for all\n non-infinite data.\n axis : int or tuple, optional\n Axis or axes to collapse (passed to np.sum). Default is all.\n return_weights : bool\n Whether to return sum of weights.\n return_weights_square: bool\n Whether to return the sum of the square of the weights. Default is False.\n\n \"\"\"\n arr = copy.deepcopy(arr) # avoid changing outside\n if weights is None:\n weights = np.ones_like(arr)\n else:\n weights = copy.deepcopy(weights)\n weights = weights * np.logical_not(np.isinf(arr))\n arr[np.isinf(arr)] = 0\n weight_out = np.sum(weights, axis=axis)\n if return_weights_square:\n weights_square = weights ** 2\n weights_square_out = np.sum(weights_square, axis=axis)\n out = np.sum(weights * arr, axis=axis)\n where = weight_out > 1e-10\n out = np.true_divide(out, weight_out, where=where)\n out = np.where(where, out, np.inf)\n if return_weights and return_weights_square:\n return out, weight_out, weights_square_out\n elif return_weights:\n return out, weight_out\n elif return_weights_square:\n return out, weights_square_out\n else:\n return out\n\n\ndef absmean_collapse(\n arr, weights=None, axis=None, return_weights=False, return_weights_square=False\n):\n \"\"\"\n Collapse by averaging absolute value of data.\n\n Parameters\n ----------\n arr : array\n Input array to process.\n weights: ndarray, optional\n weights for average. If none, will default to equal weight for all\n non-infinite data.\n axis : int or tuple, optional\n Axis or axes to collapse (passed to np.sum). Default is all.\n return_weights : bool\n Whether to return sum of weights.\n return_weights_square: bool\n whether to return the sum of the squares of the weights. Default is False.\n\n \"\"\"\n return mean_collapse(\n np.abs(arr),\n weights=weights,\n axis=axis,\n return_weights=return_weights,\n return_weights_square=return_weights_square,\n )\n\n\ndef quadmean_collapse(\n arr, weights=None, axis=None, return_weights=False, return_weights_square=False\n):\n \"\"\"\n Collapse by averaging in quadrature.\n\n Parameters\n ----------\n arr : array\n Input array to process.\n weights: ndarray, optional\n weights for average. If none, will default to equal weight for all\n non-infinite data.\n axis : int or tuple, optional\n Axis or axes to collapse (passed to np.sum). Default is all.\n return_weights : bool\n Whether to return sum of weights.\n return_weights_square: bool\n whether to return the sum of the squares of the weights. 
Default is False.\n\n \"\"\"\n out = mean_collapse(\n np.abs(arr) ** 2,\n weights=weights,\n axis=axis,\n return_weights=return_weights,\n return_weights_square=return_weights_square,\n )\n if return_weights and return_weights_square:\n return np.sqrt(out[0]), out[1], out[2]\n elif return_weights or return_weights_square:\n return np.sqrt(out[0]), out[1]\n else:\n return np.sqrt(out)\n\n\ndef or_collapse(\n arr, weights=None, axis=None, return_weights=False, return_weights_square=False\n):\n \"\"\"\n Collapse using OR operation.\n\n Parameters\n ----------\n arr : array\n Input array to process.\n weights: ndarray, optional\n NOT USED, but kept for symmetry with other collapsing functions.\n axis : int or tuple, optional\n Axis or axes to collapse (take OR over). Default is all.\n return_weights : bool\n Whether to return dummy weights array.\n NOTE: the dummy weights will simply be an array of ones\n return_weights_square: bool\n NOT USED, but kept for symmetry with other collapsing functions.\n\n \"\"\"\n if arr.dtype != np.bool:\n raise ValueError(\"Input to or_collapse function must be boolean array\")\n out = np.any(arr, axis=axis)\n if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):\n warnings.warn(\"Currently weights are not handled when OR-ing boolean arrays.\")\n if return_weights:\n return out, np.ones_like(out, dtype=np.float)\n else:\n return out\n\n\ndef and_collapse(\n arr, weights=None, axis=None, return_weights=False, return_weights_square=False\n):\n \"\"\"\n Collapse using AND operation.\n\n Parameters\n ----------\n arr : array\n Input array to process.\n weights: ndarray, optional\n NOT USED, but kept for symmetry with other collapsing functions.\n axis : int or tuple, optional\n Axis or axes to collapse (take AND over). Default is all.\n return_weights : bool\n Whether to return dummy weights array.\n NOTE: the dummy weights will simply be an array of ones\n return_weights_square: bool\n NOT USED, but kept for symmetry with other collapsing functions.\n\n \"\"\"\n if arr.dtype != np.bool:\n raise ValueError(\"Input to and_collapse function must be boolean array\")\n out = np.all(arr, axis=axis)\n if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):\n warnings.warn(\"Currently weights are not handled when AND-ing boolean arrays.\")\n if return_weights:\n return out, np.ones_like(out, dtype=np.float)\n else:\n return out\n\n\ndef collapse(\n arr, alg, weights=None, axis=None, return_weights=False, return_weights_square=False\n):\n \"\"\"\n Parent function to collapse an array with a given algorithm.\n\n Parameters\n ----------\n arr : array\n Input array to process.\n alg : str\n Algorithm to use. Must be defined in this function with\n corresponding subfunction above.\n weights: ndarray, optional\n weights for collapse operation (e.g. weighted mean).\n NOTE: Some subfunctions do not use the weights. See corresponding\n doc strings.\n axis : int or tuple, optional\n Axis or axes to collapse. Default is all.\n return_weights : bool\n Whether to return sum of weights.\n return_weights_square: bool\n Whether to return the sum of the squares of the weights. 
Default is False.\n\n \"\"\"\n collapse_dict = {\n \"mean\": mean_collapse,\n \"absmean\": absmean_collapse,\n \"quadmean\": quadmean_collapse,\n \"or\": or_collapse,\n \"and\": and_collapse,\n }\n try:\n out = collapse_dict[alg](\n arr,\n weights=weights,\n axis=axis,\n return_weights=return_weights,\n return_weights_square=return_weights_square,\n )\n except KeyError:\n raise ValueError(\n \"Collapse algorithm must be one of: \"\n + \", \".join(collapse_dict.keys())\n + \".\"\n )\n return out\n\n\ndef uvcalibrate(\n uvdata,\n uvcal,\n inplace=True,\n prop_flags=True,\n flag_missing=True,\n Dterm_cal=False,\n delay_convention=\"minus\",\n undo=False,\n time_check=True,\n ant_check=True,\n):\n \"\"\"\n Calibrate a UVData object with a UVCal object.\n\n Parameters\n ----------\n uvdata : UVData object\n UVData object to calibrate.\n uvcal : UVCal object\n UVCal object containing the calibration.\n inplace : bool, optional\n if True edit uvdata in place, else return a calibrated copy\n prop_flags : bool, optional\n if True, propagate calibration flags to data flags\n and doesn't use flagged gains. Otherwise, uses flagged gains and\n does not propagate calibration flags to data flags.\n flag_missing : bool, optional\n Deprecated in favor of ant_check.\n If True, flag baselines in uvdata otherwise don't flag and\n don't calibrate the baseline if a participating antenna or polarization\n is missing in uvcal.\n Dterm_cal : bool, optional\n Calibrate the off-diagonal terms in the Jones matrix if present\n in uvcal. Default is False. Currently not implemented.\n delay_convention : str, optional\n Exponent sign to use in conversion of 'delay' to 'gain' cal_type\n if the input uvcal is not inherently 'gain' cal_type. Default to 'minus'.\n undo : bool, optional\n If True, undo the provided calibration. i.e. apply the calibration with\n flipped gain_convention. Flag propagation rules apply the same.\n time_check : bool\n Option to check that times match between the UVCal and UVData\n objects if UVCal has a single time or time range. Times are always\n checked if UVCal has multiple times.\n ant_check : bool\n Option to check that all antennas with data on the UVData\n object have calibration solutions in the UVCal object. 
If this option is\n set to False, uvcalibrate will proceed without erroring and data for\n antennas without calibrations will be flagged.\n\n Returns\n -------\n UVData, optional\n Returns if not inplace\n\n \"\"\"\n if not inplace:\n uvdata = uvdata.copy()\n\n # Check whether the UVData antennas *that have data associated with them*\n # have associated data in the UVCal object\n uvdata_unique_nums = np.unique(np.append(uvdata.ant_1_array, uvdata.ant_2_array))\n uvdata.antenna_names = np.asarray(uvdata.antenna_names)\n uvdata_used_antnames = np.array(\n [\n uvdata.antenna_names[np.where(uvdata.antenna_numbers == antnum)][0]\n for antnum in uvdata_unique_nums\n ]\n )\n uvcal_unique_nums = np.unique(uvcal.ant_array)\n uvcal.antenna_names = np.asarray(uvcal.antenna_names)\n uvcal_used_antnames = np.array(\n [\n uvcal.antenna_names[np.where(uvcal.antenna_numbers == antnum)][0]\n for antnum in uvcal_unique_nums\n ]\n )\n\n ant_arr_match = uvcal_used_antnames.tolist() == uvdata_used_antnames.tolist()\n\n if not ant_arr_match:\n # check more carefully\n name_missing = []\n for this_ant_name in uvdata_used_antnames:\n wh_ant_match = np.nonzero(uvcal_used_antnames == this_ant_name)\n if wh_ant_match[0].size == 0:\n name_missing.append(this_ant_name)\n\n use_ant_nums = False\n if len(name_missing) > 0:\n if len(name_missing) == uvdata_used_antnames.size:\n # all antenna_names with data on UVData are missing on UVCal.\n if not ant_check:\n warnings.warn(\n \"All antenna names with data on UVData are missing \"\n \"on UVCal. Since ant_check is False, calibration will \"\n \"proceed but all data will be flagged.\"\n )\n else:\n # this entire clause will be replaced with just raising a\n # ValueError in version 2.2\n\n # old behavior only required that antenna numbers were present,\n # not names. Check numbers\n number_missing = []\n for this_ant_name in uvdata_used_antnames:\n uvdata_ant_num = uvdata.antenna_numbers[\n np.where(uvdata.antenna_names == this_ant_name)[0][0]\n ]\n if uvdata_ant_num not in uvcal_unique_nums:\n number_missing.append(this_ant_name)\n\n if len(number_missing) == 0:\n # all have matching numbers on UVCal\n use_ant_nums = True\n warnings.warn(\n \"All antenna names with data on UVData are missing \"\n \"on UVCal. They do all have matching antenna numbers on \"\n \"UVCal. Currently the data will be calibrated using the \"\n \"matching antenna number, but that will be deprecated in \"\n \"version 2.2 and this will become an error.\",\n DeprecationWarning,\n )\n elif len(number_missing) < len(name_missing):\n # Some have matching numbers on UVCal\n use_ant_nums = True\n both_missing = sorted(set(number_missing) & set(name_missing))\n only_name_missing = sorted(\n set(name_missing) - set(number_missing)\n )\n warnings.warn(\n f\"Antennas {only_name_missing} have data on UVData but \"\n \"are missing on UVCal. They do have matching antenna \"\n \"numbers on UVCal. Currently the data for these antennas \"\n \"will be calibrated using the matching antenna number, \"\n \"but that will be deprecated in \"\n \"version 2.2 and this will become an error.\",\n DeprecationWarning,\n )\n if flag_missing is True:\n warnings.warn(\n f\"Antennas {both_missing} have data on UVData but \"\n \"are missing on UVCal. Currently calibration will \"\n \"proceed and since flag_missing is True, the data \"\n \"for these antennas will be flagged. 
This will \"\n \"become an error in version 2.2, to continue \"\n \"calibration and flag missing antennas in the \"\n \"future, set ant_check=False.\",\n DeprecationWarning,\n )\n else:\n warnings.warn(\n f\"Antennas {both_missing} have data on UVData but \"\n \"are missing on UVCal. Currently calibration will \"\n \"proceed and since flag_missing is False, the data \"\n \"for these antennas will not be calibrated or \"\n \"flagged. This will become an error in version 2.2, \"\n \"to continue calibration and flag missing \"\n \"antennas in the future, set ant_check=False.\",\n DeprecationWarning,\n )\n else:\n # Only some antenna_names with data on UVData are missing on UVCal\n if not ant_check:\n warnings.warn(\n f\"Antennas {name_missing} have data on UVData but are missing \"\n \"on UVCal. Since ant_check is False, calibration will \"\n \"proceed and the data for these antennas will be flagged.\"\n )\n else:\n # this entire clause will be replaced with just raising a\n # ValueError in version 2.2\n if flag_missing is True:\n warnings.warn(\n f\"Antennas {name_missing} have data on UVData but \"\n \"are missing on UVCal. Currently calibration will \"\n \"proceed and since flag_missing is True, the data \"\n \"for these antennas will be flagged. This will \"\n \"become an error in version 2.2, to continue \"\n \"calibration and flag missing antennas in the \"\n \"future, set ant_check=False.\",\n DeprecationWarning,\n )\n else:\n warnings.warn(\n f\"Antennas {name_missing} have data on UVData but \"\n \"are missing on UVCal. Currently calibration will \"\n \"proceed and since flag_missing is False, the data \"\n \"for these antennas will not be calibrated or \"\n \"flagged. This will become an error in version 2.2, \"\n \"to continue calibration and flag missing \"\n \"antennas in the future, set ant_check=False.\",\n DeprecationWarning,\n )\n\n uvdata_times = np.unique(uvdata.time_array)\n downselect_cal_times = False\n if uvcal.Ntimes > 1:\n if uvcal.Ntimes < uvdata.Ntimes:\n raise ValueError(\n \"The uvcal object has more than one time but fewer than the \"\n \"number of unique times on the uvdata object.\"\n )\n uvcal_times = np.unique(uvcal.time_array)\n try:\n time_arr_match = np.allclose(\n uvcal_times,\n uvdata_times,\n atol=uvdata._time_array.tols[1],\n rtol=uvdata._time_array.tols[0],\n )\n except ValueError:\n time_arr_match = False\n\n if not time_arr_match:\n # check more carefully\n uvcal_times_to_keep = []\n for this_time in uvdata_times:\n wh_time_match = np.nonzero(\n np.isclose(\n uvcal.time_array - this_time,\n 0,\n atol=uvdata._time_array.tols[1],\n rtol=uvdata._time_array.tols[0],\n )\n )\n if wh_time_match[0].size > 0:\n uvcal_times_to_keep.append(uvcal.time_array[wh_time_match][0])\n else:\n warnings.warn(\n f\"Time {this_time} exists on UVData but not on UVCal. 
\"\n \"This will become an error in version 2.2\",\n DeprecationWarning,\n )\n if len(uvcal_times_to_keep) < uvcal.Ntimes:\n downselect_cal_times = True\n\n elif uvcal.time_range is None:\n # only one UVCal time, no time_range.\n # This cannot match if UVData.Ntimes > 1.\n # If they are both NTimes = 1, then check if they're close.\n if uvdata.Ntimes > 1 or not np.isclose(\n uvdata_times,\n uvcal.time_array,\n atol=uvdata._time_array.tols[1],\n rtol=uvdata._time_array.tols[0],\n ):\n if not time_check:\n warnings.warn(\n \"Times do not match between UVData and UVCal \"\n \"but time_check is False, so calibration \"\n \"will be applied anyway.\"\n )\n else:\n warnings.warn(\n \"Times do not match between UVData and UVCal. \"\n \"Set time_check=False to apply calibration anyway. \"\n \"This will become an error in version 2.2\",\n DeprecationWarning,\n )\n else:\n # time_array is length 1 and time_range exists: check uvdata_times in time_range\n if (\n np.min(uvdata_times) < uvcal.time_range[0]\n or np.max(uvdata_times) > uvcal.time_range[1]\n ):\n if not time_check:\n warnings.warn(\n \"Times do not match between UVData and UVCal \"\n \"but time_check is False, so calibration \"\n \"will be applied anyway.\"\n )\n else:\n warnings.warn(\n \"Times do not match between UVData and UVCal. \"\n \"Set time_check=False to apply calibration anyway. \"\n \"This will become an error in version 2.2\",\n DeprecationWarning,\n )\n\n downselect_cal_freq = False\n try:\n freq_arr_match = np.allclose(\n np.sort(uvcal.freq_array[0, :]),\n np.sort(uvdata.freq_array[0, :]),\n atol=uvdata._freq_array.tols[1],\n rtol=uvdata._freq_array.tols[0],\n )\n except ValueError:\n freq_arr_match = False\n\n if freq_arr_match is False:\n # check more carefully\n uvcal_freqs_to_keep = []\n for this_freq in uvdata.freq_array[0, :]:\n wh_freq_match = np.nonzero(\n np.isclose(\n uvcal.freq_array - this_freq,\n 0,\n atol=uvdata._freq_array.tols[1],\n rtol=uvdata._freq_array.tols[0],\n )\n )\n if wh_freq_match[0].size > 0:\n uvcal_freqs_to_keep.append(uvcal.freq_array[wh_freq_match][0])\n else:\n warnings.warn(\n f\"Frequency {this_freq} exists on UVData but not on UVCal. \"\n \"This will become an error in version 2.2\",\n DeprecationWarning,\n )\n if len(uvcal_freqs_to_keep) < uvcal.Nfreqs:\n downselect_cal_freq = True\n\n uvdata_pol_strs = polnum2str(\n uvdata.polarization_array, x_orientation=uvdata.x_orientation\n )\n uvcal_pol_strs = jnum2str(uvcal.jones_array, x_orientation=uvcal.x_orientation)\n uvdata_feed_pols = {\n feed for pol in uvdata_pol_strs for feed in POL_TO_FEED_DICT[pol]\n }\n for feed in uvdata_feed_pols:\n # get diagonal jones str\n jones_str = parse_jpolstr(feed, x_orientation=uvcal.x_orientation)\n if jones_str not in uvcal_pol_strs:\n warnings.warn(\n f\"Feed polarization {feed} exists on UVData but not on UVCal. 
\"\n \"This will become an error in version 2.2\",\n DeprecationWarning,\n )\n\n # downselect UVCal times, frequencies\n if downselect_cal_freq or downselect_cal_times:\n if not downselect_cal_times:\n uvcal_times_to_keep = None\n elif not downselect_cal_freq:\n uvcal_freqs_to_keep = None\n\n # handle backwards compatibility: prevent downselecting to nothing\n # or to shapes that don't match\n if downselect_cal_times and len(uvcal_times_to_keep) < uvdata.Ntimes:\n downselect_cal_times = False\n uvcal_times_to_keep = None\n if downselect_cal_freq and len(uvcal_freqs_to_keep) < uvdata.Nfreqs:\n downselect_cal_freq = False\n uvcal_freqs_to_keep = None\n\n if downselect_cal_freq or downselect_cal_times:\n uvcal_use = uvcal.select(\n times=uvcal_times_to_keep, frequencies=uvcal_freqs_to_keep, inplace=False\n )\n\n new_uvcal = True\n else:\n uvcal_use = uvcal\n new_uvcal = False\n\n # input checks\n if uvcal_use.cal_type == \"delay\":\n if not new_uvcal:\n # make a copy to convert to gain\n uvcal_use = uvcal_use.copy()\n new_uvcal = True\n uvcal_use.convert_to_gain(delay_convention=delay_convention)\n\n # D-term calibration\n if Dterm_cal:\n # check for D-terms\n if -7 not in uvcal_use.jones_array and -8 not in uvcal_use.jones_array:\n raise ValueError(\n \"Cannot apply D-term calibration without -7 or -8\"\n \"Jones polarization in uvcal object.\"\n )\n raise NotImplementedError(\"D-term calibration is not yet implemented.\")\n\n # No D-term calibration\n else:\n # key is number, value is name\n uvdata_ant_dict = dict(zip(uvdata.antenna_numbers, uvdata.antenna_names))\n # opposite: key is name, value is number\n uvcal_ant_dict = dict(zip(uvcal.antenna_names, uvcal.antenna_numbers))\n\n # iterate over keys\n for key in uvdata.get_antpairpols():\n # get indices for this key\n blt_inds = uvdata.antpair2ind(key)\n pol_ind = np.argmin(\n np.abs(\n uvdata.polarization_array - polstr2num(key[2], uvdata.x_orientation)\n )\n )\n\n # try to get gains for each antenna\n ant1_num = key[0]\n ant2_num = key[1]\n\n feed1, feed2 = POL_TO_FEED_DICT[key[2]]\n try:\n uvcal_ant1_num = uvcal_ant_dict[uvdata_ant_dict[ant1_num]]\n except KeyError:\n if use_ant_nums:\n # backwards compatibility: use antenna numbers instead\n # this will be removed in version 2.2\n uvcal_ant1_num = ant1_num\n else:\n uvcal_ant1_num = None\n try:\n uvcal_ant2_num = uvcal_ant_dict[uvdata_ant_dict[ant2_num]]\n except KeyError:\n if use_ant_nums:\n # backwards compatibility: use antenna numbers instead\n # this will be removed in version 2.2\n uvcal_ant2_num = ant2_num\n else:\n uvcal_ant2_num = None\n\n uvcal_key1 = (uvcal_ant1_num, feed1)\n uvcal_key2 = (uvcal_ant2_num, feed2)\n\n if uvcal_ant1_num is None or uvcal_ant2_num is None:\n uvdata.flag_array[blt_inds, 0, :, pol_ind] = True\n continue\n elif not uvcal_use._has_key(*uvcal_key1) or not uvcal_use._has_key(\n *uvcal_key2\n ):\n if flag_missing:\n uvdata.flag_array[blt_inds, 0, :, pol_ind] = True\n continue\n gain = (\n uvcal_use.get_gains(uvcal_key1)\n * np.conj(uvcal_use.get_gains(uvcal_key2))\n ).T # tranpose to match uvdata shape\n flag = (uvcal_use.get_flags(uvcal_key1) | uvcal_use.get_flags(uvcal_key2)).T\n\n # propagate flags\n if prop_flags:\n mask = np.isclose(gain, 0.0) | flag\n gain[mask] = 1.0\n uvdata.flag_array[blt_inds, 0, :, pol_ind] += mask\n\n # apply to data\n mult_gains = uvcal_use.gain_convention == \"multiply\"\n if undo:\n mult_gains = not mult_gains\n if mult_gains:\n uvdata.data_array[blt_inds, 0, :, pol_ind] *= gain\n else:\n uvdata.data_array[blt_inds, 0, 
:, pol_ind] /= gain\n\n # update attributes\n uvdata.history += \"\\nCalibrated with pyuvdata.utils.uvcalibrate.\"\n if undo:\n uvdata.vis_units = \"UNCALIB\"\n else:\n if uvcal_use.gain_scale is not None:\n uvdata.vis_units = uvcal_use.gain_scale\n\n if not inplace:\n return uvdata\n\n\ndef apply_uvflag(\n uvd, uvf, inplace=True, unflag_first=False, flag_missing=True, force_pol=True\n):\n \"\"\"\n Apply flags from a UVFlag to a UVData instantiation.\n\n Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across\n that axis.\n\n Parameters\n ----------\n uvd : UVData object\n UVData object to add flags to.\n uvf : UVFlag object\n A UVFlag object in flag mode.\n inplace : bool\n If True overwrite flags in uvd, otherwise return new object\n unflag_first : bool\n If True, completely unflag the UVData before applying flags.\n Else, OR the inherent uvd flags with uvf flags.\n flag_missing : bool\n If input uvf is a baseline type and antpairs in uvd do not exist in uvf,\n flag them in uvd. Otherwise leave them untouched.\n force_pol : bool\n If True, broadcast flags to all polarizations if they do not match.\n Only works if uvf.Npols == 1.\n\n Returns\n -------\n UVData\n If not inplace, returns new UVData object with flags applied\n\n \"\"\"\n # assertions\n if uvf.mode != \"flag\":\n raise ValueError(\"UVFlag must be flag mode\")\n\n if not inplace:\n uvd = uvd.copy()\n\n # make a deepcopy by default b/c it is generally edited inplace downstream\n uvf = uvf.copy()\n\n # convert to baseline type\n if uvf.type != \"baseline\":\n # edits inplace\n uvf.to_baseline(uvd, force_pol=force_pol)\n\n else:\n # make sure polarizations match or force_pol\n uvd_pols, uvf_pols = (\n uvd.polarization_array.tolist(),\n uvf.polarization_array.tolist(),\n )\n if set(uvd_pols) != set(uvf_pols):\n if uvf.Npols == 1 and force_pol:\n # if uvf is 1pol we can make them match: also edits inplace\n uvf.polarization_array = uvd.polarization_array\n uvf.Npols = len(uvf.polarization_array)\n uvf_pols = uvf.polarization_array.tolist()\n\n else:\n raise ValueError(\"Input uvf and uvd polarizations do not match\")\n\n # make sure polarization ordering is correct: also edits inplace\n uvf.polarization_array = uvf.polarization_array[\n [uvd_pols.index(pol) for pol in uvf_pols]\n ]\n\n # check time and freq shapes match: if Ntimes or Nfreqs is 1, allow\n # implicit broadcasting\n if uvf.Ntimes == 1:\n mismatch_times = False\n elif uvf.Ntimes == uvd.Ntimes:\n tdiff = np.unique(uvf.time_array) - np.unique(uvd.time_array)\n mismatch_times = np.any(tdiff > np.max(np.abs(uvf._time_array.tols)))\n else:\n mismatch_times = True\n if mismatch_times:\n raise ValueError(\"UVFlag and UVData have mismatched time arrays.\")\n\n if uvf.Nfreqs == 1:\n mismatch_freqs = False\n elif uvf.Nfreqs == uvd.Nfreqs:\n fdiff = np.unique(uvf.freq_array) - np.unique(uvd.freq_array)\n mismatch_freqs = np.any(fdiff > np.max(np.abs(uvf._freq_array.tols)))\n else:\n mismatch_freqs = True\n if mismatch_freqs:\n raise ValueError(\"UVFlag and UVData have mismatched frequency arrays.\")\n\n # unflag if desired\n if unflag_first:\n uvd.flag_array[:] = False\n\n # iterate over antpairs and apply flags: TODO need to be able to handle\n # conjugated antpairs\n uvf_antpairs = uvf.get_antpairs()\n for ap in uvd.get_antpairs():\n uvd_ap_inds = uvd.antpair2ind(ap)\n if ap not in uvf_antpairs:\n if flag_missing:\n uvd.flag_array[uvd_ap_inds] = True\n continue\n uvf_ap_inds = uvf.antpair2ind(*ap)\n # addition of boolean is OR\n uvd.flag_array[uvd_ap_inds] += 
uvf.flag_array[uvf_ap_inds]\n\n uvd.history += \"\\nFlagged with pyuvdata.utils.apply_uvflags.\"\n\n if not inplace:\n return uvd\n\n\ndef parse_ants(uv, ant_str, print_toggle=False, x_orientation=None):\n \"\"\"\n Get antpair and polarization from parsing an aipy-style ant string.\n\n Used to support the the select function.\n Generates two lists of antenna pair tuples and polarization indices based\n on parsing of the string ant_str. If no valid polarizations (pseudo-Stokes\n params, or combinations of [lr] or [xy]) or antenna numbers are found in\n ant_str, ant_pairs_nums and polarizations are returned as None.\n\n Parameters\n ----------\n uv : UVBase Object\n A UVBased object that supports the following functions and parameters:\n - get_ants\n - get_antpairs\n - get_pols\n These are used to construct the baseline ant_pair_nums\n and polarizations returned.\n ant_str : str\n String containing antenna information to parse. Can be 'all',\n 'auto', 'cross', or combinations of antenna numbers and polarization\n indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used\n in front of an antenna number or baseline to exclude it from being\n output in ant_pairs_nums. If ant_str has a minus sign as the first\n character, 'all,' will be appended to the beginning of the string.\n See the tutorial for examples of valid strings and their behavior.\n print_toggle : bool\n Boolean for printing parsed baselines for a visual user check.\n x_orientation : str, optional\n Orientation of the physical dipole corresponding to what is\n labelled as the x polarization (\"east\" or \"north\") to allow for\n converting from E/N strings. If input uv object has an `x_orientation`\n parameter and the input to this function is `None`, the value from the\n object will be used. Any input given to this function will override the\n value on the uv object. 
See corresponding parameter on UVData\n for more details.\n\n Returns\n -------\n ant_pairs_nums : list of tuples of int or None\n List of tuples containing the parsed pairs of antenna numbers, or\n None if ant_str is 'all' or a pseudo-Stokes polarization.\n polarizations : list of int or None\n List of desired polarizations or None if ant_str does not contain a\n polarization specification.\n\n \"\"\"\n required_attrs = [\"get_ants\", \"get_antpairs\", \"get_pols\"]\n if not all(hasattr(uv, attr) for attr in required_attrs):\n raise ValueError(\n \"UVBased objects must have all the following attributes in order \"\n f\"to call 'parse_ants': {required_attrs}.\"\n )\n\n if x_orientation is None and (\n hasattr(uv, \"x_orientation\") and uv.x_orientation is not None\n ):\n x_orientation = uv.x_orientation\n\n ant_re = r\"(\\(((-?\\d+[lrxy]?,?)+)\\)|-?\\d+[lrxy]?)\"\n bl_re = \"(^(%s_%s|%s),?)\" % (ant_re, ant_re, ant_re)\n str_pos = 0\n ant_pairs_nums = []\n polarizations = []\n ants_data = uv.get_ants()\n ant_pairs_data = uv.get_antpairs()\n pols_data = uv.get_pols()\n warned_ants = []\n warned_pols = []\n\n if ant_str.startswith(\"-\"):\n ant_str = \"all,\" + ant_str\n\n while str_pos < len(ant_str):\n m = re.search(bl_re, ant_str[str_pos:])\n if m is None:\n if ant_str[str_pos:].upper().startswith(\"ALL\"):\n if len(ant_str[str_pos:].split(\",\")) > 1:\n ant_pairs_nums = uv.get_antpairs()\n elif ant_str[str_pos:].upper().startswith(\"AUTO\"):\n for pair in ant_pairs_data:\n if pair[0] == pair[1] and pair not in ant_pairs_nums:\n ant_pairs_nums.append(pair)\n elif ant_str[str_pos:].upper().startswith(\"CROSS\"):\n for pair in ant_pairs_data:\n if not (pair[0] == pair[1] or pair in ant_pairs_nums):\n ant_pairs_nums.append(pair)\n elif ant_str[str_pos:].upper().startswith(\"PI\"):\n polarizations.append(polstr2num(\"pI\"))\n elif ant_str[str_pos:].upper().startswith(\"PQ\"):\n polarizations.append(polstr2num(\"pQ\"))\n elif ant_str[str_pos:].upper().startswith(\"PU\"):\n polarizations.append(polstr2num(\"pU\"))\n elif ant_str[str_pos:].upper().startswith(\"PV\"):\n polarizations.append(polstr2num(\"pV\"))\n else:\n raise ValueError(\"Unparsable argument {s}\".format(s=ant_str))\n\n comma_cnt = ant_str[str_pos:].find(\",\")\n if comma_cnt >= 0:\n str_pos += comma_cnt + 1\n else:\n str_pos = len(ant_str)\n else:\n m = m.groups()\n str_pos += len(m[0])\n if m[2] is None:\n ant_i_list = [m[8]]\n ant_j_list = list(uv.get_ants())\n else:\n if m[3] is None:\n ant_i_list = [m[2]]\n else:\n ant_i_list = m[3].split(\",\")\n\n if m[6] is None:\n ant_j_list = [m[5]]\n else:\n ant_j_list = m[6].split(\",\")\n\n for ant_i in ant_i_list:\n include_i = True\n if type(ant_i) == str and ant_i.startswith(\"-\"):\n ant_i = ant_i[1:] # nibble the - off the string\n include_i = False\n\n for ant_j in ant_j_list:\n include_j = True\n if type(ant_j) == str and ant_j.startswith(\"-\"):\n ant_j = ant_j[1:]\n include_j = False\n\n pols = None\n ant_i, ant_j = str(ant_i), str(ant_j)\n if not ant_i.isdigit():\n ai = re.search(r\"(\\d+)([x,y,l,r])\", ant_i).groups()\n\n if not ant_j.isdigit():\n aj = re.search(r\"(\\d+)([x,y,l,r])\", ant_j).groups()\n\n if ant_i.isdigit() and ant_j.isdigit():\n ai = [ant_i, \"\"]\n aj = [ant_j, \"\"]\n elif ant_i.isdigit() and not ant_j.isdigit():\n if \"x\" in ant_j or \"y\" in ant_j:\n pols = [\"x\" + aj[1], \"y\" + aj[1]]\n else:\n pols = [\"l\" + aj[1], \"r\" + aj[1]]\n ai = [ant_i, \"\"]\n elif not ant_i.isdigit() and ant_j.isdigit():\n if \"x\" in ant_i or \"y\" in ant_i:\n pols = 
[ai[1] + \"x\", ai[1] + \"y\"]\n else:\n pols = [ai[1] + \"l\", ai[1] + \"r\"]\n aj = [ant_j, \"\"]\n elif not ant_i.isdigit() and not ant_j.isdigit():\n pols = [ai[1] + aj[1]]\n\n ant_tuple = (abs(int(ai[0])), abs(int(aj[0])))\n\n # Order tuple according to order in object\n if ant_tuple in ant_pairs_data:\n pass\n elif ant_tuple[::-1] in ant_pairs_data:\n ant_tuple = ant_tuple[::-1]\n else:\n if not (\n ant_tuple[0] in ants_data or ant_tuple[0] in warned_ants\n ):\n warned_ants.append(ant_tuple[0])\n if not (\n ant_tuple[1] in ants_data or ant_tuple[1] in warned_ants\n ):\n warned_ants.append(ant_tuple[1])\n if pols is not None:\n for pol in pols:\n if not (pol.lower() in pols_data or pol in warned_pols):\n warned_pols.append(pol)\n continue\n\n if include_i and include_j:\n if ant_tuple not in ant_pairs_nums:\n ant_pairs_nums.append(ant_tuple)\n if pols is not None:\n for pol in pols:\n if (\n pol.lower() in pols_data\n and polstr2num(pol, x_orientation=x_orientation)\n not in polarizations\n ):\n polarizations.append(\n polstr2num(pol, x_orientation=x_orientation)\n )\n elif not (\n pol.lower() in pols_data or pol in warned_pols\n ):\n warned_pols.append(pol)\n else:\n if pols is not None:\n for pol in pols:\n if pol.lower() in pols_data:\n if uv.Npols == 1 and [pol.lower()] == pols_data:\n ant_pairs_nums.remove(ant_tuple)\n if (\n polstr2num(pol, x_orientation=x_orientation)\n in polarizations\n ):\n polarizations.remove(\n polstr2num(\n pol, x_orientation=x_orientation,\n )\n )\n elif not (\n pol.lower() in pols_data or pol in warned_pols\n ):\n warned_pols.append(pol)\n elif ant_tuple in ant_pairs_nums:\n ant_pairs_nums.remove(ant_tuple)\n\n if ant_str.upper() == \"ALL\":\n ant_pairs_nums = None\n elif len(ant_pairs_nums) == 0:\n if not ant_str.upper() in [\"AUTO\", \"CROSS\"]:\n ant_pairs_nums = None\n\n if len(polarizations) == 0:\n polarizations = None\n else:\n polarizations.sort(reverse=True)\n\n if print_toggle:\n print(\"\\nParsed antenna pairs:\")\n if ant_pairs_nums is not None:\n for pair in ant_pairs_nums:\n print(pair)\n\n print(\"\\nParsed polarizations:\")\n if polarizations is not None:\n for pol in polarizations:\n print(polnum2str(pol, x_orientation=x_orientation))\n\n if len(warned_ants) > 0:\n warnings.warn(\n \"Warning: Antenna number {a} passed, but not present \"\n \"in the ant_1_array or ant_2_array\".format(\n a=(\",\").join(map(str, warned_ants))\n )\n )\n\n if len(warned_pols) > 0:\n warnings.warn(\n \"Warning: Polarization {p} is not present in \"\n \"the polarization_array\".format(p=(\",\").join(warned_pols).upper())\n )\n\n return ant_pairs_nums, polarizations\n", "id": "12610854", "language": "Python", "matching_score": 3.059738874435425, "max_stars_count": 0, "path": "pyuvdata/utils.py" }, { "content": "\"\"\"Functions used in preprocessing of hdf5 files.\n\nPrepares hdf5 files written by dsa-meridian-fs for conversion to ms.\n\"\"\"\nimport re\nimport os\nimport shutil\nimport subprocess\nfrom urllib.request import urlretrieve\nimport pandas\nfrom pkg_resources import resource_filename, resource_exists\nimport numpy as np\nimport astropy.units as u\nfrom pyuvdata import UVData\nimport dsautils.dsa_syslog as dsl\nimport dsautils.cnf as cnf\nimport dsacalib.constants as ct\nfrom dsacalib.fringestopping import pb_resp\n\nLOGGER = dsl.DsaSyslogger()\nLOGGER.subsystem(\"software\")\nLOGGER.app(\"dsamfs\")\n\nCONF = cnf.Conf()\nMFS_CONF = CONF.get('fringe')\n# parameters for freq scrunching\nNFREQ = MFS_CONF['nfreq_scrunch']\n# Outrigger delays are those 
estimated by <NAME> based on the cable\n# length.\nOUTRIGGER_DELAYS = MFS_CONF['outrigger_delays']\n\ndef first_true(iterable, default=False, pred=None):\n \"\"\"Returns the first true value in the iterable.\n\n If no true value is found, returns ``default``\n If ``pred`` is not None, returns the first item\n for which pred(item) is true.\n\n Parameters\n ----------\n iterable : list\n The list for which to find the first True item.\n default :\n If not False, then this is returned if no true value is found.\n Defaults False.\n pred :\n If not None, then the first item for which pred(item) is True is\n returned.\n \"\"\"\n # first_true([a,b,c], x) --> a or b or c or x\n # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x\n return next(filter(pred, iterable), default)\n\ndef rsync_file(rsync_string, remove_source_files=True):\n \"\"\"Rsyncs a file from the correlator machines to dsastorage.\n\n Parameters\n ----------\n rsync_string : str\n E.g. 'corr06.sas.pvt:/home/ubuntu/data/2020-06-24T12:32:06.hdf5 /mnt/data/dsa110/correlator/corr06/'\n \"\"\"\n fname, fdir = rsync_string.split(' ')\n if remove_source_files:\n command = '. ~/.keychain/dsa-storage-sh; rsync -avv --remove-source-files {0} {1}'.format(fname, fdir)\n else:\n command = '. ~/.keychain/dsa-storage-sh; rsync -avv {0} {1}'.format(fname, fdir)\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True\n )\n proc_stdout = str(process.communicate()[0].strip())\n print(proc_stdout)\n LOGGER.info(proc_stdout)\n fname = fname.split('/')[-1]\n #if output.returncode != 0:\n # print(output)\n return '{0}{1}'.format(fdir, fname)\n\ndef remove_outrigger_delays(UVhandler, outrigger_delays=OUTRIGGER_DELAYS):\n \"\"\"Remove outrigger delays from open UV object.\n \"\"\"\n if 'applied_delays_ns' in UVhandler.extra_keywords.keys():\n applied_delays = np.array(\n UVhandler.extra_keywords['applied_delays_ns'].split(' ')\n ).astype(np.int).reshape(-1, 2)\n else:\n applied_delays = np.zeros((UVhandler.Nants_telescope, 2), np.int)\n fobs = UVhandler.freq_array*u.Hz\n # Remove delays for the outrigger antennas\n for ant, delay in outrigger_delays.items():\n phase_model = np.exp(\n (\n 2.j*np.pi*fobs*(delay*u.nanosecond)\n ).to_value(u.dimensionless_unscaled)\n ).reshape(1, fobs.shape[0], fobs.shape[1], 1)\n UVhandler.data_array[\n (UVhandler.ant_1_array!=UVhandler.ant_2_array) &\n (UVhandler.ant_2_array==ant-1)\n ] *= phase_model\n UVhandler.data_array[\n (UVhandler.ant_1_array!=UVhandler.ant_2_array) &\n (UVhandler.ant_1_array==ant-1)\n ] /= phase_model\n applied_delays[ant-1, :] += int(delay)\n if 'applied_delays_ns' in UVhandler.extra_keywords.keys():\n UVhandler.extra_keywords['applied_delays_ns'] = np.string_(\n ' '.join([str(d) for d in applied_delays.flatten()])\n )\n\ndef fscrunch_file(fname):\n \"\"\"Removes outrigger delays before averaging in frequency.\n\n Leaves file untouched if the number of frequency bins is not divisible\n by the desired number of frequency bins (NFREQ), or is equal to the desired\n number of frequency bins.\n\n Parameters\n ----------\n fname : str\n The full path to the file to process.\n \"\"\"\n # Process the file\n # print(fname)\n UV = UVData()\n UV.read_uvh5(fname, run_check_acceptability=False)\n nint = UV.Nfreqs//NFREQ\n if nint > 1 and UV.Nfreqs%nint == 0:\n remove_outrigger_delays(UV)\n # Scrunch in frequency by factor of nint\n UV.frequency_average(n_chan_to_avg=nint)\n if os.path.exists(fname.replace('.hdf5', '_favg.hdf5')):\n 
os.remove(fname.replace('.hdf5', '_favg.hdf5'))\n UV.write_uvh5(fname.replace('.hdf5', '_favg.hdf5'), run_check_acceptability=False)\n # Move the original data to a new directory\n corrname = re.findall('corr\\d\\d', fname)[0]\n os.rename(\n fname,\n fname.replace(\n '{0}'.format(corrname),\n '{0}/full_freq_resolution/'.format(corrname)\n )\n )\n os.rename(\n fname.replace('.hdf5', '_favg.hdf5'),\n fname\n )\n return fname\n\ndef read_nvss_catalog():\n \"\"\"Reads the NVSS catalog into a pandas dataframe.\n \"\"\"\n if not resource_exists('dsacalib', 'data/heasarc_nvss.tdat'):\n urlretrieve(\n 'https://heasarc.gsfc.nasa.gov/FTP/heasarc/dbase/tdat_files/heasarc_nvss.tdat.gz',\n resource_filename('dsacalib', 'data/heasarc_nvss.tdat.gz')\n )\n os.system('gunzip {0}'.format(\n resource_filename('dsacalib', 'data/heasarc_nvss.tdat.gz')\n ))\n\n df = pandas.read_csv(\n resource_filename('dsacalib','data/heasarc_nvss.tdat'),\n sep='|',\n skiprows=67,\n names=[\n 'ra',\n 'dec',\n 'lii',\n 'bii',\n 'ra_error',\n 'dec_error',\n 'flux_20_cm',\n 'flux_20_cm_error',\n 'limit_major_axis',\n 'major_axis',\n 'major_axis_error',\n 'limit_minor_axis',\n 'minor_axis',\n 'minor_axis_error',\n 'position_angle',\n 'position_angle_error',\n 'residual_code',\n 'residual_flux',\n 'pol_flux',\n 'pol_flux_error',\n 'pol_angle',\n 'pol_angle_error',\n 'field_name',\n 'x_pixel',\n 'y_pixel',\n 'extra'\n ],\n )\n df.drop(df.tail(1).index, inplace=True)\n df.drop(['extra'], axis='columns', inplace=True)\n return df\n\ndef generate_caltable(\n pt_dec,\n csv_string,\n radius=2.5*u.deg,\n min_weighted_flux=1*u.Jy,\n min_percent_flux=0.15\n):\n \"\"\"Generate a table of calibrators at a given declination.\n\n Parameters\n ----------\n pt_dec : astropy quantity\n The pointing declination, in degrees or radians.\n radius : astropy quantity\n The radius of the DSA primary beam. 
Only sources out to this radius\n from the pointing declination are considered.\n min_weighted_flux : astropy quantity\n The minimum primary-beam response-weighted flux of a calibrator for\n which it is included in the calibrator list, in Jy or equivalent.\n min_percent_flux : float\n The minimum ratio of the calibrator weighted flux to the weighted flux\n in the primary beam for which to include the calibrator.\n \"\"\"\n df = read_nvss_catalog()\n calibrators = df[\n (df['dec'] < (pt_dec+radius).to_value(u.deg)) &\n (df['dec'] > (pt_dec-radius).to_value(u.deg)) &\n (df['flux_20_cm'] > 1000)\n ]\n # Calculate field flux and weighted flux for each calibrator\n calibrators = calibrators.assign(field_flux=np.zeros(len(calibrators)))\n calibrators = calibrators.assign(weighted_flux=np.zeros(len(calibrators)))\n for name, row in calibrators.iterrows():\n calibrators['weighted_flux'].loc[name] = row['flux_20_cm']/1e3*pb_resp(\n row['ra']*(1*u.deg).to_value(u.rad),\n pt_dec.to_value(u.rad),\n row['ra']*(1*u.deg).to_value(u.rad),\n row['dec']*(1*u.deg).to_value(u.rad),\n 1.4\n )\n field = df[\n (df['dec'] < (pt_dec+radius).to_value(u.deg)) &\n (df['dec'] > (pt_dec-radius).to_value(u.deg)) &\n (df['ra'] < row['ra']+radius.to_value(u.deg)/np.cos(pt_dec)) &\n (df['ra'] > row['ra']-radius.to_value(u.deg)/np.cos(pt_dec))\n ]\n field = field.assign(weighted_flux=np.zeros(len(field)))\n for fname, frow in field.iterrows():\n field['weighted_flux'].loc[fname] = frow['flux_20_cm']/1e3*pb_resp(\n row['ra']*(1*u.deg).to_value(u.rad),\n pt_dec.to_value(u.rad),\n frow['ra']*(1*u.deg).to_value(u.rad),\n frow['dec']*(1*u.deg).to_value(u.rad),\n 1.4\n )\n calibrators['field_flux'].loc[name] = sum(field['weighted_flux'])\n # Calculate percent of the field flux that is contained in the\n # main calibrator\n calibrators = calibrators.assign(\n percent_flux=calibrators['weighted_flux']/calibrators['field_flux']\n )\n # Keep calibrators based on the weighted flux and percent flux\n calibrators = calibrators[\n (calibrators['weighted_flux'] > min_weighted_flux.to_value(u.Jy)) &\n (calibrators['percent_flux'] > min_percent_flux)]\n # Create the caltable needed by the calibrator service\n caltable = calibrators[[\n 'ra', 'dec', 'flux_20_cm', 'weighted_flux', 'percent_flux'\n ]]\n caltable.reset_index(inplace=True)\n caltable.rename(\n columns={\n \"index\": \"source\",\n \"flux_20_cm\": \"flux (Jy)\",\n \"weighted_flux\": \"weighted flux (Jy)\",\n \"percent_flux\": \"percent flux\"\n },\n inplace=True\n )\n caltable['flux (Jy)'] = caltable['flux (Jy)']/1e3\n caltable['source'] = [sname.strip('NVSS ') for sname in caltable['source']]\n caltable['ra'] = caltable['ra']*u.deg\n caltable['dec'] = caltable['dec']*u.deg\n caltable.to_csv(resource_filename('dsacalib', csv_string))\n\ndef update_caltable(pt_dec):\n \"\"\"Updates caltable to new elevation.\n\n If needed, a new caltable is written to the dsacalib data directory.\n The caltable to be used is copied to 'calibrator_sources.csv' in the\n dsacalib data directory.\n\n Parameters\n ----------\n pt_el : astropy quantity\n The antenna pointing elevation in degrees or equivalent.\n \"\"\"\n csv_string = 'data/calibrator_sources_dec{0}{1}.csv'.format(\n '+' if pt_dec.to_value(u.deg) >= 0 else '-',\n '{0:05.1f}'.format(pt_dec.to_value(u.deg)).replace('.', 'p')\n )\n if not resource_exists('dsacalib', csv_string):\n generate_caltable(pt_dec, csv_string)\n return resource_filename('dsacalib', csv_string)\n", "id": "11937331", "language": "Python", "matching_score": 
2.6121666431427, "max_stars_count": 1, "path": "dsacalib/preprocess.py" }, { "content": "\"\"\"Simple utilities for T3 imaging.\n\"\"\"\nimport subprocess\nimport numpy as np\nfrom influxdb import DataFrameClient\nimport astropy.units as u\nfrom astropy.coordinates import Angle\nimport dsacalib.constants as ct\nfrom dsacalib.utils import direction\nimport numpy as np\nimport datetime\nfrom psrqpy import QueryATNF\nfrom astropy.coordinates import SkyCoord, ITRS, EarthLocation\nfrom astropy.time import Time\nfrom dsautils import dsa_store\nfrom progress.bar import Bar\nimport dsacalib.constants as ct\nimport dsautils.cnf as cnf\nds = dsa_store.DsaStore()\n\nMY_CNF = cnf.Conf()\nCORR_CNF = MY_CNF.get('corr')\nquery = QueryATNF(params=['DM', 'RAJ', 'DECJ', 'S1400', 'PSRJ', 'PSRB'])\ninflux = DataFrameClient('influxdbservice.sas.pvt', 8086, 'root', 'root', 'dsa110')\n\n\ndef get_elevation_mjd(tobs):\n \"\"\"Gets the pointing elevation at a time in the past.\n\n Parameters\n ----------\n tobs : astropy.time.Time object\n The observing time.\n \n Returns\n -------\n astropy Quantity\n The pointing elevation in degrees or equivalent.\n \"\"\"\n time_ms = int(tobs.unix*1000)\n query = ('SELECT ant_num, ant_el, ant_cmd_el, ant_el_err FROM \"antmon\" WHERE '\n 'time >= {0}ms and time < {1}ms'.format(time_ms-500, time_ms+500))\n el_df = influx.query(query)\n el_df = el_df['antmon']\n el = np.median(el_df[np.abs(el_df['ant_el_err']) < 1.]['ant_cmd_el'])*u.deg\n return el\n\n\ndef get_declination(elevation, latitude=ct.OVRO_LAT*u.rad):\n \"\"\"Calculates the declination from the elevation.\n \n Parameters\n ----------\n elevation : astropy Quantity\n The elevation, in degrees or equivalent.\n latitude : astropy Quantity\n The latitude of the telescope, in degrees or equivalent.\n\n Returns\n -------\n astropy Quantity\n The declination, in degrees or equivalent.\n \"\"\"\n return (elevation+latitude-90*u.deg).to(u.deg)\n\n\ndef get_declination_mjd(tobs, latitude=ct.OVRO_LAT*u.rad):\n \"\"\"Gets the pointing declination at a time in the past.\n\n Parameters\n ----------\n tobs : astropy.Time object\n The observing time.\n latitude : astropy Quantity\n The telescope latitude.\n\n Returns\n -------\n astropy Quantity\n The declination, in degrees or equivalent.\n \"\"\"\n elevation = get_elevation_mjd(tobs)\n return get_declination(elevation)\n\n\ndef get_pointing_mjd(mjd):\n tmjd = Time(mjd, scale='utc', format='mjd')\n ra_mjd = (tmjd.sidereal_time('apparent', longitude=ct.OVRO_LON*(180./np.pi)*u.deg)).deg\n dec_mjd = get_declination_mjd(tmjd)\n\n return ra_mjd*u.deg, dec_mjd\n\n\ndef get_pointing_declination(tol=0.25):\n \"\"\"Gets the pointing declination from the commanded antenna elevations.\n Parameters\n ----------\n tol : float\n The tolerance for discrepancies in the antenna pointing and commanded\n elevations, in degrees.\n Returns\n -------\n astropy quantity\n The pointing declination, in degrees or equivalent.\n \"\"\"\n commanded_els = np.zeros(len(CORR_CNF['antenna_order']))\n for idx, ant in CORR_CNF['antenna_order'].items():\n try:\n antmc = ds.get_dict('/mon/ant/{0}'.format(ant))\n a1 = np.abs(antmc['ant_el'] - antmc['ant_cmd_el'])\n except:\n a1 = 2.*tol\n if a1 < tol:\n commanded_els[idx] = antmc['ant_cmd_el']\n else:\n commanded_els[idx] = np.nan\n\n pt_el = np.nanmedian(commanded_els)\n if pt_el is not np.nan:\n pt_dec = ct.OVRO_LAT*u.rad + pt_el*u.deg - 90*u.deg\n else:\n pt_el = CORR_CNF['pt_dec']\n return pt_dec\n\n\ndef get_pointing_now():\n tnow = Time(datetime.datetime.now(), 
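# Hedged sketch: the elevation-to-declination relation used by get_declination
# above for a telescope slewing only in elevation along the meridian:
# dec = elevation + latitude - 90 deg. The latitude below is illustrative,
# not the OVRO constant imported from dsacalib.constants.
import astropy.units as u

def declination_from_elevation(elevation, latitude):
    # Meridian pointing: declination follows directly from elevation.
    return (elevation + latitude - 90 * u.deg).to(u.deg)

lat = 37.23 * u.deg                                      # toy latitude
print(declination_from_elevation(90 * u.deg, lat))       # zenith -> dec == latitude
print(declination_from_elevation(52.77 * u.deg, lat))    # -> ~0 deg declination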
scale='utc')\n ra_now = (tnow.sidereal_time('apparent', longitude=ct.OVRO_LON*(180./np.pi)*u.deg)).deg\n dec_now = get_pointing_declination()\n dec_now = np.rad2deg(dec_now.value)\n \n return ra_now*u.deg, dec_now*u.deg, tnow.mjd\n\ndef get_galcoord(ra, dec):\n c = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')\n galcoord = c.galactic\n return galcoord.l.deg, galcoord.b.deg\n\ndef match_pulsar(RA_mjd, Dec_mjd, thresh_deg=3.5):\n RA_psr, Dec_psr, DM = np.array(query['RAJ']), np.array(query['DECJ']), np.array(query['DM'])\n# print(RA_mjd, Dec_mjd)\n c = SkyCoord(ra=RA_mjd, dec=Dec_mjd)\n catalog = SkyCoord(ra=RA_psr, dec=Dec_psr, unit=(u.h, u.deg))\n \n ra,dec = catalog.data.lon.deg, catalog.data.lat.value\n sep_deg = np.sqrt((ra-RA_mjd.value)**2 + (dec - Dec_mjd.value)**2)\n ind_near = np.where(sep_deg<thresh_deg)[0]\n #idx, d2, d3 = c.match_to_catalog_sky(catalog)\n\n return ind_near\n\n\ndef rsync_file(infile, outdir):\n \"\"\"Rsyncs a file from the correlator machines to dsastorage.\n\n Parameters\n ----------\n infile : str\n The sourcefile string, e.g. 'corr01.sas.pvt:/home/user/data/fl_out.1.5618974'\n outfile : str\n The destination string, e.g. '/home/user/data/'\n \n Returns\n -------\n str\n The full path to the rsynced file in its destination.\n \"\"\"\n command = '. ~/.keychain/lxd110h23-sh ; rsync -avvP --inplace {0} {1}'.format(infile, outdir)\n process = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True\n )\n proc_stdout = str(process.communicate()[0].strip())\n print(proc_stdout)\n fname = infile.split('/')[-1]\n return '{0}{1}'.format(outdir, fname)\n\n\ndef get_pointing(obstime):\n \"\"\"Get the RA and DEC of the array at a given time in the past.\n\n Parameters\n ----------\n obstime : astropy.time.Time object\n The observation time.\n\n Returns\n -------\n tuple\n (ra, dec) of the observation in J2000 epoch, as astropy Quantities.\n \"\"\"\n ra, dec = direction(\n 'HADEC',\n 0.,\n get_declination_mjd(obstime).to_value(u.rad),\n obstime=obstime.mjd\n ).J2000()\n return Angle(ra*u.rad).to(u.hourangle), Angle(dec*u.rad).to(u.deg)\n\n\ndef get_beam_ha(ibeam, beam_sep=1*u.arcmin):\n return beam_sep*(127-ibeam)\n\n\ndef get_beam_ra_dec(obstime, ibeam):\n \"\"\"Get ra and dec of beam.\n \n Parameters\n ----------\n obstime : astropy time object\n observing time\n ibeam : int\n beam id\n \n Returns\n -------\n tuple\n Ra, Dec in radians\n \"\"\"\n trigger_dir = direction(\n 'HADEC',\n get_beam_ha(ibeam).to_value(u.rad),\n get_declination_mjd(obstime).to_value(u.rad),\n obstime=obstime\n )\n return trigger_dir.J2000()\n", "id": "4167787", "language": "Python", "matching_score": 1.0559581518173218, "max_stars_count": 0, "path": "dsaT3/utils.py" }, { "content": "\"\"\"A service to preprcocess hdf5 files before calibration.\n\"\"\"\nimport datetime\nimport sys\nimport warnings\nfrom multiprocessing import Process, Queue\nimport time\nimport pandas\nimport h5py\nimport numpy as np\nfrom astropy.time import Time\nfrom astropy.coordinates import Angle\nimport astropy.units as u\nimport dsautils.dsa_store as ds\nimport dsautils.dsa_syslog as dsl\nimport dsautils.cnf as cnf\nimport dsacalib.constants as ct\nfrom dsacalib.preprocess import rsync_file, fscrunch_file, first_true\nfrom dsacalib.preprocess import update_caltable\nfrom dsacalib.utils import exception_logger\n# make sure warnings do not spam syslog\nwarnings.filterwarnings(\"ignore\")\n\nCONF = cnf.Conf()\nCORR_CONF = CONF.get('corr')\nCAL_CONF = CONF.get('cal')\nMFS_CONF 
= CONF.get('fringe')\nCORRLIST = list(CORR_CONF['ch0'].keys())\nNCORR = len(CORRLIST)\nCALTIME = CAL_CONF['caltime_minutes']*u.min\nFILELENGTH = MFS_CONF['filelength_minutes']*u.min\nHDF5DIR = CAL_CONF['hdf5_dir']\n\n# Logger\nLOGGER = dsl.DsaSyslogger()\nLOGGER.subsystem(\"software\")\nLOGGER.app(\"dsacalib\")\n\n# ETCD interface\nETCD = ds.DsaStore()\n\n# FIFO Queues for rsync, freq scrunching, calibration\nFSCRUNCH_Q = Queue()\nRSYNC_Q = Queue()\nGATHER_Q = Queue()\nASSESS_Q = Queue()\nCALIB_Q = Queue()\n\n# Maximum number of files per correlator that can be assessed for calibration\n# needs at one time.\nMAX_ASSESS = 4\n\n# Maximum amount of time that gather_files will wait for all correlator files\n# to be gathered, in seconds\nMAX_WAIT = 5*60\n\n# Time to sleep if a queue is empty before trying to get an item\nTSLEEP = 10\n\ndef _update_caltable_callback(etcd_dict):\n \"\"\"When the antennas are moved, make and read a new calibration table.\n \"\"\"\n if etcd_dict['cmd'] == 'move':\n pt_el = etcd_dict['val']*u.deg\n update_caltable(pt_el)\n\ndef populate_queue(etcd_dict, queue=RSYNC_Q, hdf5dir=HDF5DIR):\n \"\"\"Populates the fscrunch and rsync queues using etcd.\n\n Etcd watch callback function.\n \"\"\"\n cmd = etcd_dict['cmd']\n val = etcd_dict['val']\n if cmd == 'rsync':\n rsync_string = '{0}.sas.pvt:{1} {2}/{0}/'.format(\n val['hostname'],\n val['filename'],\n hdf5dir\n )\n queue.put(rsync_string)\n\ndef task_handler(task_fn, inqueue, outqueue=None):\n \"\"\"Handles in and out queues of preprocessing tasks.\n\n Parameters\n ----------\n task_fn : function\n The function to execute, with a single argument.\n inqueue : multiprocessing.Queue instance\n The queue containing the arguments to `task_fn`.\n outqueue : multiprocessing.Queue instance\n The queue to write the otuput of `task_fn` to.\n \"\"\"\n while True:\n if not inqueue.empty():\n fname = inqueue.get()\n try:\n fname = task_fn(fname)\n if outqueue is not None:\n outqueue.put(fname)\n except Exception as exc:\n exception_logger(\n LOGGER,\n 'preprocessing of file {0}'.format(fname),\n exc,\n throw=False\n )\n else:\n time.sleep(TSLEEP)\n\ndef gather_worker(inqueue, outqueue, corrlist=CORRLIST):\n \"\"\"Gather all files that match a filename.\n\n Will wait for a maximum of 15 minutes from the time the first file is\n received.\n\n Parameters\n ----------\n inqueue : multiprocessing.Queue instance\n The queue containing the filenames, max size of 16 (i.e. 
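# Hedged sketch of the queue-worker pattern that task_handler implements:
# pull an item from an input queue, run a task on it, push the result to an
# output queue. This toy version stops on a None sentinel so it can run
# standalone; the service version loops forever and sleeps when idle.
from multiprocessing import Process, Queue

def toy_task(fname):
    return fname.upper()              # stand-in for rsync_file / fscrunch_file

def worker(task_fn, inqueue, outqueue):
    while True:
        item = inqueue.get()
        if item is None:              # sentinel: shut the worker down
            break
        outqueue.put(task_fn(item))

if __name__ == '__main__':
    inq, outq = Queue(), Queue()
    proc = Process(target=worker, args=(toy_task, inq, outq), daemon=True)
    proc.start()
    inq.put('2022-01-01T00:00:00.hdf5')
    inq.put(None)
    print(outq.get())                 # '2022-01-01T00:00:00.HDF5'
    proc.join()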
one file per\n corr node).\n outqueue : multiprocessing.Queue instance\n The queue in which to place the gathered files (as a list).\n \"\"\"\n ncorr = len(corrlist)\n filelist = [None]*ncorr\n nfiles = 0\n # Times out after 15 minutes\n end = time.time() + 60*15\n while nfiles < ncorr and time.time() < end:\n if not inqueue.empty():\n fname = inqueue.get()\n corrid = fname.replace('//', '/').split('/')[5]\n filelist[corrlist.index(corrid)] = fname\n nfiles += 1\n time.sleep(1)\n outqueue.put(filelist)\n\ndef gather_files(inqueue, outqueue, ncorr=NCORR, max_assess=MAX_ASSESS, tsleep=TSLEEP):\n \"\"\"Gather files from all correlators.\n\n Will wait for a maximum of 15 minutes from the time the first file is\n received.\n\n Parameters\n ----------\n inqueue : multiprocessing.Queue instance\n The queue containing the ungathered filenames .\n outqueue : multiprocessing.Queue instance\n The queue in which to place the gathered files (as a list).\n \"\"\"\n gather_queues = [Queue(ncorr) for idx in range(max_assess)]\n gather_names = [None]*max_assess\n gather_processes = [None]*max_assess\n nfiles_assessed = 0\n while True:\n if not inqueue.empty():\n try:\n fname = inqueue.get()\n print(fname)\n if not fname.split('/')[-1][:-7] in gather_names:\n gather_names[nfiles_assessed%max_assess] = \\\n fname.split('/')[-1][:-7]\n gather_processes[nfiles_assessed%max_assess] = \\\n Process(\n target=gather_worker,\n args=(\n gather_queues[nfiles_assessed%max_assess],\n outqueue\n ),\n daemon=True\n )\n gather_processes[nfiles_assessed%max_assess].start()\n nfiles_assessed += 1\n gather_queues[\n gather_names.index(fname.split('/')[-1][:-7])\n ].put(fname)\n except Exception as exc:\n exception_logger(\n LOGGER,\n 'preprocessing of file {0}'.format(fname),\n exc,\n throw=False\n )\n else:\n time.sleep(tsleep)\n\ndef assess_file(inqueue, outqueue, caltime=CALTIME, filelength=FILELENGTH):\n \"\"\"Decides whether calibration is necessary.\n\n Sends a command to etcd using the monitor point /cmd/cal if the file should\n be calibrated.\n\n Parameters\n ----------\n inqueue : multiprocessing.Queue instance\n The queue containing the gathered filenames.\n outqueue : multiprocessing.Queue instance\n The queue to which the calname and gathered filenames (as a tuple) if\n the file is appropriate for calibration.\n caltime : astropy quantity\n The amount of time around the calibrator to be converted to\n a measurement set for calibration. 
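# Hedged sketch of the grouping idea behind gather_worker/gather_files: files
# from the correlator nodes share a timestamp prefix, so they can be collected
# into one list per observation, ordered by corr node. Paths and the prefix
# length are illustrative; the service derives its key with
# fname.split('/')[-1][:-7].
from collections import defaultdict

corrlist = ['corr01', 'corr02', 'corr03']
incoming = [
    '/hdf5/corr01/2022-01-01T00:00:00_b01.hdf5',
    '/hdf5/corr03/2022-01-01T00:00:00_b03.hdf5',
    '/hdf5/corr02/2022-01-01T00:00:00_b02.hdf5',
]

gathered = defaultdict(lambda: [None] * len(corrlist))
for fname in incoming:
    key = fname.split('/')[-1][:-9]   # strip the per-corr suffix (toy value)
    corrid = fname.split('/')[2]      # the 'corrNN' component of the path
    gathered[key][corrlist.index(corrid)] = fname

print(dict(gathered))                 # one fully populated list per timestamp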
Used to assess whether any part of\n the desired calibrator pass is in a given file.\n \"\"\"\n while True:\n if not inqueue.empty():\n try:\n flist = inqueue.get()\n fname = first_true(flist)\n datet = fname.split('/')[-1][:19]\n tstart = Time(datet).sidereal_time(\n 'apparent',\n longitude=ct.OVRO_LON*u.rad\n )\n tend = (Time(datet)+filelength).sidereal_time(\n 'apparent',\n longitude=ct.OVRO_LON*u.rad\n )\n a0 = (caltime*np.pi*u.rad/\n (ct.SECONDS_PER_SIDEREAL_DAY*u.s)).to_value(u.rad)\n with h5py.File(fname, mode='r') as h5file:\n pt_dec = h5file['Header']['extra_keywords']['phase_center_dec'].value*u.rad\n caltable = update_caltable(pt_dec)\n calsources = pandas.read_csv(caltable, header=0)\n for _index, row in calsources.iterrows():\n if isinstance(row['ra'], str):\n rowra = row['ra']\n else:\n rowra = row['ra']*u.deg\n delta_lst_start = (\n tstart-Angle(rowra)\n ).to_value(u.rad)%(2*np.pi)\n if delta_lst_start > np.pi:\n delta_lst_start -= 2*np.pi\n delta_lst_end = (\n tend-Angle(rowra)\n ).to_value(u.rad)%(2*np.pi)\n if delta_lst_end > np.pi:\n delta_lst_end -= 2*np.pi\n if delta_lst_start < a0 < delta_lst_end:\n calname = row['source']\n print('Calibrating {0}'.format(calname))\n outqueue.put((calname, flist))\n except Exception as exc:\n exception_logger(\n LOGGER,\n 'preprocessing of file {0}'.format(fname),\n exc,\n throw=False\n )\n else:\n time.sleep(TSLEEP)\n\nif __name__==\"__main__\":\n processes = {\n 'rsync': {\n 'nthreads': 1,\n 'task_fn': rsync_file,\n 'queue': RSYNC_Q,\n 'outqueue': GATHER_Q, #FSCRUNCH_Q,\n 'processes': []\n },\n # 'fscrunch': {\n # 'nthreads': 4,\n # 'task_fn': fscrunch_file,\n # 'queue': FSCRUNCH_Q,\n # 'outqueue': GATHER_Q,\n # 'processes': []\n # },\n }\n # Start etcd watch\n ETCD.add_watch('/cmd/cal', populate_queue)\n # Start all threads\n for name in processes.keys():\n for i in range(processes[name]['nthreads']):\n processes[name]['processes'] += [Process(\n target=task_handler,\n args=(\n processes[name]['task_fn'],\n processes[name]['queue'],\n processes[name]['outqueue'],\n ),\n daemon=True\n )]\n for pinst in processes[name]['processes']:\n pinst.start()\n\n try:\n processes['gather'] = {\n 'nthreads': 1,\n 'task_fn': gather_files,\n 'queue': GATHER_Q,\n 'outqueue': ASSESS_Q,\n 'processes': []\n }\n processes['gather']['processes'] += [Process(\n target=gather_files,\n args=(\n GATHER_Q,\n ASSESS_Q\n )\n )]\n processes['gather']['processes'][0].start()\n\n processes['assess'] = {\n 'nthreads': 1,\n 'task_fn': assess_file,\n 'queue': ASSESS_Q,\n 'outqueue': CALIB_Q,\n 'processes': []\n }\n processes['assess']['processes'] += [Process(\n target=assess_file,\n args=(\n ASSESS_Q,\n CALIB_Q\n ),\n daemon=True\n )]\n processes['assess']['processes'][0].start()\n\n while True:\n for name in processes.keys():\n ETCD.put_dict(\n '/mon/cal/{0}_process'.format(name),\n {\n \"queue_size\": processes[name]['queue'].qsize(),\n \"ntasks_alive\": sum([\n pinst.is_alive() for pinst in\n processes[name]['processes']\n ]),\n \"ntasks_total\": processes[name]['nthreads']\n }\n )\n ETCD.put_dict(\n '/mon/service/calpreprocess',\n {\n \"cadence\": 60,\n \"time\": Time(datetime.datetime.utcnow()).mjd\n }\n )\n while not CALIB_Q.empty():\n (calname_fromq, flist_fromq) = CALIB_Q.get()\n ETCD.put_dict(\n '/cmd/cal',\n {\n 'cmd': 'calibrate',\n 'val': {\n 'calname': calname_fromq,\n 'flist': flist_fromq\n }\n }\n )\n time.sleep(60)\n except (KeyboardInterrupt, SystemExit):\n processes['gather']['processes'][0].terminate()\n 
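# Hedged sketch of the hour-angle wrap used in assess_file: the offset between
# a file's start/end LST and a calibrator's RA is folded into (-pi, pi] so a
# transit near 0h LST is still matched to the right file.
import numpy as np
import astropy.units as u
from astropy.coordinates import Angle

def wrapped_offset(lst, ra):
    # Return (lst - ra) wrapped into (-pi, pi], in radians.
    delta = (lst - Angle(ra)).to_value(u.rad) % (2 * np.pi)
    if delta > np.pi:
        delta -= 2 * np.pi
    return delta

lst = Angle(0.1, unit=u.hourangle)    # just after 0h local sidereal time
ra = Angle(23.9, unit=u.hourangle)    # calibrator RA just before 0h
print(wrapped_offset(lst, ra))        # ~0.05 rad: the source has just transited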
processes['gather']['processes'][0].join()\n sys.exit()\n", "id": "2082395", "language": "Python", "matching_score": 5.0250325202941895, "max_stars_count": 1, "path": "services/preprocess_service.py" }, { "content": "\"\"\"Service for updating beamformer weights.\n\"\"\"\n\nimport datetime\nimport warnings\nfrom multiprocessing import Process, Queue\nimport time\nfrom astropy.time import Time\nimport dsautils.dsa_store as ds\nimport dsautils.dsa_syslog as dsl\nimport dsautils.cnf as dsc\nfrom dsacalib.preprocess import rsync_file\nfrom dsacalib.utils import exception_logger\nwarnings.filterwarnings(\"ignore\")\n\n# Logger\nLOGGER = dsl.DsaSyslogger()\nLOGGER.subsystem(\"software\")\nLOGGER.app(\"dsacalib\")\n\n# ETCD interface\nETCD = ds.DsaStore()\n\nDATE_STR = '28feb21'\nVOLTAGE_DIR = '/mnt/data/dsa110/T3/'\nCORR_LIST = [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]\nRSYNC_Q = Queue()\nTSLEEP = 10\n\nCONF = dsc.Conf()\nPARAMS = CONF.get('corr')\n\ndef rsync_handler(inqueue):\n \"\"\"Handles in and out queues of preprocessing tasks.\n\n Parameters\n ----------\n task_fn : function\n The function to execute, with a single argument.\n inqueue : multiprocessing.Queue instance\n The queue containing the arguments to `task_fn`.\n outqueue : multiprocessing.Queue instance\n The queue to write the otuput of `task_fn` to.\n \"\"\"\n while True:\n if not inqueue.empty():\n fname = inqueue.get()\n try:\n rsync_file(\n fname,\n remove_source_files=False\n )\n except Exception as exc:\n exception_logger(\n LOGGER,\n 'copying of voltage trigger {0}'.format(fname),\n exc,\n throw=False\n )\n else:\n time.sleep(TSLEEP)\n\ndef populate_queue(etcd_dict):\n \"\"\"Copies voltage triggers from corr machines.\n \"\"\"\n time.sleep(5*60) # Allow correlator voltage service to create metadata\n for specnum in etcd_dict.keys():\n specnum = (int(specnum)-477)*16\n for corr in PARAMS['ch0'].keys():\n fname = \"{0}.sas.pvt:/home/ubuntu/data/fl*.out.{1}\".format(\n corr,\n specnum\n )\n fnameout = \"/mnt/data/dsa110/T3/{0}/{1}/\".format(corr, DATE_STR)\n print(\"{0} {1}\".format(fname, fnameout))\n RSYNC_Q.put(\n \"{0} {1}\".format(fname, fnameout)\n )\n fname = \"{0}.sas.pvt:/home/ubuntu/data/fl*.out.{1}.json\".format(\n corr,\n specnum\n )\n print(\"{0} {1}\".format(fname, fnameout))\n RSYNC_Q.put(\n \"{0} {1}\".format(fname, fnameout)\n )\n LOGGER.info(\n 'Copied voltage trigger {0} from {1}'.format(\n specnum,\n corr\n )\n )\n\nif __name__ == \"__main__\":\n processes = {\n 'rsync': {\n 'nthreads': 2,\n 'queue': RSYNC_Q,\n 'processes': []\n },\n }\n # Start etcd watch\n ETCD.add_watch('/mon/corr/1/trigger', populate_queue)\n # Start all threads\n for name in processes.keys():\n for i in range(processes[name]['nthreads']):\n processes[name]['processes'] += [Process(\n target=rsync_handler,\n args=(\n processes[name]['queue'],\n ),\n daemon=True\n )]\n for pinst in processes[name]['processes']:\n pinst.start()\n\n while True:\n ETCD.put_dict(\n '/mon/cal/voltagecopy',\n {\n \"alive\": True,\n \"cadence\": 60,\n \"time\": Time(datetime.datetime.utcnow()).isot\n }\n )\n time.sleep(60)\n", "id": "12461553", "language": "Python", "matching_score": 2.6618547439575195, "max_stars_count": 1, "path": "services/voltagetriggers.py" }, { "content": "\"\"\"A service to create measurement sets and calibrate data.\n\"\"\"\nimport os\nimport shutil\nimport warnings\nimport datetime\nimport time\nimport yaml\nimport h5py\nimport numpy as np\nimport astropy.units as u\nfrom astropy.time import Time\nimport 
dsautils.dsa_store as ds\nimport dsautils.dsa_syslog as dsl\nimport dsautils.cnf as dsc\nfrom dsacalib.preprocess import first_true, update_caltable\nfrom dsacalib.utils import exception_logger\nfrom dsacalib.calib import calibrate_phases, calibrate_phase_single_ms\nfrom dsacalib.routines import get_files_for_cal, calibrate_measurement_set\nfrom dsacalib.ms_io import convert_calibrator_pass_to_ms, caltable_to_etcd, \\\n write_beamformer_solutions, average_beamformer_solutions\nfrom dsacalib.plotting import summary_plot, plot_bandpass_phases, \\\n plot_beamformer_weights #, plot_current_beamformer_solutions\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nwarnings.filterwarnings(\"ignore\")\n\n# Logger\nLOGGER = dsl.DsaSyslogger()\nLOGGER.subsystem(\"software\")\nLOGGER.app(\"dsacalib\")\n\n# ETCD interface\nETCD = ds.DsaStore()\n\nCONF = dsc.Conf()\nCORR_PARAMS = CONF.get('corr')\nCAL_PARAMS = CONF.get('cal')\nMFS_PARAMS = CONF.get('fringe')\n\n# These should be put somewhere else eventually\nCALTIME = CAL_PARAMS['caltime_minutes']*u.min\nREFANTS = CAL_PARAMS['refant']\nif isinstance(REFANTS, (str, int)):\n REFANTS = [REFANTS]\nFILELENGTH = MFS_PARAMS['filelength_minutes']*u.min\nMSDIR = CAL_PARAMS['msdir']\nBEAMFORMER_DIR = CAL_PARAMS['beamformer_dir']\nHDF5DIR = CAL_PARAMS['hdf5_dir']\n# This should be made more general for more antennas\nANTENNAS = list(CORR_PARAMS['antenna_order'].values())\nPOLS = CORR_PARAMS['pols_voltage']\nANTENNAS_IN_MS = CAL_PARAMS['antennas_in_ms']\nANTENNAS_NOT_IN_BF = CAL_PARAMS['antennas_not_in_bf']\nCORR_LIST = list(CORR_PARAMS['ch0'].keys())\nCORR_LIST = [int(cl.strip('corr')) for cl in CORR_LIST]\nREFCORR = '{0:02d}'.format(CORR_LIST[0])\nWEBPLOTS = '/mnt/data/dsa110/webPLOTS/calibration/'\nPLOTDIR = f'{WEBPLOTS}/allpngs/'\n\ndef sort_filenames(filenames):\n \"\"\"Sort list of calibrator passes.\n \"\"\"\n filenames_sorted = {}\n yesterday, today = sorted(filenames.keys())\n for date in sorted(filenames.keys(), reverse=True):\n filenames_sorted[date] = {}\n # What is the order that we will get here\n # We want the most recent cal to be last\n times = {\n cal: filenames[today][cal]['transit_time']\n for cal in filenames[today].keys()\n }\n ordered_times = {\n k: v for k, v in sorted(\n times.items(),\n key=lambda item: item[1],\n reverse=True\n )\n }\n for cal in ordered_times.keys():\n filenames_sorted[today][cal] = filenames[today][cal]\n times = {\n cal: filenames[yesterday][cal]['transit_time']\n for cal in filenames[yesterday].keys()\n }\n ordered_times = {\n k: v for k, v in sorted(\n times.items(),\n key=lambda item: item[1],\n reverse=True\n )\n }\n for cal in ordered_times.keys():\n if cal not in filenames_sorted[today].keys():\n filenames_sorted[yesterday][cal] = filenames[yesterday][cal]\n return filenames_sorted\n\ndef find_bf_solns_to_avg(filenames, ttime, start_time, caltable):\n \"\"\"Find all previous calibrator passes to average.\n \"\"\"\n # TODO: Just use a glob of the beamformer directory instead since the\n # names contain the transit pass time and calibrator names.\n yesterday = (ttime-1*u.d).isot.split('T')[0]\n filenames_yesterday = get_files_for_cal(\n caltable,\n REFCORR,\n CALTIME,\n FILELENGTH,\n hdf5dir=HDF5DIR,\n date_specifier='{0}*'.format(yesterday),\n )\n if yesterday in filenames_yesterday.keys():\n filenames[yesterday] = filenames_yesterday[yesterday]\n else:\n filenames[yesterday] = {}\n # Get rid of calibrators after the snap start time or 
without files\n for date in filenames.keys():\n for cal in list(filenames[date].keys()):\n if filenames[date][cal]['transit_time'] < start_time or \\\n len(filenames[date][cal]['files'])==0 or \\\n filenames[date][cal]['transit_time'] > ttime:\n filenames[date].pop(cal)\n # Sort the filenames by time\n assert len(filenames.keys()) < 3\n filenames = sort_filenames(filenames)\n # Average beamformer solutions\n beamformer_names = []\n for date in filenames.keys():\n for cal in filenames[date].keys():\n cal_ttime = filenames[date][cal]['transit_time']\n cal_ttime.precision = 0\n beamformer_names += [\n '{0}_{1}'.format(\n cal,\n cal_ttime.isot\n )\n ]\n # Open yaml files\n print('opening yaml files')\n if os.path.exists(\n '{0}/beamformer_weights_{1}.yaml'.format(\n BEAMFORMER_DIR,\n beamformer_names[0]\n )\n ):\n with open(\n '{0}/beamformer_weights_{1}.yaml'.format(\n BEAMFORMER_DIR,\n beamformer_names[0]\n )\n ) as f:\n latest_solns = yaml.load(f, Loader=yaml.FullLoader)\n for bfname in beamformer_names[1:].copy():\n try:\n with open(\n '{0}/beamformer_weights_{1}.yaml'.format(\n BEAMFORMER_DIR,\n bfname\n )\n ) as f:\n solns = yaml.load(f, Loader=yaml.FullLoader)\n assert solns['cal_solutions']['antenna_order'] == \\\n latest_solns['cal_solutions']['antenna_order']\n assert solns['cal_solutions']['corr_order'] == \\\n latest_solns['cal_solutions']['corr_order']\n assert solns['cal_solutions']['delays'] == \\\n latest_solns['cal_solutions']['delays']\n assert solns['cal_solutions']['eastings'] == \\\n latest_solns['cal_solutions']['eastings']\n except (AssertionError, FileNotFoundError):\n beamformer_names.remove(bfname)\n else:\n beamformer_names = []\n latest_solns = None\n return beamformer_names, latest_solns\n\ndef extract_applied_delays(file):\n \"\"\"Extracts the current snap delays from the hdf5 file.\n\n If delays are not set in the hdf5 file, uses the most recent delays in\n the beamformer weights directory instead.\n\n Parameters\n ----------\n file : str\n The full path to the hdf5 file.\n\n Returns\n -------\n ndarray\n The applied delays in ns.\n \"\"\"\n with h5py.File(file, 'r') as f:\n if 'applied_delays_ns' in f['Header']['extra_keywords'].keys():\n delaystring = (\n f['Header']['extra_keywords']['applied_delays_ns']\n [()]\n ).astype(np.str)\n applied_delays = np.array(\n delaystring.split(' ')\n ).astype(np.int).reshape(-1, 2)\n applied_delays = applied_delays[np.array(ANTENNAS)-1, :]\n else:\n current_solns = '{0}/beamformer_weights.yaml'.format(BEAMFORMER_DIR)\n with open(current_solns) as yamlfile:\n calibration_params = yaml.load(\n yamlfile,\n Loader=yaml.FullLoader\n )['cal_solutions']\n applied_delays = np.array(calibration_params['delays'])*2\n LOGGER.error(\n 'Error extracting snap delays from uvh5 files. 
'\n 'Using delays in {0}'.format(current_solns)\n )\n return applied_delays\n\n# TODO: Etcd watch robust to etcd connection failures.\ndef calibrate_file(etcd_dict):\n \"\"\"Generates and calibrates a measurement set.\n\n An etcd watch callback function.\n \"\"\"\n cmd = etcd_dict['cmd']\n val = etcd_dict['val']\n if cmd == 'calibrate':\n calname = val['calname']\n flist = val['flist']\n print('flist[0]: {0}, {1}'.format(\n first_true(flist), type(first_true(flist))\n ))\n date = first_true(flist).split('/')[-1][:-14]\n msname = '{0}/{1}_{2}'.format(MSDIR, date, calname)\n date_specifier = '{0}*'.format(date)\n # Get the start time for the snaps\n start_time = Time(\n ETCD.get_dict('/mon/snap/1/armed_mjd')['armed_mjd'], format='mjd'\n )\n with h5py.File(first_true(flist), mode='r') as h5file:\n pt_dec = h5file['Header']['extra_keywords']['phase_center_dec'].value*u.rad\n caltable = update_caltable(pt_dec)\n LOGGER.info('Creating {0}.ms at dec {1}'.format(msname, pt_dec))\n filenames = get_files_for_cal(\n caltable,\n REFCORR,\n CALTIME,\n FILELENGTH,\n hdf5dir=HDF5DIR,\n date_specifier=date_specifier,\n )\n ttime = filenames[date][calname]['transit_time']\n # Only use calibrators within the last 24 hours or since the snaps\n # were restarted\n if ttime-start_time > 24*u.h:\n start_time = ttime - 24*u.h\n ttime.precision = 0\n ETCD.put_dict(\n '/mon/cal/calibration',\n {\n \"transit_time\": filenames[date][calname]['transit_time'].mjd,\n \"calibration_source\": calname,\n \"filelist\": flist,\n \"status\": -1\n }\n )\n print('writing ms')\n convert_calibrator_pass_to_ms(\n cal=filenames[date][calname]['cal'],\n date=date,\n files=filenames[date][calname]['files'],\n duration=CALTIME,\n antenna_list=ANTENNAS_IN_MS,\n logger=LOGGER,\n msdir=MSDIR\n )\n print('done writing ms')\n LOGGER.info('{0}.ms created'.format(msname))\n\n status = calibrate_measurement_set(\n msname,\n filenames[date][calname]['cal'],\n refants=REFANTS,\n bad_antennas=None,\n bad_uvrange='2~27m',\n forsystemhealth=True,\n throw_exceptions=True,\n logger=LOGGER\n )\n print('done calibration')\n caltable_to_etcd(\n msname,\n calname,\n filenames[date][calname]['transit_time'].mjd,\n status,\n logger=LOGGER\n )\n\n ETCD.put_dict(\n '/mon/cal/calibration',\n {\n \"transit_time\": filenames[date][calname]['transit_time'].mjd,\n \"calibration_source\": calname,\n \"filelist\": flist,\n \"status\": status\n }\n )\n print('solns written to etcd')\n LOGGER.info(\n 'Calibrated {0}.ms for system health with status {1}'\n .format(msname, status)\n )\n print('creating figures')\n figure_path = '{0}/{1}_{2}'.format(PLOTDIR, date, calname)\n try:\n with PdfPages('{0}.pdf'.format(figure_path)) as pdf:\n for j in range(len(ANTENNAS)//10+1):\n fig = summary_plot(\n msname,\n calname,\n 2,\n ['B', 'A'],\n ANTENNAS[j*10:(j+1)*10]\n )\n pdf.savefig(fig)\n plt.close()\n target = f'{WEBPLOTS}/summary_current.pdf'\n if os.path.exists(target):\n os.unlink(target)\n shutil.copyfile(\n f'{PLOTDIR}/{date}_{calname}.pdf',\n target\n )\n# TODO: Get beamformer weight filenames from etcd\n# # Index error occured - some files could not be found. 
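# Hedged sketch of the callback contract used by these services: each etcd
# watch delivers a dict with 'cmd' and 'val' keys, and a callback acts only on
# the command it owns. The handler below is a stand-in, not the real
# calibrate_file.
def toy_calibrate_callback(etcd_dict):
    if etcd_dict.get('cmd') != 'calibrate':
        return None                   # ignore rsync/move/other commands
    val = etcd_dict['val']
    msname = '/toy/msdir/{0}_{1}'.format('2022-01-01', val['calname'])
    return msname, val['flist']

print(toy_calibrate_callback(
    {'cmd': 'calibrate', 'val': {'calname': '3C286', 'flist': ['a.hdf5']}}
))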
corr04\n# plot_current_beamformer_solutions(\n# filenames[date][calname]['files'],\n# calname,\n# date,\n# # beamformer name,\n# corrlist=CORR_LIST,\n# outname=figure_path,\n# show=False\n# )\n except Exception as exc:\n exception_logger(\n LOGGER,\n 'plotting of calibration solutions for {0}.ms'.format(msname),\n exc,\n throw=False\n )\n print('calibration for bf')\n status = calibrate_measurement_set(\n msname,\n filenames[date][calname]['cal'],\n refants=REFANTS,\n bad_antennas=None,\n bad_uvrange='2~27m',\n keepdelays=False,\n forsystemhealth=False,\n throw_exceptions=False,\n logger=LOGGER\n )\n LOGGER.info(\n 'Calibrated {0}.ms for beamformer weights with status {1}'\n .format(msname, status)\n )\n print('calculating beamformer weights')\n try:\n applied_delays = extract_applied_delays(first_true(flist))\n # Write beamformer solutions for one source\n _ = write_beamformer_solutions(\n msname,\n calname,\n ttime,\n ANTENNAS,\n applied_delays,\n flagged_antennas=ANTENNAS_NOT_IN_BF,\n outdir=BEAMFORMER_DIR,\n corr_list=np.array(CORR_LIST)\n )\n except Exception as exc:\n exception_logger(\n LOGGER,\n 'calculation of beamformer weights for {0}.ms'.format(msname),\n exc,\n throw=False\n )\n print('getting list of calibrators')\n # Now we want to find all sources in the last 24 hours\n # start by updating our list with calibrators from the day before\n beamformer_names, latest_solns = find_bf_solns_to_avg(\n filenames, ttime, start_time, caltable\n )\n # Average beamformer solutions\n if len(beamformer_names) > 0:\n print('averaging beamformer weights')\n averaged_files, avg_flags = average_beamformer_solutions(\n beamformer_names,\n ttime,\n outdir=BEAMFORMER_DIR,\n corridxs=CORR_LIST,\n logger=LOGGER\n )\n print('setting parameters for new yaml file')\n # Make the final yaml file\n latest_solns['cal_solutions']['weight_files'] = averaged_files\n latest_solns['cal_solutions']['source'] = [\n bf.split('_')[0] for bf in beamformer_names\n ]\n latest_solns['cal_solutions']['caltime'] = [\n float(Time(bf.split('_')[1]).mjd) for bf in beamformer_names\n ]\n # Remove the old bad cal solutions\n for key, value in \\\n latest_solns['cal_solutions']['flagged_antennas'].items():\n if 'casa solutions flagged' in value:\n value = value.remove('casa solutions flagged')\n # Flag new bad solutions\n idxant, idxpol = np.nonzero(avg_flags)\n for i, ant in enumerate(idxant):\n key = '{0} {1}'.format(ANTENNAS[ant], POLS[idxpol[i]])\n if key not in \\\n latest_solns['cal_solutions']['flagged_antennas'].keys():\n latest_solns['cal_solutions']['flagged_antennas'][key] = []\n latest_solns['cal_solutions']['flagged_antennas'][key] += \\\n ['casa solutions flagged']\n latest_solns['cal_solutions']['flagged_antennas'] = {\n key: value for key, value in\n latest_solns['cal_solutions']['flagged_antennas'].items()\n if len(value) > 0\n }\n print('opening yaml file')\n with open(\n '{0}/beamformer_weights_{1}.yaml'.format(\n BEAMFORMER_DIR, ttime.isot\n ),\n 'w'\n ) as file:\n print('writing bf weights')\n _ = yaml.dump(latest_solns, file)\n # Get rid of things that don't need to be stored in etcd\n latest_solns['cal_solutions'].pop('antenna_order')\n latest_solns['cal_solutions'].pop('pol_order')\n latest_solns['cal_solutions'].pop('corr_order')\n latest_solns['cal_solutions']['time'] = ttime.mjd\n ETCD.put_dict(\n '/mon/cal/bfweights',\n {\n 'cmd': 'update_weights',\n 'val': latest_solns['cal_solutions']\n }\n )\n print('done writing')\n os.system(\n \"cd {0} ; \"\n \"git add beamformer_weights.yaml ; \"\n \"git 
commit -m {1} ; \"\n \"cd /home/user/proj/dsa110-shell/dsa110-calib/services/\".format(\n BEAMFORMER_DIR,\n beamformer_names[0]\n )\n )\n beamformer_names += [averaged_files[0].split('_')[-1].strip(\".dat\")]\n _ = plot_beamformer_weights(\n beamformer_names,\n antennas_to_plot=np.array(ANTENNAS),\n outname='{0}/{1}'.format(PLOTDIR, ttime),\n corrlist=np.array(CORR_LIST),\n show=False\n )\n target = f'{WEBPLOTS}/bfw_current.png'\n if os.path.exists(target):\n os.unlink(target)\n shutil.copyfile(\n f'{PLOTDIR}/{ttime}_averagedweights.png',\n target\n )\n # Plot evolution of the phase over the day\n calibrate_phase_single_ms(msname, REFANTS[0], calname)\n plot_bandpass_phases(\n filenames,\n np.array(ANTENNAS),\n outname='{0}/{1}'.format(PLOTDIR, ttime)\n )\n plt.close('all')\n target = f'{WEBPLOTS}/phase_current.png'\n if os.path.exists(target):\n os.unlink(target)\n shutil.copyfile(\n f'{PLOTDIR}/{ttime}_phases.png',\n target\n )\n\nif __name__==\"__main__\":\n ETCD.add_watch('/cmd/cal', calibrate_file)\n while True:\n ETCD.put_dict(\n '/mon/service/calibration',\n {\n \"cadence\": 60,\n \"time\": Time(datetime.datetime.utcnow()).mjd\n }\n )\n time.sleep(60)\n", "id": "1871759", "language": "Python", "matching_score": 6.34314489364624, "max_stars_count": 1, "path": "services/calibration_service.py" }, { "content": "\"\"\"Service for updating beamformer weights.\n\"\"\"\n\nimport shutil\nimport datetime\nimport warnings\nimport time\nimport yaml\nimport numpy as np\nfrom astropy.time import Time\nimport dsautils.dsa_store as ds\nimport dsautils.dsa_syslog as dsl\nimport dsautils.cnf as dsc\nfrom dsacalib.preprocess import rsync_file\nwarnings.filterwarnings(\"ignore\")\n\n# Logger\nLOGGER = dsl.DsaSyslogger()\nLOGGER.subsystem(\"software\")\nLOGGER.app(\"dsacalib\")\n\n# ETCD interface\nETCD = ds.DsaStore()\n\nCONF = dsc.Conf()\nCORR_PARAMS = CONF.get('corr')\nCAL_PARAMS = CONF.get('cal')\nCORR_LIST = list(CORR_PARAMS['ch0'].keys())\nCORR_LIST = [int(cl.strip('corr')) for cl in CORR_LIST]\n\nANTENNAS_PLOT = list(CORR_PARAMS['antenna_order'].values())\nANTENNAS = ANTENNAS_PLOT\nBFDIR = CAL_PARAMS['beamformer_dir']\nWEIGHTFILE = CAL_PARAMS['weightfile']\nFLAGFILE = CAL_PARAMS['flagfile']\nBFARCHIVEDIR = CAL_PARAMS['bfarchivedir']\n\ndef update_beamformer_weights(etcd_dict):\n \"\"\"Updates beamformer weights and antenna flags on core machines.\n\n Also archives the beamformer weights in /mnt/data/dsa110/T3/calibs/\n \"\"\"\n cmd = etcd_dict['cmd']\n val = etcd_dict['val']\n\n if cmd == 'update_weights':\n bfsolns = val\n # Put antenna flags in the way needed by the bf\n antenna_flags = np.zeros((len(ANTENNAS)), np.int)\n for key in bfsolns['flagged_antennas']:\n ant = int(key.split(' ')[0])\n if ant in ANTENNAS:\n antenna_flags[\n ANTENNAS.index(ant)\n ] = 1\n antenna_flags = np.where(antenna_flags)[0]\n with open('antenna_flags.txt', 'w') as f:\n f.write('\\n'.join([str(af) for af in antenna_flags]))\n f.write('\\n')\n tstamp = Time(datetime.datetime.utcnow())\n tstamp.precision = 0\n with open(\n '{0}/beamformer_weights_{1}.yaml'.format(BFARCHIVEDIR, tstamp.isot), 'w'\n ) as file:\n _ = yaml.dump(bfsolns, file)\n for i, corr in enumerate(CORR_LIST):\n fname = '{0}/{1}'.format(\n BFDIR,\n bfsolns['weight_files'][i]\n )\n fnamearchive = '{0}/beamformer_weights_corr{1:02d}_{2}.dat'.format(\n BFARCHIVEDIR,\n corr,\n tstamp.isot\n )\n fnameout = 'corr{0:02d}.sas.pvt:{1}'.format(\n corr,\n WEIGHTFILE\n )\n flagsout = 'corr{0:02d}.sas.pvt:{1}'.format(\n corr,\n FLAGFILE\n )\n rsync_file(\n '{0} 
{1}'.format(fname, fnameout),\n remove_source_files=False\n )\n rsync_file(\n 'antenna_flags.txt {0}'.format(flagsout),\n remove_source_files=False\n )\n shutil.copyfile(fname, fnamearchive)\n LOGGER.info(\n 'Updated beamformer weights using {0}'.format(\n bfsolns['weight_files']\n )\n )\n\nif __name__ == \"__main__\":\n ETCD.add_watch('/mon/cal/bfweights', update_beamformer_weights)\n while True:\n ETCD.put_dict(\n '/mon/service/bfweightcopy',\n {\n \"cadence\": 60,\n \"time\": Time(datetime.datetime.utcnow()).mjd\n }\n )\n time.sleep(60)\n", "id": "9296497", "language": "Python", "matching_score": 1.1378577947616577, "max_stars_count": 1, "path": "services/beamformerweights.py" }, { "content": "import traceback\nimport numpy as np\nfrom dsautils import dsa_store\nimport dsautils.dsa_syslog as dsl\nfrom dsaT3 import filplot_funcs as filf\nds = dsa_store.DsaStore()\nimport time, os\nimport json\n\nTIMEOUT_FIL = 60\nTIMEOUT_CORR = 21600\nFILPATH = '/data/dsa110/T1/'\nOUTPUT_PATH = '/home/ubuntu/data/T3/'\nFIL_CORRS = ['corr01','corr02','corr09','corr13']\n\nLOGGER = dsl.DsaSyslogger()\nLOGGER.subsystem(\"software\")\nLOGGER.app(\"dsaT3\")\nLOGGER.function(\"T3_manager\")\n\n# fills output_dict with empty entries\ndef fill_empty_dict(od):\n\n od['filfile'] = None\n od['candplot'] = None\n od['save'] = False\n od['label'] = ''\n for corr in ['corr03','corr04','corr05','corr06','corr07','corr08','corr10','corr11','corr12','corr14','corr15','corr16','corr18','corr19','corr21','corr22']:\n od[corr+'_data'] = None\n od[corr+'_header'] = None\n\n\n# searches for local file\ndef search_for_local_file(fl):\n\n if os.path.exists(fl):\n return fl\n return None\n \n\n# waits for local file to be written\ndef wait_for_local_file(fl,timt):\n\n time_counter = 0\n while not os.path.exists(fl):\n time.sleep(1)\n time_counter += 1\n if time_counter > timt:\n return None\n\n # wait in case file hasn't been written\n time.sleep(10)\n\n return fl\n \n \n# a is T3 trigger dict\ndef run(a):\n\n # set up output dict and datestring\n datestring = ds.get_dict('/cnf/datestring')\n output_dict = a[list(a.keys())[0]]\n output_dict['trigname'] = list(a.keys())[0]\n output_dict['datestring'] = datestring\n fill_empty_dict(output_dict)\n\n # wait for specific filterbank file to be written\n ibeam = output_dict['ibeam'] + 1\n corrXX = FIL_CORRS[int( (ibeam-1) / 64)]\n filfile = '/data/dsa110/T1/' + corrXX + '/' + datestring + '/fil_' + output_dict['trigname'] + '/' + output_dict['trigname'] +\t'_' + str(ibeam) + '.fil'\n print(filfile)\n LOGGER.info('Working on {0}'.format(output_dict['trigname']))\n found_filfile = wait_for_local_file(filfile,TIMEOUT_FIL)\n output_dict['filfile'] = found_filfile\n\n if found_filfile is None:\n LOGGER.error('No filfile for {0}'.format(output_dict['trigname']))\n #with open(OUTPUT_PATH + output_dict['trigname'] + '.json', 'w') as f: #encoding='utf-8'\n # json.dump(output_dict, f, ensure_ascii=False, indent=4)\n \n return output_dict\n \n # launch candplotter\n try:\n output_dict['candplot'] = filf.filplot_entry(datestring,a)\n except Exception as exception:\n logging_string = \"Could not make filplot {0} due to {1}. 
Callback:\\n{2}\".format(\n output_dict['trigname'],\n type(exception).__name__,\n ''.join(\n traceback.format_tb(exception.__traceback__)\n )\n )\n print(logging_string)\n LOGGER.error(logging_string)\n #with open(OUTPUT_PATH + output_dict['trigname'] + '.json', 'w') as f: #encoding='utf-8'\n # json.dump(output_dict, f, ensure_ascii=False, indent=4)\n\n return output_dict\n\n # wait for voltage files to be written\n \n\n # write output_dict to disk\n with open(OUTPUT_PATH + output_dict['trigname'] + '.json', 'w') as f: #encoding='utf-8' \n json.dump(output_dict, f, ensure_ascii=False, indent=4)\n\n return output_dict\n\n# a is T3 trigger dict\ndef run_nowait(a):\n\n # set up output dict and datestring\n datestring = ds.get_dict('/cnf/datestring')\n output_dict = a[list(a.keys())[0]]\n output_dict['trigname'] = list(a.keys())[0]\n output_dict['datestring'] = datestring\n fill_empty_dict(output_dict)\n\n # wait for specific filterbank file to be written\n ibeam = output_dict['ibeam'] + 1\n corrXX = FIL_CORRS[int( (ibeam-1) / 64)]\n filfile = '/data/dsa110/T1/' + corrXX + '/' + datestring + '/fil_' + output_dict['trigname'] + '/' + output_dict['trigname'] +\t'_' + str(ibeam) + '.fil'\n print(filfile)\n LOGGER.info('Working on {0}'.format(output_dict['trigname']))\n found_filfile = search_for_local_file(filfile)\n output_dict['filfile'] = found_filfile\n\n if found_filfile is None:\n LOGGER.error('No filfile for {0}'.format(output_dict['trigname']))\n #with open(OUTPUT_PATH + output_dict['trigname'] + '.json', 'w') as f: #encoding='utf-8'\n # json.dump(output_dict, f, ensure_ascii=False, indent=4)\n return output_dict\n \n # launch candplotter\n try:\n output_dict['candplot'] = filf.filplot_entry(datestring,a)\n except Exception as exception:\n logging_string = \"Could not make filplot {0} due to {1}. 
Callback:\\n{2}\".format(\n output_dict['trigname'],\n type(exception).__name__,\n ''.join(\n traceback.format_tb(exception.__traceback__)\n )\n )\n print(logging_string)\n LOGGER.error(logging_string)\n \n #with open(OUTPUT_PATH + output_dict['trigname'] + '.json', 'w') as f: #encoding='utf-8'\n # json.dump(output_dict, f, ensure_ascii=False, indent=4)\n\n return output_dict\n\n # wait for voltage files to be written\n \n\n # write output_dict to disk\n with open(OUTPUT_PATH + output_dict['trigname'] + '.json', 'w') as f: #encoding='utf-8' \n json.dump(output_dict, f, ensure_ascii=False, indent=4)\n\n return output_dict\n\n", "id": "3498107", "language": "Python", "matching_score": 3.3682949542999268, "max_stars_count": 0, "path": "dsaT3/T3_manager.py" }, { "content": "import json, os, glob, sys\nfrom dsautils import dsa_store\nds = dsa_store.DsaStore()\ndatestring = ds.get_dict('/cnf/datestring')\n\nT3root = '/media/ubuntu/data/dsa110/T3/'\n\n# copy from T3 directory \nos.system('mkdir -p '+T3root+datestring)\nos.system('cp /home/ubuntu/data/T3/* '+T3root+datestring)\n\nfls = glob.glob(T3root+datestring+'/*.json')\n\n# let me know which ones will be archived\nfor fl in fls:\n f = open(fl)\n de = json.load(f)\n print('Key <save> not in {0}'.format(fl))\n # Skip corr node json files without the save key if OoD archives twice\n if de.get('save', False):\n print('Will save voltages for ',de['trigname'])\n\nfor fl in fls:\n\n f = open(fl)\n de = json.load(f)\n if de.get('save', False): \n\n for corr in ['corr03', 'corr04', 'corr05', 'corr06', 'corr07', 'corr08', 'corr10', 'corr11', 'corr12', 'corr14', 'corr15', 'corr16', 'corr18', 'corr19', 'corr21', 'corr22']:\n\n if de[corr+'_header'] is True:\n\n outfile_h = T3root + datestring + '/'+corr+'_'+de['trigname']+'_header.json'\n \n if not os.path.exists(outfile_h):\n print('copying header '+corr+' '+de['trigname'])\n os.system('scp '+corr+'.sas.pvt:./data/'+de['trigname']+'_header.json '+outfile_h)\n \n if de[corr+'_data'] is True:\n\n outfile_d = T3root + datestring + '/'+corr+'_'+de['trigname']+'_data.out'\n\n if not os.path.exists(outfile_d):\n print('copying data '+corr+' '+de['trigname'])\n os.system('scp '+corr+'.sas.pvt:./data/'+de['trigname']+'_data.out '+outfile_d)\n \n", "id": "5157656", "language": "Python", "matching_score": 1.6175241470336914, "max_stars_count": 0, "path": "services/archive.py" }, { "content": "import json\nimport os.path\nimport numpy as np\nimport subprocess\nimport pipes\n\n_allowed = ['astrophysical', 'instrumental', 'unsure/noise', 'rfi', 'save', '']\n\ndef exists_remote(host, path):\n \"\"\"Test if a file exists at path on a host accessible with SSH.\"\"\"\n status = subprocess.call(\n ['ssh', host, 'test -f {}'.format(pipes.quote(path))])\n if status == 0:\n return True\n if status == 1:\n return False\n raise Exception('SSH failed')\n\ndef check_voltages(candname):\n\n filename = f'/home/ubuntu/data/T3/{candname}.json'\n assert os.path.exists(filename), f'candidate json file {filename} not found'\n dd = readfile(filename)\n \n corrs = ['corr03','corr04','corr05','corr06','corr07','corr08','corr10','corr11','corr12','corr14','corr15','corr16','corr18','corr19','corr21','corr22']\n\n # edit corr03_data and corr03_header\n for corr in corrs:\n\n data_file = '/home/ubuntu/data/'+candname+'_data.out'\n header_file = '/home/ubuntu/data/'+candname+'_header.json'\n if exists_remote(corr+'.sas.pvt',data_file):\n dd[corr+'_data'] = True\n print('Found data:',corr)\n if exists_remote(corr+'.sas.pvt',header_file):\n 
dd[corr+'_header'] = True\n print('Found header:',corr)\n\n writefile(dd, filename)\n\ndef readfile(filename):\n \"\"\" Read candidate json trigger file and return dict\n Also accepts npy file\n TODO: add file lock?\n \"\"\"\n\n try:\n with open(filename, 'r') as fp:\n dd = json.load(fp)\n return dd\n except:\n print('File is not json')\n try:\n dd = np.load(filename,allow_pickle=True)\n return dd.tolist()\n except:\n print('File is not .npy')\n return None\n \n\ndef writefile(dd, filename):\n \"\"\" Write candidate json trigger file with dict\n \"\"\"\n\n with open(filename, 'w') as fp:\n json.dump(dd, fp)\n\n\ndef list_cands_labels(filename):\n \"\"\" read json file and list all candidates and labels.\n TODO: decide if more than one allowed\n \"\"\"\n \n dd = readfile(filename)\n candnames = list(dd.keys())\n for candname in candnames:\n labels = [kk for kk in dd[candname].keys() if kk in _allowed]\n if len(labels):\n labelstr = ', '.join(labels)\n else:\n labelstr = 'no labels'\n print(f'{candname}: {labelstr}')\n\n\ndef set_label(candname, label, filename=None):\n \"\"\" Read, add label, and write candidate json file.\n Can optionally provide full path to file.\n Default assumes name of <candname>.json in cwd.\n TODO: decide if file can have more than one candname.\n \"\"\"\n\n assert label in _allowed, f'label must be in {_allowed}'\n\n if filename is None:\n filename = f'/home/ubuntu/data/T3/{candname}.json'\n\n assert os.path.exists(filename), f'candidate json file {filename} not found'\n\n dd = readfile(filename)\n\n if label == 'save':\n dd['save'] = True\n else:\n dd['label'] = label\n writefile(dd, filename)\n \n \ndef set_notes(candname, notes, filename=None):\n \"\"\" Read, add notes, and write candidate json file.\n Can optionally provide full path to file.\n Default assumes name of <candname>.json in cwd.\n TODO: decide if file can have more than one candname.\n \"\"\"\n\n if filename is None:\n filename = f'/home/ubuntu/data/T3/{candname}.json'\n\n assert os.path.exists(filename), f'candidate json file {filename} not found'\n\n dd = readfile(filename)\n dd['notes'] = notes\n writefile(dd, filename)\n", "id": "3787989", "language": "Python", "matching_score": 3.4505975246429443, "max_stars_count": 0, "path": "dsaT3/labels.py" }, { "content": "import json\nimport os.path\n\n_allowed = ['astrophysical', 'instrumental', 'unsure/noise', 'rfi', 'archive']\n\ndef readfile(filename):\n \"\"\" Read candidate json trigger file and return dict\n TODO: add file lock?\n \"\"\"\n\n with open(filename, 'r') as fp:\n dd = json.load(fp)\n\n return dd\n\n\ndef writefile(dd, filename):\n \"\"\" Write candidate json trigger file with dict\n \"\"\"\n\n with open(filename, 'w') as fp:\n json.dump(dd, fp)\n\n\ndef list_cands_labels(filename):\n \"\"\" read json file and list all candidates and labels.\n TODO: decide if more than one allowed\n \"\"\"\n \n dd = readfile(filename)\n candnames = list(dd.keys())\n for candname in candnames:\n labels = [kk for kk in dd[candname].keys() if kk in _allowed]\n if len(labels):\n labelstr = ', '.join(labels)\n else:\n labelstr = 'no labels'\n print(f'{candname}: {labelstr}')\n\n\ndef set_label(candname, label, filename=None):\n \"\"\" Read, add label, and write candidate json file.\n Can optionally provide full path to file.\n Default assumes name of <candname>.json in cwd.\n TODO: decide if file can have more than one candname.\n \"\"\"\n\n assert label in _allowed, f'label must be in {_allowed}'\n\n if filename is None:\n filename = f'{candname}.json'\n\n 
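# Hedged sketch of the read-modify-write cycle behind set_label: load the
# candidate's JSON, update a single key, write it back. A temporary file keeps
# the example self-contained; the services point at per-candidate files under
# a data directory instead.
import json
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as fp:
    json.dump({'trigname': 'toycand', 'label': ''}, fp)
    fname = fp.name

with open(fname) as fp:
    dd = json.load(fp)
dd['label'] = 'astrophysical'         # one of the _allowed labels above
with open(fname, 'w') as fp:
    json.dump(dd, fp)

with open(fname) as fp:
    print(json.load(fp)['label'])     # -> astrophysical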
assert os.path.exists(filename), f'candidate json file {filename} not found'\n\n dd = readfile(filename)\n\n if candname in dd.keys():\n if label == 'archive':\n dd[candname][label] = True\n else:\n dd[candname]['label'] = label\n writefile(dd, filename)\n else:\n print(f'candname {candname} not found in {filename}. no label applied.')\n \n \n\n", "id": "7039886", "language": "Python", "matching_score": 0.32652023434638977, "max_stars_count": 0, "path": "event/labels.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport ast\nfrom IPython.display import display\nfrom matplotlib.patches import Ellipse\nimport math\n\n# path imports\nfrom astropy.coordinates import SkyCoord\nfrom astropath import path\nfrom astropath import localization\nfrom astropath import chance\nfrom astropath import bayesian\n\ndef get_candidates(frb_loc, r, true_gal=-1, gal_cat=None):\n '''Helper function for single_path, grabs all galaxies withing a certain radius from a central loc'''\n radius = r/3600\n dec = gal_cat[np.logical_and(gal_cat['dec'] < frb_loc[1]+radius, gal_cat['dec'] > frb_loc[1]-radius)]\n candidates = dec[np.logical_and(dec['ra'] < frb_loc[0]+radius, dec['ra'] > frb_loc[0]-radius)]\n return candidates\n\ndef single_path(frb, cand_info, offset_info, search_rad=15, plot=False, gal_cat=None):\n '''\n Runs PATH for a single frb given assumed priors, p_u, and search radius.\n Plotting returns all the candidates colored by their association probability. \n The orange ring is the frb ellipse, green ring means there was a correct association\n if there is an incorrect association the guessed galaxy will be red and the true will be cyan\n \n Parameters:\n frb (arr) containing the values of a single row from the output of sim_frbs\n cand_info (tuple) (unknown probability (float), keyword for Aggarwal cand_priors)\n offset_info (tuple) (maximum size (int), keyword for Aggarwal off_priors (str))\n search_rad (int) radius to search around frb in arcsec\n plot (boolean) True plots\n Returns:\n dataframe of candidates, and thier association probabilities\n Example:\n candidates = single_path(frbs.iloc[22], (0., 'inverse'), (6, 'exp'), search_rad=7, plot=True)\n '''\n Path = path.PATH()\n # init frb\n frb_coord = SkyCoord(frb[0], frb[1], unit='deg')\n eellipse = dict(a=frb[2], b=frb[2], theta=0.)\n Path.init_localization('eellipse', center_coord=frb_coord, eellipse=eellipse)\n \n # init candidates\n candidates = get_candidates((frb_coord.ra.value, frb_coord.dec.value), search_rad, gal_cat=gal_cat)\n Path.init_candidates(candidates.ra.values,\n candidates.dec.values,\n candidates.diskSize.values,\n mag=candidates.Rc.values, \n sfr=candidates.sfr.values)\n\n # init priors\n P_u, cand_pdf = cand_info\n mx, off_pdf = offset_info\n Path.init_cand_prior(cand_pdf, P_U=P_u)\n Path.init_theta_prior(off_pdf, mx)\n # calculating\n Path.calc_priors()\n P_Ox, P_Ux = Path.calc_posteriors('fixed', box_hwidth=30., max_radius=30)\n # adding true galaxy index to results df\n Path.candidates['gal_Index'] = candidates.index\n \n # adding probabilities to candidates for easier plotting\n candidates['pOx'] = Path.candidates['P_Ox'].values\n if plot:\n figure, axes = plt.subplots(figsize=(10, 10))\n display(candidates[candidates['pOx']>.05])\n # plotting galaxies based on probability\n for i in candidates.sort_values('diskSize', ascending=False).values:\n axes.add_patch(plt.Circle((i[0], i[1]), i[3]/3600, facecolor=plt.cm.Blues(i[-1]), alpha=1, edgecolor='k'))\n \n # circle outlines for 
frbs, true_gal, guessed_gal\n axes.add_patch(plt.Circle((frb[0], frb[1]), frb[2]/3600, fill=False, color='tab:orange', linewidth=2))\n tru = gal_cat[gal_cat.index == frb[-1]].values[0] # getting tru_gal variables\n axes.add_patch(plt.Circle((tru[0], tru[1]), tru[3]/3600, fill=False, edgecolor='tab:cyan', linewidth=2))\n best_index = Path.candidates[Path.candidates.P_Ox == Path.candidates.P_Ox.max()]['gal_Index'].values[0]\n best = gal_cat[gal_cat.index == best_index].values[0] # getting best_gal variables\n if frb[-1]==best_index: \n axes.add_patch(plt.Circle((best[0], best[1]), best[3]/3600, fill=False, edgecolor='tab:green', linewidth=2))\n else:\n axes.add_patch(plt.Circle((best[0], best[1]), best[3]/3600, fill=False, edgecolor='tab:red', linewidth=2))\n # making color map\n colors = candidates.pOx.values\n colors[-1] = 1.\n plt.scatter(candidates.ra.values, candidates.dec.values, c=colors, alpha=1)\n plt.gca().set_aspect('equal', adjustable='box')\n plt.colorbar() \n return Path.candidates\n\ndef multiple_path(frbs, cand_info, offset_info, search_rad=15, save=None, plot=False, gal_cat=None):\n '''\n Runs path for an entire catalog of frbs, saves in csv\n \n Parameters:\n frbs (arr) output of sim_frbs\n cand_info (tuple) (unknown probability (float), keyword for Aggarwal cand_priors)\n offset_info (tuple) (maximum size (int), keyword for Aggarwal off_priors (str))\n search_rad (int) radius to search around frb in arcsec\n save (str) filename which will be appended with the length of the input frb cat\n plot (boolean) True plots\n Returns:\n dataframe of important statistics for analysis\n Example:\n multiple_path(frbs, (0.05, 'inverse'), (6, 'exp'), search_rad=7, save='inverse', gal_cat=galaxies)\n '''\n stats = []\n count = 0\n for i, r in frbs.iterrows():\n results = single_path(r, cand_info, offset_info, search_rad=search_rad, plot=False, gal_cat=gal_cat)\n pox = results.P_Ox.values\n true_gal = r[-1]\n best_gal = results[results.P_Ox == results.P_Ox.max()]['gal_Index'].values[0]\n stats.append([pox[pox > .01], max(pox), best_gal==true_gal, true_gal, len(results)])\n count += 1\n if count%500==0:\n print('{} '.format(count), end='')\n stat = pd.DataFrame(stats, columns=['all_pOx', 'max_pOx', 'correct', 'gal_Index', 'num_cand'])\n if save != None:\n stat.to_csv('./sims/{0}_{1}.csv'.format(save, len(stat)), header=True, index=False)\n return stat\n\ndef import_stats(name):\n '''imports the output of multiple_path. 
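# Hedged sketch of the per-FRB bookkeeping done in multiple_path: for each set
# of candidate posteriors keep the values above 0.01, the maximum, and whether
# the most probable candidate is the true host. All numbers are made up.
import numpy as np
import pandas as pd

results = [
    {'pOx': np.array([0.90, 0.06, 0.004]), 'gal_index': [7, 3, 5], 'true': 7},
    {'pOx': np.array([0.40, 0.35, 0.25]),  'gal_index': [2, 9, 4], 'true': 9},
]

rows = []
for r in results:
    best = r['gal_index'][int(np.argmax(r['pOx']))]
    rows.append([r['pOx'][r['pOx'] > 0.01], r['pOx'].max(),
                 best == r['true'], r['true'], len(r['pOx'])])

stats = pd.DataFrame(
    rows, columns=['all_pOx', 'max_pOx', 'correct', 'gal_Index', 'num_cand'])
print(stats[['max_pOx', 'correct', 'num_cand']])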
This is not the best way to do this and liklely has errors\n Should work at least until 10000 frbs though'''\n s = pd.read_csv('sims/{}.csv'.format(name))\n new = [ast.literal_eval(i.replace('\\n', '').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').replace(' ', ', ')) for i in s.all_pOx.values]\n #s.drop('all_pOx')\n s['all_pOx'] = new\n return s\n\ndef analyze(stats):\n '''\n Runs analysis to recreate figures 5 and 6 from Aggarwal+2021 also gives percentage secure and percentage correct\n \n Parameters:\n stats (df) output of multiple_frbs or import_stats\n Returns:\n P(O|x) histogram data, max[P(O|x)] histogram data, fig6 data, \n Example:\n multiple_path(frbs, (0.05, 'inverse'), (6, 'exp'), search_rad=7, save='inverse', gal_cat=galaxies), (percentage_secure, TP, P01, per_corr)\n '''\n a = plt.hist(np.concatenate(stats.all_pOx.values), 50, density=True)\n plt.ylabel('PDF')\n plt.xlabel('$P(O_i|x)$')\n plt.figure()\n b = plt.hist(stats.max_pOx, 50, density=True)\n plt.ylabel('PDF')\n plt.xlabel('max[$P(O_i|x)$]')\n\n correct = [(stats.max_pOx.values[i], stats.correct.values[i])for i in range(len(stats))]\n correct.sort()\n n = int(len(correct)/10)\n chunks = [correct[i:i + n] for i in range(0, len(correct), n)]\n \n max_poxs = []\n percentage = []\n mins = []\n maxs = []\n for i in chunks:\n maxes = [j[0] for j in i]\n mn = np.mean(maxes)\n max_poxs.append(mn)\n mins.append(mn-min(maxes))\n maxs.append(max(maxes)-mn)\n tfs = [j[1] for j in i]\n percentage.append(sum(tfs)/len(tfs))\n minmax = [mins, maxs]\n plt.figure(figsize=(6, 6))\n plt.plot([0, 1], [0, 1], 'k--', zorder=1)\n plt.scatter(max_poxs, percentage, zorder=10, s=10)\n plt.errorbar(max_poxs, percentage, xerr=minmax, capsize=3, linestyle='', zorder=10)\n plt.xlabel('max[$P(O_i|x)$]')\n plt.ylabel('Fraction correct');\n\n per_corr = sum([i[1] for i in correct])/len(correct)\n\n secure = np.array(stats.max_pOx.values)\n percentage_secure = len(secure[secure>.95])/len(stats)\n print('f(T+secure): {0:.2f}'.format(percentage_secure))\n tp = [i[1] for i in correct if i[0] > .95]\n try:\n TP = sum(tp)/len(tp)\n print('TP: {0:.2f}'.format(TP))\n except ZeroDivisionError:\n print('TP: N/A')\n zero = [len(i) for i in stats.all_pOx.values] / stats.num_cand.values\n P01 = 1-np.mean(zero)\n print('p<.01: {0:.2f}'.format(P01))\n print('percentage correct: {0:.2f}'.format(per_corr))\n return a, b, (max_poxs, percentage, minmax), (percentage_secure, TP, P01, per_corr)", "id": "8773882", "language": "Python", "matching_score": 4.700526237487793, "max_stars_count": 0, "path": "path_help.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport astropy.cosmology as cosm\nimport scipy.integrate as integrate\n\n# path imports\nfrom astropy.coordinates import SkyCoord\nfrom astropath import path\nfrom astropath import localization\nfrom astropath import chance\nfrom astropath import bayesian\n\n\ndef angle_conversion(inc, tilt):\n '''helper function for DM_calc to convert inclination and tilt of galaxy into spherical angles for line-of-sight'''\n # rotated the point (0, 1, 0) around the x the y axes and then converted the x, y, z position into polar coords\n theta = np.arccos(np.sin(inc)*np.cos(tilt)/ \n np.sqrt((np.sin(inc)*np.sin(tilt))**2+\n np.cos(inc)**2+\n (np.sin(inc)*np.cos(tilt))**2))+np.pi\n phi = np.arctan(np.cos(inc)/(np.sin(inc)*np.sin(tilt)))\n return theta, phi\n\ndef normalize(total, r0, mx, em=False):\n '''helper function for DM_calc function that returns a normalization 
constant for the electron density based on the emmision measure'''\n z0 = r0/7.3\n def ellipse_int2(z):\n r = np.sqrt(1-z**2/(z0*mx)**2)\n return r0*np.exp(-np.abs(2*z/z0))*(1-np.exp(-2*r/r0))\n result = integrate.quad(ellipse_int2, -z0*mx, z0*mx)[0]\n return total/result\n\ndef sfr_EM(r0_mpc, sfr):\n '''helper function for DM_calc which gives the H_alpha emmision measure based on the SFR tendulkar+2017'''\n a = r0_mpc*3.086e24\n Lha_int = 1.26e41 * sfr # erg s-1\n Sha = Lha_int/((a*3600*360/.673)**2/7.3)\n EM_ha = 2.75*Sha*3/17*10**(18)\n return EM_ha\n\ndef DM_calc(frb_loc, scale_height, mx, inc, tilt, sfr, plot=False):\n ''' takes in the frb location and galaxy information and returns (DM (pc cm-3), path_length, maximum density)\n '''\n # inclination [0, 1.57], tilt [0, 6.28]\n if inc > np.pi/2 or inc < 0:\n raise ValueError('incination must be in range [0, pi/2]')\n if tilt > 2*np.pi or tilt < 0:\n raise ValueError('tilt must be in range [0, 2pi]')\n mx *= 2 # extending the maximum for the ellipse so that we don't have DM = 0 \n x, y, z = frb_loc # initilizing location\n x0, y0, z0 = x, y, z\n theta, phi = angle_conversion(inc, tilt) \n xylim = scale_height*mx # limits \n zlim = xylim / 7.3\n \n EM = sfr_EM(scale_height, sfr)\n norm = np.sqrt(normalize(EM, scale_height*1e6, 40))\n def density(r, z, x, y): # electron density function\n return norm*np.exp(-np.absolute(r/scale_height))*np.exp(-np.absolute(z/scale_height*7.3))\n def ellipse(loc, limit): # equation of ellipse to check limits\n return np.sqrt((loc[0]**2+loc[1]**2)/(limit[0]**2)+loc[2]**2/(limit[1]**2))\n \n integral = [0] # integral initialization\n s = .0001 # stepsize in mpc\n # run until the ellipse of equal density is hit\n while ellipse((x, y, z), (xylim, zlim))<1:\n r = (x**2+y**2)**(1/2)\n integral.append(density(r, z, x, y)) # left hand riemman sum (underestimate)\n # stepping in xyz\n x1 = x + s*np.sin(theta)*np.cos(phi)\n y1 = y + s*np.sin(theta)*np.sin(phi)\n z1 = z + s*np.cos(theta)\n x, y, z = (x1, y1, z1) #resetting the location\n total_dist = math.dist([x0, y0, z0], [x, y, z])*10**6\n return sum(np.array(integral)*s*10**6), total_dist, max(integral)\n\ndef DM_scat(dm, redshift):\n '''helper function for sim_frbs which gives the scattering wrt DM by using the MW DM-tau relation and correcting for geometry and redshift'''\n tau = 3.6e-6*dm**2.2 * (1.+.00194*dm**2)*(1000/327)**(-4.0)\n return tau*3/(1+redshift)**3\n\ndef igm_dm(z, f_igm, sample=False):\n '''helper function for sim_frbs which calculates the dm contribution from the igm based on the redshift and baryonic fraction'''\n cosmo = cosm.Planck15\n Om0 = .315\n Ode0 = .685\n h = .673\n myz = 10.**((np.arange(1000)+0.5)*(np.log10(z)+3.)/1000.-3.)\n mydz = 10.**((np.arange(1000)+1.)*(np.log10(z)+3.)/1000.-3.)-10.**((np.arange(1000)+0.)*(np.log10(z)+3.)/1000.-3.)\n Iz = np.sum(mydz*(1.+myz)**2./np.sqrt(Om0*(1.+myz)**3.+Ode0))\n dm = 935.*f_igm*(h/0.7)**(-1.)*Iz\n # variation around different DMs based on sightline differences\n if sample:\n if z <1.5:\n return np.random.normal(dm, .5*dm) #should be variation during this McQuinn+2014 \n if z>=1.5:\n return np.random.normal(dm, .2*dm)\n else:\n return dm\n \ndef pick_off(frb, mx, pdf=None):\n '''adjusted offset picker, picks based on inclination and tilt for a 3D location within the galaxy, to allow for DM calculations. Then the location is converted into RA and DEC based on D_a. 
To use PATH offsets set pdf equal to the desired pdf'''\n ra = frb[0]\n dec = frb[1]\n r0 = frb[10]/.673/3 # converting to scale height \n z0 = r0 / 7.3 \n da = frb[4]\n inc = np.arccos(frb[6])\n tilt = 0.001\n #tilt = 2*np.pi*np.random.random()\n \n # picking r and z values based on exponential dist\n r = pd.DataFrame(np.linspace(0., r0*mx, 10000))\n r_weight = [i[0] for i in np.exp(-r.values/r0)]\n z = pd.DataFrame(np.linspace(0., z0*mx, 10000))\n z_weight = [i[0] for i in np.exp(-z.values/z0)]\n zs = z.sample(n=1, weights=z_weight).values[0][0]*np.random.choice([-1, 1])\n rs = r.sample(n=1, weights=r_weight).values[0][0]\n theta = 2*np.pi*np.random.random(1)[0] \n # this does pick within a puck but in with a mx=5 ~.003 were in a ellipse\n \n # getting x, y, z coords\n xshift = rs*np.cos(theta)\n yshift = rs*np.sin(theta)\n # rotating along the xaxis (inc)\n yrot = yshift*np.cos(inc)-zs*np.sin(inc)\n zrot = yshift*np.sin(inc)+zs*np.cos(inc)\n # rotating aling the yaxis (tile)\n xtilt = xshift*np.cos(tilt)+zrot*np.sin(tilt)\n ztilt = zrot*np.cos(tilt)-xshift*np.sin(tilt)\n # using angular diameter distance to get ra, dec\n ra += xtilt/da*180/np.pi\n dec += ztilt/da*180/np.pi\n\n # DM and tau calculations\n dm_host, path_length, max_dens = DM_calc((xshift, yshift, zs), r0, mx, inc, tilt, frb[6])\n dm_mw = 50 + 30 # 50 from halo, 30 from ism see Macquart+2020\n dm_igm = igm_dm(frb[5], .7, sample=True) # assuming f_baryons = .7\n dm = dm_mw + dm_igm + dm_host/(1+frb[5])\n scat_host = DM_scat(dm_host, frb[5])\n scat = scat_host + 7e-6 # MW scattering estimated from Chawla\n \n if pdf != None: # older version\n radii = pd.DataFrame(np.linspace(0., mx, 10000))\n ## only applies same offset priors as aggarwal\n weights = [i[0] for i in bayesian.pw_Oi(radii.values, frb[3], dict(max=mx, PDF=pdf))]\n r = radii.sample(n=1, weights=weights).values[0][0]\n theta = 2*np.pi*np.random.random(1)[0]\n ra += r/3600*np.cos(theta)\n dec += r/3600*np.sin(theta)\n return (ra, dec, r)\n else:\n return (ra, dec, (dm_host, dm_igm, dm), path_length, max_dens, (scat_host, scat))\n \ndef sim_frbs(catalog, n, sigma_frb, cand_info, offset_info):\n '''\n Simulates a number of frbs in a given catalog for true priors\n \n Parameters:\n catalog (df) catalog containing galaxies to sim, needs prior variables\n n (int) number of frbs to sim\n sigma_frb (float) sets error ellipse diameter, optional keyword 'alternate' gives random value [.1, 1]\n cand_info (1 element arr) contains prior keyword (str) possible values shown as functions below\n offset_info (tuple) (maximum size (int), keyword for Aggarwal off_priors (str))\n \n Returns:\n dataframe with parameters of simulated frbs\n \n Example:\n sim_frbs(cat, 10000, 'uniform', ('inverse'), (2., 'uniform')\n '''\n # initializing frb catalog\n frbs = []\n # getting candidates\n cand_prior = cand_info\n # setting the weights given the prior\n if cand_prior == 'inverse':\n weights = 1/chance.driver_sigma(catalog.Rc.values)\n if cand_prior == 'inverse1':\n weights = 1/chance.driver_sigma(catalog.Rc.values)/catalog.diskSize.values\n if cand_prior == 'inverse2':\n weights = 1/chance.driver_sigma(catalog.Rc.values)/catalog.diskSize.values**2\n if cand_prior == 'uniform':\n weights = np.ones(len(catalog))\n if cand_prior == 'sfr':\n weights = catalog.sfr.values\n if cand_prior == 'mass':\n weights = catalog.stellarMass.values\n # applying weights (normalization within)\n frb_cands = catalog.sample(n=n, weights=weights)\n # getting offsets for each candidate\n mx, pdf = offset_info\n 
for i, r in frb_cands.iterrows():\n # gives the random frb ellipse diameter or the global one\n if sigma_frb=='alternate':\n sigma = .1+.9*np.random.random()\n else:\n sigma = sigma_frb\n if pdf == None:\n ra, dec, dm, path_length, max_dens, scat = pick_off(r, mx)\n frbs.append([ra, dec, sigma, dm[0], dm[1], dm[2], path_length, max_dens, scat[0], scat[1], int(i)])\n else:\n ra, dec, r = pick_off(r, mx, pdf)\n frbs.append([ra, dec, sigma, 'na', 'na', 'na', r, int(i)])\n # loading data into frb catalog\n return pd.DataFrame(frbs, columns=['ra', 'dec', 'radius', 'dm_host', 'dm_igm', 'dm', 'path_length', 'n_max', 'tau_host', 'tau', 'gal_Index'])\n\n", "id": "5495985", "language": "Python", "matching_score": 0.32554709911346436, "max_stars_count": 0, "path": "sim_frbs.py" }, { "content": "\"\"\"This module holds all objects shared by all other modules in tftpy.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nLOG_LEVEL = logging.NOTSET\nMIN_BLKSIZE = 8\nDEF_BLKSIZE = 512\nMAX_BLKSIZE = 65536\nSOCK_TIMEOUT = 5\nMAX_DUPS = 20\nTIMEOUT_RETRIES = 5\nDEF_TFTP_PORT = 69\n\n# A hook for deliberately introducing delay in testing.\nDELAY_BLOCK = 0\n\n# Initialize the logger.\nlogging.basicConfig()\n\n# The logger used by this library. Feel free to clobber it with your own, if\n# you like, as long as it conforms to Python's logging.\nlog = logging.getLogger('tftpy')\n\ndef create_streamhandler():\n \"\"\"add create_streamhandler output logging.DEBUG msg to stdout.\n \"\"\"\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(levelname)-8s %(message)s')\n console.setFormatter(formatter)\n return console\n\ndef create_rotatingfilehandler(path, maxbytes=10*1024*1024, count=20):\n \"\"\"\n add create_rotatingfilehandler record the logging.DEBUG msg to logfile. you can change the maxsize (10*1024*1024)\n and amount of the logfiles\n \"\"\"\n Rthandler = RotatingFileHandler(path, maxbytes, count)\n Rthandler.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n Rthandler.setFormatter(formatter)\n return Rthandler\n\ndef addHandler(hdlr):\n \"\"\"add handler methods\n More details see the page:\n https://docs.python.org/2/library/logging.handlers.html#module-logging.handlers\n \"\"\"\n log.addHandler(hdlr)\n\ndef tftpassert(condition, msg):\n \"\"\"This function is a simple utility that will check the condition\n passed for a false state. If it finds one, it throws a TftpException\n with the message passed. 
This just makes the code throughout cleaner\n by refactoring.\"\"\"\n if not condition:\n raise TftpException(msg)\n\ndef setLogLevel(level):\n \"\"\"This function is a utility function for setting the internal log level.\n The log level defaults to logging.NOTSET, so unwanted output to stdout is\n not created.\"\"\"\n log.setLevel(level)\n\nclass TftpErrors(object):\n \"\"\"This class is a convenience for defining the common tftp error codes,\n and making them more readable in the code.\"\"\"\n NotDefined = 0\n FileNotFound = 1\n AccessViolation = 2\n DiskFull = 3\n IllegalTftpOp = 4\n UnknownTID = 5\n FileAlreadyExists = 6\n NoSuchUser = 7\n FailedNegotiation = 8\n\nclass TftpException(Exception):\n \"\"\"This class is the parent class of all exceptions regarding the handling\n of the TFTP protocol.\"\"\"\n pass\n\nclass TftpTimeout(TftpException):\n \"\"\"This class represents a timeout error waiting for a response from the\n other end.\"\"\"\n pass\n", "id": "4651867", "language": "Python", "matching_score": 2.758456230163574, "max_stars_count": 1, "path": "tftpy/TftpShared.py" }, { "content": "\"\"\"\nThis library implements the tftp protocol, based on rfc 1350.\nhttp://www.faqs.org/rfcs/rfc1350.html\nAt the moment it implements only a client class, but will include a server,\nwith support for variable block sizes.\n\nAs a client of tftpy, this is the only module that you should need to import\ndirectly. The TftpClient and TftpServer classes can be reached through it.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport sys\n\n# Make sure that this is at least Python 2.3\nrequired_version = (2, 3)\nif sys.version_info < required_version:\n raise ImportError(\"Requires at least Python 2.3\")\n\nfrom .TftpShared import *\nfrom .TftpPacketTypes import *\nfrom .TftpPacketFactory import *\nfrom .TftpClient import *\nfrom .TftpServer import *\nfrom .TftpContexts import *\nfrom .TftpStates import *\n\n", "id": "12191719", "language": "Python", "matching_score": 0.24625907838344574, "max_stars_count": 1, "path": "tftpy/__init__.py" }, { "content": "import json\nfrom os import environ, path\nimport datetime\ntry:\n from datacite import DataCiteRESTClient\nexcept ImportError:\n print('datacite not found. cannot create DOIs')\ntry:\n from caltechdata_api import caltechdata_edit, caltechdata_write\nexcept ImportError:\n print('caltechdata_api not found.')\n\ntry:\n dcp = environ['DATACITEPWD']\n token = environ['TINDTOK']\nexcept KeyError:\n dcp = None\n token = None\n print('Secrets not found. Cannot use datacite API')\n\n_install_dir = path.abspath(path.dirname(__file__))\n\n\ndef send_ctd(dictin={}, doi=None, internalname=None, filenames=[], production=False, schema='43'):\n \"\"\" Create entry at Caltech Data.\n dictin overloads fields in template dictionary.\n filenames is (optional) list of strings with full path to file for upload.\n Upload takes time, so can be left blank and loaded alter via \"edit_ctd\" function.\n \"\"\"\n\n metadata = set_metadata(dictin, internalname=internalname, doi=doi, schema=schema)\n\n # write metadata\n res = caltechdata_write(metadata, token, filenames=filenames, production=production, schema=schema)\n Id = res.rstrip('. 
').split('/')[-1]\n\n return Id\n\n\ndef edit_ctd(Id, metadata={}, filenames=[], production=False):\n \"\"\" Edit an entry at Caltech Data.\n metadata can be emtpy dict ({}) and filenames a list of files associate with existing Id.\n filenames is list of strings with full path to file for upload.\n \"\"\"\n \n # upload supporting data\n caltechdata_edit(token, Id, metadata=metadata, filenames=filenames, production=production)\n\n\ndef set_metadata(dictin=None, internalname=None, doi=None, schema='43'):\n \"\"\" Create dict with metadata for caltechdata/datacite.\n dictin overloads fields in template fields.\n internalname is optional name for event.\n doi can be provided, but if not, one will be generated.\n schema can be '43' or '42' and defines template json file.\n \"\"\"\n\n # template metadata\n with open(path.join(_install_dir, f'data/example{schema}.json'), 'r') as fp:\n metadata = json.load(fp)\n\n # overload metadata\n if dictin is not None:\n for key, value in dictin.items():\n if key in metadata:\n print(f'Overloading metadata key {key} with {value}')\n metadata[key] = value\n\n # modify basic metadata\n if internalname is not None:\n metadata['alternateIdentifiers'] = [{'alternateIdentifier': internalname,\n 'alternateIdentifierType': '?'}]\n dt = datetime.datetime.now()\n metadata['publicationYear'] = f'{dt.year:04}'\n metadata['dates'] = [{'date': f'{dt.year:04}-{dt.month:02}-{dt.day:02}', 'dateType': 'Created'}] # dateType can also be \"Updated\"\n # metadata['formats'] = ['FITS', 'filterbank', 'png'] # or just one zip?\n\n # can provide doi or have one generated by datacite\n if doi is None:\n doi = get_doi(metadata) # TODO: check whether it is returned or just added to metadata dict\n metadata['identifiers'] = [{'identifier': doi, 'identifierType': 'DOI'}]\n\n return metadata\n\n\ndef get_doi(metadata, production=False):\n \"\"\" Use datacite to get DOI for metadata\n \"\"\"\n\n # development\n dcprefix_dev = '10.22013'\n dcurl_dev = 'http://doi.test.datacite.org'\n # production\n dcprefix_prod = '10.26800'\n dcurl_prod = 'http://doi.datacite.org'\n\n if production:\n url = dcurl_prod\n prefix = dcprefix_prod\n else:\n url = dcurl_dev\n prefix = dcprefix_dev\n\n d = DataCiteRESTClient(username='CALTECH.OVRO', password=dcp, prefix=prefix, test_mode=(not production))\n return d.public_doi(metadata, url)\n", "id": "2087954", "language": "Python", "matching_score": 1.2628533840179443, "max_stars_count": 0, "path": "event/caltechdata.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on December 2020\n\nDeveloped and tested on:\n\n- Linux 18.04 LTS\n- Windows 10\n- Python 3.7 (Spyder)\n\n@author: <NAME>\n\"\"\"\n\nimport os\nimport requests\nimport json\nfrom collections import OrderedDict\nimport time \nimport sys\nfrom os import environ\n\n###########################################################################################\n####################################### PARAMETERS ########################################\n\nTNSproduction=\"www.wis-tns.org\"\nTNSsandbox=\"sandbox.wis-tns.org\"\n\ntry:\n YOUR_BOT_ID=environ['TNSGROUPID']\n YOUR_BOT_NAME=environ['TNSNAME']\n api_key=environ['TNSKEY']\nexcept KeyError:\n YOUR_BOT_ID=None\n YOUR_BOT_NAME=None\n api_key=None\n print('Secrets not found. 
TNS APIs will not work.')\n\nlist_of_filenames=\"Here put your list of filenames for uploading.\"\nreport_filename=\"Here put your report filename.\"\n# report type can only be \"tsv\" or \"json\"\nreport_type=\"Here put the type of your report.\"\nid_report=\"Here put your report ID for getting report's reply.\"\n\n# current working directory\ncwd=os.getcwd()\n# folder containing files for uploading\nupload_folder=cwd #os.path.join(cwd,'files_for_uploading')\n# folder containing tsv reports for sending\ntsv_reports_folder=cwd\n# folder containing json reports for sending\njson_reports_folder=cwd\n\n# http errors\nhttp_errors={ \n304: 'Error 304: Not Modified: There was no new data to return.',\n400: 'Error 400: Bad Request: The request was invalid. '\\\n 'An accompanying error message will explain why.',\n403: 'Error 403: Forbidden: The request is understood, but it has '\\\n 'been refused. An accompanying error message will explain why.',\n404: 'Error 404: Not Found: The URI requested is invalid or the '\\\n 'resource requested, such as a category, does not exists.',\n500: 'Error 500: Internal Server Error: Something is broken.',\n503: 'Error 503: Service Unavailable.'\n} \n\n# how many second to sleep\nSLEEP_SEC=1\n# max number of time to check response\nLOOP_COUNTER=60\n# keeping sys.stdout\nold_stdout=sys.stdout\n\n###########################################################################################\n###########################################################################################\n\n\n###########################################################################################\n######################################## FUNCTIONS ########################################\n\n# function for changing data to json format\ndef format_to_json(source):\n # change data to json format and return\n parsed=json.loads(source,object_pairs_hook=OrderedDict)\n result=json.dumps(parsed,indent=4)\n return result\n\n# function for uploading files trough api\ndef upload_files(url,list_of_files):\n try:\n # url for uploading files\n upload_url=url+'/file-upload' \n # headers\n headers={'User-Agent':'tns_marker{\"tns_id\":'+str(YOUR_BOT_ID)+', \"type\":\"bot\",'\\\n ' \"name\":\"'+YOUR_BOT_NAME+'\"}'}\n # api key data\n api_data={'api_key':api_key}\n # construct a dictionary of files and their data\n files_data={}\n for i in range(len(list_of_files)):\n file_name=list_of_files[i]\n file_path=os.path.join(upload_folder,file_name)\n key='files['+str(i)+']' \n if file_name.lower().endswith(('.asci', '.ascii')):\n value=(file_name, open(file_path), 'text/plain')\n else:\n value=(file_name, open(file_path,'rb'), 'application/fits')\n files_data[key]=value\n # upload all files using request module\n response=requests.post(upload_url, headers=headers, data=api_data, files=files_data)\n # return response\n return response\n except Exception as e:\n return [None,'Error message : \\n'+str(e)]\n\n# function for sending tsv reports (AT or Classification)\ndef send_tsv_report(url,tsv_report):\n try:\n # url for sending tsv reports\n tsv_url=url+'/csv-report'\n\n # headers\n headers={'User-Agent':'tns_marker{\"tns_id\":'+str(YOUR_BOT_ID)+', \"type\":\"bot\",'\\\n ' \"name\":\"'+YOUR_BOT_NAME+'\"}'}\n # api key data \n api_data={'api_key':api_key}\n # tsv report file path\n tsv_file_path=os.path.join(tsv_reports_folder,tsv_report)\n # read tsv data from file\n tsv_read=(tsv_report, open(tsv_file_path,'rb'))\n # construct a dictionary of tsv data\n tsv_data={'csv':tsv_read}\n # send tsv report using request 
module\n response=requests.post(tsv_url, headers=headers, data=api_data, files=tsv_data)\n # return response\n return response\n except Exception as e:\n return [None,'Error message : \\n'+str(e)]\n\n# function for sending json reports (AT or Classification)\ndef send_json_report(url, json_report):\n try:\n # url for sending json reports\n json_url=url+'/bulk-report'\n # headers\n headers={'User-Agent':'tns_marker{\"tns_id\":'+str(YOUR_BOT_ID)+', \"type\":\"bot\",'\\\n ' \"name\":\"'+YOUR_BOT_NAME+'\"}'} \n # json report file path\n json_file_path=os.path.join(json_reports_folder, json_report)\n assert os.path.exists(json_file_path)\n # read json data from file\n json_read=format_to_json(open(json_file_path).read())\n # construct a dictionary of api key data and json data\n json_data={'api_key':api_key, 'data':json_read}\n # send json report using request module\n response=requests.post(json_url, headers=headers, data=json_data)\n # return response\n return response\n except Exception as e:\n return [None,'Error message : \\n'+str(e)]\n\n# function for getting reply from report\ndef reply(url, report_id):\n try:\n # url for getting report reply\n reply_url=url+'/bulk-report-reply'\n # headers\n headers={'User-Agent':'tns_marker{\"tns_id\":'+str(YOUR_BOT_ID)+', \"type\":\"bot\",'\\\n ' \"name\":\"'+YOUR_BOT_NAME+'\"}'}\n # construct a dictionary of api key data and report id\n reply_data={'api_key':api_key, 'report_id':report_id}\n # send report ID using request module\n response=requests.post(reply_url, headers=headers, data=reply_data)\n # return response\n return response\n except Exception as e:\n return [None,'Error message : \\n'+str(e)]\n\n# function that checks response and\n# returns True if everything went OK\n# or returns False if something went wrong\ndef check_response(response):\n # if response exists\n if None not in response:\n # take status code of that response\n status_code=int(response.status_code)\n if status_code==200:\n # response as json data\n json_data=response.json()\n # id code\n id_code=str(json_data['id_code'])\n # id message\n id_message=str(json_data['id_message'])\n # print id code and id message\n print (\"ID code = \"+id_code)\n print (\"ID message = \"+id_message)\n # check if id code is 200 and id message OK\n if (id_code==\"200\" and id_message==\"OK\"):\n return True\n #special case\n elif (id_code==\"400\" and id_message==\"Bad request\"):\n return None\n else:\n return False\n else:\n # if status code is not 200, check if it exists in\n # http errors\n if status_code in list(http_errors.keys()):\n print (list(http_errors.values())\n [list(http_errors.keys()).index(status_code)])\n else:\n print (\"Undocumented error.\")\n return False\n else:\n # response doesn't exists, print error\n print (response[1])\n return False\n\n# find all occurrences of a specified key in json data\n# and return all values for that key\ndef find_keys(key, json_data):\n if isinstance(json_data, list):\n for i in json_data:\n for x in find_keys(key, i):\n yield x\n elif isinstance(json_data, dict):\n if key in json_data:\n yield json_data[key]\n for j in list(json_data.values()):\n for x in find_keys(key, j):\n yield x\n\n# print feedback\ndef print_feedback(json_feedback):\n # find all message id-s in feedback\n message_id=list(find_keys('message_id',json_feedback))\n # find all messages in feedback\n message=list(find_keys('message',json_feedback))\n # find all obj names in feedback\n objname=list(find_keys('objname',json_feedback))\n # find all new obj types in feedback\n 
new_object_type=list(find_keys('new_object_type',json_feedback))\n # find all new obj names in feedback\n new_object_name=list(find_keys('new_object_name',json_feedback))\n # find all new redshifts in feedback\n new_redshift=list(find_keys('new_redshift',json_feedback))\n # index counters for objname, new_object_type, new_object_name\n # and new_redshift lists\n n_o=0\n n_not=0\n n_non=0\n n_nr=0\n # messages to print\n msg=[]\n # go trough every message and print\n for j in range(len(message)):\n m=str(message[j])\n m_id=str(message_id[j])\n if m_id not in ['102','103','110']:\n if m.endswith('.')==False:\n m=m+'.'\n if m_id=='100' or m_id=='101':\n m=\"Message = \"+m+\" Object name = \"+str(objname[n_o])\n n_o=n_o+1\n elif m_id=='120': \n m=\"Message = \"+m+\" New object type = \"+str(new_object_type[n_not])\n n_not=n_not+1\n elif m_id=='121':\n m=\"Message = \"+m+\" New object name = \"+str(new_object_name[n_non])\n n_non=n_non+1\n elif m_id=='122' or m_id=='123':\n m=\"Message = \"+m+\" New redshift = \"+str(new_redshift[n_nr])\n n_nr=n_nr+1\n else:\n m=\"Message = \"+m\n msg.append([\"Message ID = \"+m_id,m])\n # return messages \n return msg\n\n# sending report id to get reply of the report\n# and printing that reply\ndef print_reply(url,report_id):\n # sending reply using report id and checking response\n print (\"Sending reply for the report id \"+report_id+\" ...\")\n reply_res=reply(url, report_id)\n reply_res_check=check_response(reply_res)\n # if reply is sent\n if reply_res_check==True:\n print (\"The report was successfully processed on the TNS.\\n\")\n # reply response as json data\n json_data=reply_res.json()\n # feedback of the response\n feedback=list(find_keys('feedback',json_data))\n # check if feedback is dict or list\n if type(feedback[0])==type([]):\n feedback=feedback[0]\n # go trough feedback\n for i in range(len(feedback)):\n # feedback as json data\n json_f=feedback[i]\n # feedback keys\n feedback_keys=list(json_f.keys())\n # messages for printing\n msg=[]\n # go trough feedback keys\n for j in range(len(feedback_keys)):\n key=feedback_keys[j]\n json_feed=json_f[key]\n msg=msg+print_feedback(json_feed)\n if msg!=[]:\n print (\"-----------------------------------\"\\\n \"-----------------------------------\" )\n for k in range(len(msg)):\n print (msg[k][0])\n print (msg[k][1])\n print (\"-----------------------------------\"\\\n \"-----------------------------------\\n\") \n else:\n if (reply_res_check!=None):\n print (\"The report doesn't exist on the TNS.\")\n else:\n print (\"The report was not processed on the TNS \"\\\n \"because of the bad request(s).\")\n\n# Disable print\ndef blockPrint():\n sys.stdout = open(os.devnull, 'w')\n\n# Restore print\ndef enablePrint():\n sys.stdout.close()\n sys.stdout = old_stdout\n\n# sending json report (at or class) and printing reply\ndef send_report(report, production=False):\n # sending report and checking response\n if production:\n url=\"https://\"+TNSproduction+\"/api\"\n else:\n url=\"https://\"+TNSsandbox+\"/api\"\n\n print (f\"Sending {report} to TNS server {url}...\")\n\n response = send_json_report(url, report)\n response_check = check_response(response)\n\n # if report is sent\n if response_check == True:\n print (\"The report was sent to the TNS.\")\n # report response as json data\n json_data = response.json()\n # taking report id\n report_id = str(json_data['data']['report_id'])\n print (\"Report ID = \"+report_id)\n print (\"\")\n # sending report id to get reply of the report\n # and printing that reply\n # 
waiting for report to arrive before sending reply\n # for report id\n blockPrint()\n counter = 0\n while True:\n time.sleep(SLEEP_SEC)\n reply_response = reply(url, report_id)\n reply_res_check = check_response(reply_response)\n if reply_res_check != False or counter >= LOOP_COUNTER:\n break\n counter += 1\n enablePrint()\n print_reply(url, report_id)\n return report_id\n else:\n print (\"The report was not sent to the TNS.\")\n return None\n\n\ndef get_reply(Id, production=False):\n \"\"\" Wrap reply function for same syntax as send_report\n \"\"\"\n\n if production:\n url=\"https://\"+TNSproduction+\"/api\"\n else:\n url=\"https://\"+TNSsandbox+\"/api\"\n\n resp = reply(url, Id)\n\n return resp\n\n\n# uploading files and printing reply\ndef upload(url, list_of_files):\n # upload files and checking response\n print (\"Uploading files on the TNS...\")\n response=upload_files(url,list_of_files)\n response_check=check_response(response)\n # if files are uploaded\n if response_check==True:\n print (\"The following files are uploaded on the TNS : \")\n # response as json data\n json_data=response.json()\n # list of uploaded files\n uploaded_files=json_data['data']\n for i in range(len(uploaded_files)):\n print (\"filename : \"+str(uploaded_files[i]))\n else:\n print (\"Files are not uploaded on the TNS.\")\n print (\"\\n\")\n\n###########################################################################################\n###########################################################################################\n\n# Comment/Uncomment sections for testing the various examples:\n\n\"\"\"\n# ---------------------------------------------------\n# upload files\nlist_of_filenames=[\"rel_file_1.png\",\"rel_file_2.jpg\",\n \"spectra_example.asci.txt\",\n \"spectra_example.fits\",\n \"frb_region_file_example.fits\"]\nupload(url_tns_api,list_of_filenames)\n# ---------------------------------------------------\n\"\"\"\n\n\"\"\"\n# ---------------------------------------------------\n# send AT report\nreport_filename=\"json_at_report.txt\"\nsend_report(report_filename, production) # production is bool\n# ---------------------------------------------------\n\"\"\"\n\n\"\"\"\n# ---------------------------------------------------\n# reply\nid_report=\"62086\"\nprint_reply(url_tns_api,id_report)\n#---------------------------------------------------\n\"\"\"\n\n", "id": "4551474", "language": "Python", "matching_score": 1.680834174156189, "max_stars_count": 0, "path": "event/tns_api_bulk_report.py" }, { "content": "import click\nfrom event import tns_api_bulk_report, caltechdata, voevent, labels\n\[email protected]('dsaevent')\ndef cli():\n pass\n\[email protected]()\[email protected]('inname')\[email protected]('outname')\[email protected]('--production', type=bool, default=False)\ndef create_voevent(inname, outname, production):\n \"\"\" takes json file with key-value pairs for create_voevent function.\n Required fields: fluence, p_flux, ra, dec, radecerr, dm, dmerr, width, snr, internalname, mjd, importance\n \"\"\"\n\n indict = labels.readfile(inname)\n ve = voevent.create_voevent(production=production, **indict)\n voevent.create_voevent(ve, outname=outname)\n\n\[email protected]()\[email protected]('inname')\[email protected]('destination')\ndef send_voevent(inname, destination):\n \"\"\" Read VOEvent XML file and send it somewhere\n \"\"\"\n\n pass\n\n\[email protected]()\[email protected]('report_filename')\[email protected]('--production', type=bool, default=False)\ndef tns_send(report_filename, 
production):\n \"\"\" Send event to TNS to be named.\n report_filename is JSON format file with TNS metadata.\n \"\"\"\n\n result = tns_api_bulk_report.send_report(report_filename, production)\n\n\[email protected]()\[email protected]('report_filename')\[email protected]('--production', type=bool, default=False)\ndef ctd_send(report_filename, production):\n \"\"\" Create entry at Caltech Data for data set\n An entry will be identified by an Id (integer).\n \"\"\"\n\n with open(report_filename, 'r') as fp:\n dd = json.load(fp)\n\n dictin = dd # TODO: extract relevant fields from TNS json for ctd\n\n caltechdata.send_ctd(dictin, production=production)\n\n \[email protected]()\[email protected]('Id')\[email protected]('filenames')\[email protected]('--production', type=bool, default=False)\ndef ctd_upload(Id, filenames, production):\n \"\"\" Edit entry at Caltech Data for data set to upload data given by filenames.\n Id is an integer that defines the Caltech Data entry.\n \"\"\"\n\n caltechdata.edit_ctd(Id, filenames=filenames, production=production)\n", "id": "8291920", "language": "Python", "matching_score": 2.964932441711426, "max_stars_count": 0, "path": "event/cli.py" }, { "content": "import os\nimport datetime\nimport json\nimport pytz\nfrom astropy import time\nfrom xml.dom import minidom\nfrom event import labels\ntry:\n import voeventparse as vp\nexcept ImportError:\n print(\"voeventparse not available. cannot create voevents\")\n\n\ndef create_voevent(triggerfile=None, deployment=False, **kwargs):\n \"\"\" template syntax for voeventparse creation of voevent\n \"\"\"\n\n required = ['internalname', 'mjds', 'dm', 'width', 'snr', 'ra', 'dec', 'radecerr']\n preferred = ['fluence', 'p_flux', 'importance', 'dmerr']\n\n # set values\n dd = kwargs.copy()\n if triggerfile is not None:\n trigger = labels.readfile(triggerfile)\n\n for k, v in trigger.items(): # should be one entry in T2 json trigger file\n dd['internalname'] = k\n for kk, vv in v.items():\n if kk in required + preferred:\n dd[kk] = vv\n\n assert all([k in dd for k in required]), f'Input keys {list(dd.keys())} not complete (requires {required})'\n\n # TODO: set this correctly\n dt = time.Time(dd['mjds'], format='mjd').to_datetime(timezone=pytz.utc)\n\n # create voevent instance\n role = vp.definitions.roles.observation if deployment else vp.definitions.roles.test\n v = vp.Voevent(stream='', # TODO: check\n stream_id=1, role=role)\n\n vp.set_who(v, date=datetime.datetime.utcnow(),\n author_ivorn=\"voevent.dsa-110.caltech.org\") # TODO: check\n\n vp.set_author(v, title=\"DSA-110 Testing Node\",\n contactName=\"<NAME>\", contactEmail=\"<EMAIL>\"\n )\n\n params = []\n dm = vp.Param(name=\"dm\",\n value=str(dd['dm']),\n unit=\"pc/cm^3\",\n ucd=\"phys.dispMeasure;em.radio.750-1500MHz\",\n dataType='float',\n ac=True\n )\n dm.Description = 'Dispersion Measure'\n params.append(dm)\n\n width = vp.Param(name=\"width\",\n value=str(dd['width']),\n unit=\"ms\",\n ucd=\"time.duration;src.var.pulse\",\n dataType='float',\n ac=True\n )\n width.Description = 'Temporal width of burst'\n params.append(width)\n\n snr = vp.Param(name=\"snr\",\n value=str(dd['snr']),\n ucd=\"stat.snr\",\n dataType='float',\n ac=True\n )\n snr.Description = 'Signal to noise ratio'\n params.append(snr)\n \n if 'fluence' in dd:\n fluence = vp.Param(name='fluence',\n value=str(dd['fluence']),\n unit='Jansky ms',\n ucd='em.radio.750-1500MHz', # TODO: check\n dataType='float',\n ac=False)\n fluence.Description = 'Fluence'\n params.append(fluence)\n\n if 'p_flux' in 
dd:\n p_flux = vp.Param(name='peak_flux',\n value=str(dd['p_flux']),\n unit='Janskys',\n ucd='em.radio.750-1500MHz',\n dataType='float',\n ac=True\n )\n p_flux.Description = 'Peak Flux'\n params.append(p_flux)\n\n if 'dmerr' in dd:\n dmerr = vp.Param(name=\"dm_error\",\n value=str(dd['dmerr']),\n unit=\"pc/cm^3\",\n ucd=\"phys.dispMeasure;em.radio.750-1500MHz\",\n dataType='float',\n ac=True\n )\n dmerr.Description = 'Dispersion Measure error'\n params.append(dmerr)\n\n v.What.append(vp.Group(params=params, name='event parameters'))\n\n vp.add_where_when(v,\n coords=vp.Position2D(ra=str(dd['ra']), dec=str(dd['dec']), err=str(dd['radecerr']),\n units='deg', system=vp.definitions.sky_coord_system.utc_fk5_geo),\n obs_time=dt,\n observatory_location='OVRO')\n\n print(\"\\n***Here is your WhereWhen:***\\n\")\n print(vp.prettystr(v.WhereWhen))\n\n print(\"\\n***And your What:***\\n\")\n print(vp.prettystr(v.What))\n\n vp.add_how(v, descriptions='Discovered with DSA-110',\n references=vp.Reference('http://deepsynoptic.org'))\n\n if 'importance' in dd:\n vp.add_why(v, importance=str(dd['importance']))\n else:\n vp.add_why(v)\n v.Why.Name=str(dd['internalname'])\n\n vp.assert_valid_as_v2_0(v)\n\n return v\n\n\ndef write_voevent(v, outname='new_voevent_example.xml'):\n with open(outname, 'w') as f:\n voxml = vp.dumps(v)\n xmlstr = minidom.parseString(voxml).toprettyxml(indent=\" \")\n f.write(xmlstr)\n print(\"Wrote your voevent to \", os.path.abspath(outname))\n", "id": "8441936", "language": "Python", "matching_score": 0.8551348447799683, "max_stars_count": 0, "path": "event/voevent.py" }, { "content": "__all__ = ['voevent', 'names', 'caltechdata', 'labels']\n\nfrom event import *\n", "id": "647581", "language": "Python", "matching_score": 1.2962197065353394, "max_stars_count": 0, "path": "event/__init__.py" }, { "content": "__all__ = ['caltools']\n", "id": "2122055", "language": "Python", "matching_score": 0.8077926635742188, "max_stars_count": 0, "path": "catalogs/__init__.py" } ]
2.604637
Philippus229
[ { "content": "from urllib.parse import quote\r\nfrom urllib import request\r\nimport requests, random, json, os, subprocess, math\r\nfrom Crypto.Cipher import AES\r\nfrom Crypto.Util.Padding import unpad\r\nfrom bs4 import BeautifulSoup\r\n\r\nusername, password = open(\"credentials.cfg\", \"r\").read().split(\"\\n\") if os.path.isfile(\"credentials.cfg\") else (input(\"Username: \"), input(\"Password: \"))\r\nif not os.path.isfile(\"credentials.cfg\"):\r\n if input(\"Remember me? (y/n): \").lower() == \"y\":\r\n open(\"credentials.cfg\", \"w\").write(f\"{username}\\n{password}\")\r\n\r\nsession = requests.Session()\r\nsession.headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4093.3 Safari/537.36\"}\r\n\r\nservers = [\"https://cr-unblocker.us.to/start_session?version=1.1\",\r\n \"https://api1.cr-unblocker.com/getsession.php?version=1.1\",\r\n \"https://api2.cr-unblocker.com/start_session?version=1.1\"]\r\n\r\ndef localizeToUs():\r\n print(\"Fetching session id...\")\r\n for server in servers:\r\n print(f\"Trying to retrieve session id from {server.split('//')[1].split('/')[0]}...\")\r\n try: res = session.get(server)\r\n except: res = None\r\n if res:\r\n sessionData = res.json()[\"data\"]\r\n if sessionData[\"country_code\"] == \"US\":\r\n print(f\"Got session id, setting cookie {sessionData['session_id']}.\")\r\n session.cookies.set(**{\"name\": \"session_id\",\r\n \"value\": sessionData[\"session_id\"],\r\n \"domain\": f\"crunchyroll.com/videos/anime/alpha?group=all\",})\r\n session.cookies.set(**{\"name\": \"c_locale\",\r\n \"value\": \"enUS\",\r\n \"domain\": f\"crunchyroll.com/videos/anime/alpha?group=all\",})\r\n if not \"header_profile_dropdown\" in session.get(\"https://crunchyroll.com\").text:\r\n data = session.post(f\"https://api.crunchyroll.com/login.0.json?session_id={sessionData['session_id']}&locale=enUS&account={quote(username)}&password={quote(password)}\").json()[\"data\"]\r\n if data != None:\r\n print(f\"User logged in until {data['expires']}\")\r\n else:\r\n print(\"Failed\")\r\n continue\r\n return True\r\n print(\"Failed\")\r\n\r\ndef downloadHLS(url, filepath, sameResForAll):\r\n test3 = session.get(url).text.split(\"\\n\")\r\n availResolutions = [(test3[l].split(\",RESOLUTION=\")[1].split(\",\")[0], test3[l+1]) for l in range(len(test3)) if \"#EXT-X-STREAM-INF\" in test3[l]]\r\n selected = \"\"\r\n if len(availResolutions) > 0:\r\n i = 0\r\n if type(sameResForAll) == str:\r\n i = [r[0] for r in availResolutions].index(sameResForAll)\r\n else:\r\n for r in range(len(availResolutions)):\r\n print(f\"{r}: {availResolutions[r][0]}\")\r\n i = int(input(\"Resolution > \"))\r\n if sameResForAll == None:\r\n sameResForAll = [False, availResolutions[i][0]][input(\"Use same resolution for all? 
(y/n): \").lower() == \"y\"]\r\n selected = availResolutions[i][1]\r\n print(\"Downloading chunk list...\")\r\n tmpcl = [str(l) for l in request.urlopen(selected).readlines()] if len(availResolutions) > 0 else [f\" {l}\\\\n \" for l in test3]\r\n keyurl = [l.split(\"URI=\\\"\")[1].split(\"\\\"\\\\n\")[0] for l in tmpcl if \"#EXT-X-KEY:METHOD=AES-128\" in l][0]\r\n key = request.urlopen(keyurl).read()\r\n print(key)\r\n chunklist = [tmpcl[l+1].replace(\"\\\\n\", \"\") for l in range(len(tmpcl)) if \"#EXTINF\" in tmpcl[l]]\r\n print(\"Done, downloading chunks...\")\r\n if not os.path.isdir(\"temp\"):\r\n os.mkdir(\"temp\")\r\n for c in range(len(chunklist)):\r\n request.urlretrieve(chunklist[c][2:][:-1], os.path.join(\"temp\", f\"{c}.ts\"))\r\n print(f\"{c+1} of {len(chunklist)} done...\")\r\n print(\"Done, decoding and combining chunks...\")\r\n with open(filepath, \"wb\") as f0:\r\n tmpdirlen = len(os.listdir(\"temp\"))\r\n for f1 in range(tmpdirlen):\r\n f2 = open(os.path.join(\"temp\", f\"{f1}.ts\"), \"rb\")\r\n f0.write(unpad(AES.new(key, AES.MODE_CBC, iv=f2.read(16)).decrypt(f2.read()), AES.block_size))\r\n f2.close()\r\n os.remove(os.path.join(\"temp\", f\"{f1}.ts\"))\r\n print(f\"{f1+1} of {tmpdirlen} done...\")\r\n f0.close()\r\n os.rmdir(\"temp\")\r\n print(\"Done, converting file to mp4...\")\r\n done = subprocess.Popen(f'ffmpeg.exe -i \"{filepath}\" -c:v libx264 -c:a aac \"{filepath[:-2]}mp4\"', stdout=subprocess.PIPE, shell=True).wait()\r\n os.remove(filepath)\r\n print(\"Done!\")\r\n return sameResForAll\r\n\r\ndef merge_clean(filepath):\r\n for t in range(2):\r\n av = [\"audio\",\"video\"][t]\r\n print(f\"Merging {av} segments...\")\r\n with open(f\"{av}.m4{av[0]}\", \"wb\") as out:\r\n for sfn in os.listdir(f\"{av}_tmp\"):\r\n sfp = os.path.join(f\"{av}_tmp\", sfn)\r\n out.write(open(sfp, \"rb\").read())\r\n os.remove(sfp)\r\n out.close()\r\n print(\"Merging audio and video...\")\r\n if not os.path.isdir(os.path.dirname(filepath)):\r\n os.mkdir(os.path.dirname(filepath))\r\n done = subprocess.Popen(f'ffmpeg -i video.m4v -i audio.m4a -c:v copy -c:a copy \"{filepath}\"', stdout=subprocess.PIPE, shell=True).wait()\r\n os.remove(\"audio.m4a\")\r\n os.remove(\"video.m4v\")\r\n print(\"Done!\")\r\n\r\ndef cut(string, cut0, cut1, rev=0):\r\n return string.split(cut0)[not rev].split(cut1)[rev]\r\n\r\ndef segToDict(seg):\r\n tmp_dict = {s.split('=\"')[0]:int(cut(s,'=\"','\"')) for s in seg.split(\" \") if \"=\" in s}\r\n tmp_dict[\"n\"] = tmp_dict[\"r\"]+1 if \"r\" in tmp_dict else 1\r\n return tmp_dict\r\n\r\ndef downloadDash(url, fp):\r\n add_headers = {\"Accept\": \"*/*\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Accept-Language\": \"en-GB,en-US;q=0.9,en;q=0.8\",\r\n \"Connection\": \"keep-alive\",\r\n \"Host\": cut(url,\"//\",\"/\"),\r\n \"Origin\": \"https://static.crunchyroll.com\",\r\n \"Referer\": \"https://static.crunchyroll.com/\",\r\n \"Sec-Fetch-Dest\": \"empty\",\r\n \"Sec-Fetch-Mode\": \"cors\",\r\n \"Sec-Fetch-Site\": \"cross-site\"}\r\n data = session.get(url, headers=add_headers).text\r\n open(\"test.mpd\", \"w\").write(data)\r\n base_url0 = cut(data,\"<BaseURL>\",\"</BaseURL>\")\r\n for t in range(2):\r\n av = [\"video\",\"audio\"][t]\r\n if not os.path.isdir(f\"{av}_tmp\"):\r\n os.mkdir(av+\"_tmp\")\r\n a_set = [set_split.split(\"</AdaptationSet>\")[0] for set_split in data.split(\"<AdaptationSet\") if f'mimeType=\"{av}/mp4\"' in set_split][0]\r\n seg_tmp = cut(a_set, \"<SegmentTemplate\", \"</SegmentTemplate>\")\r\n init = 
cut(seg_tmp,'initialization=\"','\"')\r\n media = cut(seg_tmp,'media=\"','\"')\r\n start_num = int(cut(seg_tmp,'startNumber=\"','\"'))\r\n print(\"Quality options not implemented yet, defaulting to highest...\") ###TODO\r\n try: rep_id, base_url = sorted([(cut(r,'id=\"','\"'), cut(r,\"<BaseURL>\",\"</BaseURL>\"), int(cut(r,'bandwidth=\"','\"'))) for r in a_set.split(\"<Representation\")[1:]], key=lambda x: x[-1])[-1][:-1]\r\n except: rep_id, base_url = sorted([(cut(r,'id=\"','\"'), base_url0, int(cut(r,'bandwidth=\"','\"'))) for r in a_set.split(\"<Representation\")[1:]], key=lambda x: x[-1])[-1][:-1]\r\n print(base_url+init.replace(\"$RepresentationID$\", rep_id))\r\n open(os.path.join(f\"{av}_tmp\", f\"{av}0000.m4{av[0]}\"), \"wb\").write(session.get(base_url+init.replace(\"$RepresentationID$\", rep_id)).content)\r\n seg_tl = cut(seg_tmp,\"<SegmentTimeline>\",\"</SegmentTimeline>\")\r\n segs = [segToDict(s) for s in seg_tl.split(\"<S\")[1:]]\r\n sn = 1\r\n num_segs = int(math.fsum([s[\"n\"] for s in segs]))\r\n print(f\"Downloading {av} segments...\")\r\n for si in range(len(segs)):\r\n for i in range(segs[si][\"n\"]):\r\n open(os.path.join(f\"{av}_tmp\", f\"{av}{sn:04}.m4{av[0]}\"), \"wb\").write(session.get(base_url+media.replace(\"$RepresentationID$\",rep_id).replace(\"$Number$\",str(start_num+sn-1))).content)\r\n print(f\"{sn} of {num_segs} done...\")\r\n sn += 1\r\n merge_clean(fp)\r\n\r\nif localizeToUs():\r\n animeString = session.get(\"https://crunchyroll.com/videos/anime/alpha?group=all\").text\r\n soup = BeautifulSoup(animeString, \"html.parser\")\r\n animeList = [(a[\"title\"], a[\"href\"]) for a in soup.find_all(\"a\", {\"class\": \"text-link ellipsis\"})]\r\n while True:\r\n sameLangForAll = None\r\n sameResForAll = None\r\n for a in range(len(animeList)):\r\n print(f\"{a}: {animeList[a][0]}\")\r\n seasonString = session.get(f\"https://crunchyroll.com{animeList[int(input('Anime > '))][1]}\").text\r\n soup = BeautifulSoup(seasonString, \"html.parser\")\r\n seasonList = []\r\n episodeList = []\r\n if soup.find(\"ul\", {\"class\": \"list-of-seasons cf\"}).find(\"li\")[\"class\"] == [\"season\"]:\r\n episodeList = [(e.find(\"img\")[\"alt\"], e[\"href\"]) for e in soup.find(\"li\", {\"class\": \"season\"}).find_all(\"a\")]\r\n else:\r\n seasonList = [(s.find(\"a\")[\"title\"], [(e.find(\"img\")[\"alt\"], e[\"href\"]) for e in s.find_all(\"a\")[1:]]) for s in soup.find_all(\"li\", {\"class\": \"season small-margin-bottom\"})]\r\n if len(seasonList) > 0:\r\n for s in range(len(seasonList)):\r\n print(f\"{s}: {seasonList[s][0]}\")\r\n episodeList = seasonList[int(input(\"Season > \"))][1]\r\n episodesToDownload = []\r\n while True:\r\n print(\"-1: Start Download\")\r\n for e in range(len(episodeList)):\r\n print(f\"{e}: {episodeList[len(episodeList)-e-1][0]}\")\r\n i = int(input(\"Episode > \"))\r\n if i == -1:\r\n break\r\n elif not episodeList[len(episodeList)-i-1] in episodesToDownload:\r\n episodesToDownload.append(episodeList[len(episodeList)-i-1])\r\n file_dest = input(\"Download destination: \")\r\n for e in episodesToDownload:\r\n videodata = json.loads(session.get(f\"https://crunchyroll.com{e[1]}\").text.split(\"vilos.config.media = \")[1].split(\";\\n\")[0])\r\n #with open(\"videodata.json\", \"w\", encoding=\"utf-8\") as f:\r\n # json.dump(videodata, f, ensure_ascii=False, indent=4)\r\n streams = videodata[\"streams\"]\r\n currTitle = f\"Episode {videodata['metadata']['display_episode_number']} - {videodata['metadata']['title']}\"\r\n 
print(f\"--------{currTitle}--------\")\r\n subtitleList = []\r\n for s in [s0 for s0 in streams if s0[\"format\"] in [\"adaptive_hls\", \"adaptive_dash\"]]:\r\n subtitleList.append((f\"{s['hardsub_lang']} ({s['format'].replace('adaptive_','')})\", s[\"url\"]))\r\n i = 0\r\n if type(sameLangForAll) == str:\r\n i = [sl[0] for sl in subtitleList].index(sameLangForAll)\r\n else:\r\n for sl in range(len(subtitleList)):\r\n print(f\"{sl}: {subtitleList[sl][0]}\")\r\n i = int(input(\"Subtitle Language > \"))\r\n if sameLangForAll == None:\r\n sameLangForAll = [False, subtitleList[i][0]][input(\"Use same subtitle language for all? (y/n): \").lower() == \"y\"]\r\n subdata = subtitleList[i]\r\n if subdata[0].endswith(\"(hls)\"):\r\n sameResForAll = downloadHLS(subdata[1], os.path.join(file_dest, f\"{currTitle}.ts\"), sameResForAll)\r\n else:\r\n downloadDash(subdata[1], os.path.join(file_dest, f\"{currTitle}.mp4\"))\r\nprint(\"Initialization failed!\")\r\n", "id": "4805218", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "CrunchyDL-old.py" }, { "content": "import os, math, json, time, shutil, requests, subprocess\r\nfrom urllib.parse import quote\r\nfrom urllib import request\r\nfrom Crypto.Cipher import AES\r\nfrom Crypto.Util.Padding import unpad\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\n\r\nlocalizeToUS = True\r\nlog_in = False #False recommended for mpeg-dash and even then it's extremely slow (will (hopefully) soon be fixed)\r\ndlmode = True #True=download, False=watch in browser\r\ntextmode = False\r\n\r\nfile_dest = \"test\"#input(\"Output directory: \")\r\nif not os.path.isdir(\"tmp\"): os.mkdir(\"tmp\")\r\n\r\noptions = Options()\r\n#options.add_argument(\"--headless\")\r\noptions.add_experimental_option(\"prefs\", {\r\n \"download.default_directory\": os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"tmp\")).replace(\"/\",\"\\\\\"),\r\n \"download.prompt_for_download\": False,\r\n \"download.directory_upgrade\": True,\r\n \"safebrowsing.enabled\": True\r\n})\r\ndriver = webdriver.Chrome(options=options)\r\n\r\nsession = requests.Session()\r\nsession.headers.update({\"User-Agent\": driver.execute_script(\"return navigator.userAgent\")})\r\n\r\nusername, password = open(\"credentials.cfg\", \"r\").read().split(\"\\n\") if os.path.isfile(\"credentials.cfg\") else (input(\"Username: \"), input(\"Password: \"))\r\nif not os.path.isfile(\"credentials.cfg\"):\r\n if input(\"Remember me? 
(y/n): \").lower() == \"y\":\r\n open(\"credentials.cfg\", \"w\").write(f\"{username}\\n{password}\")\r\n\r\nservers = [\"https://cr-unblocker.us.to/start_session?version=1.1\"]\r\n\r\ndef login():\r\n if localizeToUS:\r\n for server in servers:\r\n try: sessionData = session.get(server).json()[\"data\"]\r\n except: sessionData = None\r\n if sessionData and sessionData[\"country_code\"] == \"US\":\r\n session.cookies.set(\"session_id\", sessionData[\"session_id\"])\r\n session.cookies.set(\"c_locale\", \"enUS\")\r\n data = session.post(f\"https://api.crunchyroll.com/login.0.json?session_id={sessionData['session_id']}&locale=enUS&account={quote(username)}&password={quote(password)}\").json()[\"data\"]\r\n print(data)\r\n if data:\r\n print(\"Logged in (US)!\")\r\n driver.get(\"https://crunchyroll.com\")\r\n for name, value in session.cookies.items(): #somehow this actually works pretty well\r\n driver.add_cookie({\"name\": name, \"value\": value}) #didn't think setting cookies would be enough\r\n else:\r\n print(\"Login failed!\")\r\n else:\r\n '''driver.get(\"https://crunchyroll.com/login\")\r\n driver.find_element_by_id(\"login_form_name\").send_keys(username)\r\n driver.find_element_by_id(\"login_form_password\").send_keys(<PASSWORD>)\r\n driver.find_element_by_id(\"login_form_password\").submit()'''\r\n driver.get(\"https://www.crunchyroll.com\")\r\n for cookie in driver.get_cookies():\r\n session.cookies.set(cookie[\"name\"], cookie[\"value\"])\r\n data = session.post(f\"https://api.crunchyroll.com/login.0.json?session_id={session.cookies['session_id']}&account={quote(username)}&password={quote(password)}\").json()[\"data\"]\r\n for name, value in session.cookies.items():\r\n driver.add_cookie({\"name\": name, \"value\": value})\r\n print(\"Probably logged in!\")\r\n\r\nif log_in or localizeToUS: login()\r\n\r\ndef cut(string, cut0, cut1, rev=0):\r\n return string.split(cut0)[1-rev].split(cut1)[rev]\r\n\r\ndef segToDict(seg):\r\n tmp_dict = {s.split('=\"')[0]:int(cut(s,'=\"','\"')) for s in seg.split(\" \") if \"=\" in s}\r\n tmp_dict[\"n\"] = tmp_dict[\"r\"]+1 if \"r\" in tmp_dict else 1\r\n return tmp_dict\r\n\r\ndef retrieveURL0(url, mode=\"r\", fp=None): #TODO: find a way to disable the video player in chrome\r\n for f in os.listdir(\"tmp\"): os.remove(os.path.join(\"tmp\", f))\r\n driver.get(url)\r\n while len(os.listdir(\"tmp\")) in [0,2]: time.sleep(0.5) #wait for download to finish\r\n time.sleep(1)\r\n filename = os.listdir(\"tmp\")[-1]\r\n content = open(os.path.join(\"tmp\", filename), mode).read()\r\n if fp: shutil.copyfile(os.path.join(\"tmp\", filename), fp)\r\n return content\r\n\r\ndef retrieveURL1(url, mode=\"r\", fp=None, headers={}):\r\n for cookie in driver.get_cookies():\r\n session.cookies.set(cookie[\"name\"], cookie[\"value\"])\r\n res = session.get(url, headers=headers)\r\n if fp: open(fp, \"wb\").write(res.content)\r\n content = res.text if mode == \"r\" else res.content if mode == \"rb\" else res.text\r\n return content\r\n\r\ndef retrieveURL(url, mode=\"r\", fp=None, thing=log_in):\r\n return retrieveURL0(url, mode, fp) if thing else \\\r\n retrieveURL1(url, mode, fp,\r\n headers={\"Accept\": \"*/*\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Accept-Language\": \"de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7\",\r\n \"Connection\": \"keep-alive\",\r\n \"Host\": cut(url,\"//\",\"/\"),\r\n \"Origin\": \"https://static.crunchyroll.com\",\r\n \"Referer\": \"https://static.crunchyroll.com/vilos-v2/web/vilos/player.html?control=1&advancedsettings=1\",\r\n 
\"Sec-Fetch-Dest\": \"empty\",\r\n \"Sec-Fetch-Mode\": \"cors\",\r\n \"Sec-Fetch-Site\": \"cross-site\"})\r\n\r\ndef downloadHLS(url, filepath, sameResForAll):\r\n test3 = retrieveURL(url, fp=\"test.m3u8\", thing=True).split(\"\\n\")\r\n availResolutions = [(test3[l].split(\",RESOLUTION=\")[1].split(\",\")[0], test3[l+1]) for l in range(len(test3)) if \"#EXT-X-STREAM-INF\" in test3[l]]\r\n selected = \"\"\r\n if len(availResolutions) > 0:\r\n i = 0\r\n if type(sameResForAll) == str:\r\n i = [r[0] for r in availResolutions].index(sameResForAll)\r\n else:\r\n for r in range(len(availResolutions)):\r\n print(f\"{r}: {availResolutions[r][0]}\")\r\n i = int(input(\"Resolution > \"))\r\n if sameResForAll == None:\r\n sameResForAll = [False, availResolutions[i][0]][input(\"Use same resolution for all? (y/n): \").lower() == \"y\"]\r\n selected = availResolutions[i][1]\r\n print(\"Downloading chunk list...\")\r\n tmpcl = [str(l) for l in request.urlopen(selected).readlines()] if len(availResolutions) > 0 else [f\" {l}\\\\n \" for l in test3]\r\n keyurl = [l.split(\"URI=\\\"\")[1].split(\"\\\"\\\\n\")[0] for l in tmpcl if \"#EXT-X-KEY:METHOD=AES-128\" in l][0]\r\n key = request.urlopen(keyurl).read()\r\n print(key)\r\n chunklist = [tmpcl[l+1].replace(\"\\\\n\", \"\") for l in range(len(tmpcl)) if \"#EXTINF\" in tmpcl[l]]\r\n print(\"Done, downloading chunks...\")\r\n if not os.path.isdir(\"temp\"):\r\n os.mkdir(\"temp\")\r\n for c in range(len(chunklist)):\r\n request.urlretrieve(chunklist[c][2:][:-1], os.path.join(\"temp\", f\"{c}.ts\"))\r\n print(f\"{c+1} of {len(chunklist)} done...\")\r\n print(\"Done, decoding and combining chunks...\")\r\n with open(filepath, \"wb\") as f0:\r\n tmpdirlen = len(os.listdir(\"temp\"))\r\n for f1 in range(tmpdirlen):\r\n f2 = open(os.path.join(\"temp\", f\"{f1}.ts\"), \"rb\")\r\n f0.write(unpad(AES.new(key, AES.MODE_CBC, iv=f2.read(16)).decrypt(f2.read()), AES.block_size))\r\n f2.close()\r\n os.remove(os.path.join(\"temp\", f\"{f1}.ts\"))\r\n print(f\"{f1+1} of {tmpdirlen} done...\")\r\n f0.close()\r\n os.rmdir(\"temp\")\r\n print(\"Done, converting file to mp4...\")\r\n done = subprocess.Popen(f'ffmpeg.exe -i \"{filepath}\" -c:v libx264 -c:a aac \"{filepath[:-2]}mp4\"', stdout=subprocess.PIPE, shell=True).wait()\r\n os.remove(filepath)\r\n print(\"Done!\")\r\n return sameResForAll\r\n\r\ndef merge_clean(filepath):\r\n for t in range(2):\r\n av = [\"audio\",\"video\"][t]\r\n print(f\"Merging {av} segments...\")\r\n with open(f\"{av}.m4{av[0]}\", \"wb\") as out:\r\n for sfn in os.listdir(f\"{av}_tmp\"):\r\n sfp = os.path.join(f\"{av}_tmp\", sfn)\r\n out.write(open(sfp, \"rb\").read())\r\n os.remove(sfp)\r\n out.close()\r\n print(\"Merging audio and video...\")\r\n if not os.path.isdir(os.path.dirname(filepath)):\r\n os.mkdir(os.path.dirname(filepath))\r\n done = subprocess.Popen(f'ffmpeg -i video.m4v -i audio.m4a -c:v copy -c:a copy \"{filepath}\"', stdout=subprocess.PIPE, shell=True).wait()\r\n os.remove(\"audio.m4a\")\r\n os.remove(\"video.m4v\")\r\n print(\"Done!\")\r\n\r\ndef downloadDash(url, fp):\r\n data = retrieveURL(url, fp=\"test.mpd\")\r\n base_url0 = cut(data,\"<BaseURL>\",\"</BaseURL>\") if \"<BaseURL>\" in data \\\r\n else url.split(\"manifest.mpd\")[0].replace(\"&amp;\",\"&\").replace(\"dl.\",\"\") #temporary, probably gonna make a dictionary (or try to figure out a way to read response headers with selenium (maybe switch to selenium-requests?))\r\n for t in range(2):\r\n av = [\"video\",\"audio\"][t]\r\n if not os.path.isdir(f\"{av}_tmp\"):\r\n 
os.mkdir(av+\"_tmp\")\r\n a_set = [set_split.split(\"</AdaptationSet>\")[0] for set_split in data.split(\"<AdaptationSet\") if f'mimeType=\"{av}/mp4\"' in set_split][0]\r\n seg_tmp = cut(a_set, \"<SegmentTemplate\", \"</SegmentTemplate>\")\r\n init = cut(seg_tmp,'initialization=\"','\"')\r\n media = cut(seg_tmp,'media=\"','\"')\r\n start_num = int(cut(seg_tmp,'startNumber=\"','\"'))\r\n print(\"Quality options not implemented yet, defaulting to highest...\") ###TODO\r\n try: rep_id, base_url = sorted([(cut(r,'id=\"','\"'), cut(r,\"<BaseURL>\",\"</BaseURL>\"), int(cut(r,'bandwidth=\"','\"'))) for r in a_set.split(\"<Representation\")[1:]], key=lambda x: x[-1])[-1][:-1]\r\n except: rep_id, base_url = sorted([(cut(r,'id=\"','\"'), base_url0, int(cut(r,'bandwidth=\"','\"'))) for r in a_set.split(\"<Representation\")[1:]], key=lambda x: x[-1])[-1][:-1]\r\n print(base_url+init.replace(\"$RepresentationID$\", rep_id))\r\n retrieveURL(base_url+init.replace(\"$RepresentationID$\", rep_id),\r\n fp=os.path.join(f\"{av}_tmp\", f\"{av}0000.m4{av[0]}\"))\r\n seg_tl = cut(seg_tmp,\"<SegmentTimeline>\",\"</SegmentTimeline>\")\r\n segs = [segToDict(s) for s in seg_tl.split(\"<S\")[1:]]\r\n sn = 1\r\n num_segs = int(math.fsum([s[\"n\"] for s in segs]))\r\n print(f\"Downloading {av} segments...\")\r\n for si in range(len(segs)):\r\n for i in range(segs[si][\"n\"]):\r\n retrieveURL(base_url+media.replace(\"$RepresentationID$\",rep_id).replace(\"$Number$\",str(start_num+sn-1)),\r\n fp=os.path.join(f\"{av}_tmp\", f\"{av}{sn:04}.m4{av[0]}\"))\r\n print(f\"{sn} of {num_segs} done...\")\r\n sn += 1\r\n merge_clean(fp)\r\n\r\ndef downloadEpisodes(episodes, sameLangForAll, sameResForAll):\r\n for episode in episodes:\r\n if episode != driver.current_url:\r\n driver.get(episode)\r\n videodata = json.loads(driver.page_source.split(\"vilos.config.media = \")[1].split(\";\\n\")[0])\r\n streams = videodata[\"streams\"]\r\n currTitle = f\"Episode {videodata['metadata']['display_episode_number']} - {videodata['metadata']['title']}\"\r\n print(f\"--------{currTitle}--------\")\r\n subtitleList = []\r\n for s in [s0 for s0 in streams if s0[\"format\"] in [\"adaptive_hls\", \"adaptive_dash\"]]:\r\n subtitleList.append((f\"{s['hardsub_lang']} ({s['format'].replace('adaptive_','')})\", s[\"url\"]))\r\n i = 0\r\n if type(sameLangForAll) == str:\r\n i = [sl[0] for sl in subtitleList].index(sameLangForAll)\r\n else:\r\n for sl in range(len(subtitleList)):\r\n print(f\"{sl}: {subtitleList[sl][0]}\")\r\n i = int(input(\"Subtitle Language > \"))\r\n if sameLangForAll == None:\r\n sameLangForAll = subtitleList[i][0] if input(\"Use same subtitle language for all? 
(y/n): \").lower() == \"y\" else False\r\n subdata = subtitleList[i]\r\n if subdata[0].endswith(\"(hls)\"):\r\n sameResForAll = downloadHLS(subdata[1], os.path.join(file_dest, f\"{currTitle}.ts\"), sameResForAll)\r\n else:\r\n downloadDash(subdata[1], os.path.join(file_dest, f\"{currTitle}.mp4\"))\r\n\r\nrunning = lambda: driver.get_log(\"driver\")[-1][\"message\"] != \"Unable to evaluate script: disconnected: not connected to DevTools\\n\"\r\n\r\nif not os.path.isdir(file_dest):\r\n os.mkdir(file_dest)\r\ndriver.get(\"https://www.crunchyroll.com/videos/anime/alpha?group=all\")\r\nif dlmode and not textmode:\r\n print(\"Navigate to an episode and the download will automatically start\")\r\nwhile running:\r\n if dlmode:\r\n if textmode:\r\n animeList = driver.find_elements_by_xpath(\"//a[@class='text-link ellipsis']\")\r\n for a in range(len(animeList)):\r\n print(f\"{a}: {animeList[a].text}\")\r\n driver.get(animeList[int(input(\"Anime > \"))].get_attribute(\"href\"))\r\n seasonList = driver.find_elements_by_class_name(\"season\")\r\n episodeList = []\r\n if len(seasonList) == 1:\r\n episodeList = seasonList[0].find_elements_by_class_name(\"episode\")\r\n elif len(seasonList) > 1:\r\n for s in range(len(seasonList)):\r\n print(f\"{s}: {seasonList[s].find_element_by_class_name('season-dropdown').get_attribute('title')}\")\r\n episodeList = seasonList[int(input(\"Season > \"))].find_elements_by_class_name(\"episode\")\r\n episodesToDownload = []\r\n while True:\r\n print(\"-1: Start Download\")\r\n for e in range(len(episodeList)):\r\n print(f\"{e}: {episodeList[len(episodeList)-e-1].find_element_by_tag_name('img').get_attribute('alt')}\")\r\n i = int(input(\"Episode > \"))\r\n if i == -1: break\r\n elif not episodeList[len(episodeList)-i-1].get_attribute(\"href\") in episodesToDownload:\r\n episodesToDownload.append(episodeList[len(episodeList)-i-1].get_attribute(\"href\"))\r\n downloadEpisodes(episodesToDownload, None, None)\r\n else:\r\n if \"/episode-\" in driver.current_url and \"vilos.config.media = \" in driver.page_source:\r\n downloadEpisodes([driver.current_url], False, False)\r\n else:\r\n time.sleep(1)\r\n", "id": "9256734", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "CrunchyDL.py" } ]
0
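An illustrative sketch of the per-segment decryption step that downloadHLS in the entry above performs on each .ts chunk (AES-128 in CBC mode with PKCS#7 padding), assuming the pycryptodome package is available; the helper name decrypt_segment is hypothetical, and in a real playlist the key and IV come from the #EXT-X-KEY tag rather than being passed in directly.

from Crypto.Cipher import AES
from Crypto.Util.Padding import unpad

def decrypt_segment(segment_bytes: bytes, key: bytes, iv: bytes) -> bytes:
    # One encrypted HLS media segment in, plain MPEG-TS bytes out.
    cipher = AES.new(key, AES.MODE_CBC, iv=iv)
    return unpad(cipher.decrypt(segment_bytes), AES.block_size)

The entry reads the IV from the first 16 bytes of each downloaded chunk; using the IV advertised in the playlist (or the media sequence number when none is given) is the more common arrangement.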
jedybg
[ { "content": "from layout import (initializeWindow, showWindow)\n\ninitializeWindow()\nshowWindow()", "id": "11472359", "language": "Python", "matching_score": 1.1792783737182617, "max_stars_count": 7, "path": "resolveAdvancedImporter.py" }, { "content": "import base64\r\nimport tkinter as tk\r\nfrom PIL import ImageTk, Image\r\nfrom icon import icon\r\n\r\nmainWindow = tk.Tk()\r\n\r\ndef InitializeTkWindow():\r\n mainWindow.title(\"DaVinci Resolve Advanced Importer\")\r\n mainWindow.resizable(False, False)\r\n mainWindow.call('wm', 'iconphoto', mainWindow._w, ImageTk.PhotoImage(data=base64.b64decode(icon)))\r\n", "id": "6616473", "language": "Python", "matching_score": 0.05766807124018669, "max_stars_count": 7, "path": "init.py" }, { "content": "from enum import Enum\r\n\r\nclass ResolveClipTypes(Enum):\r\n Video = \"Video\"\r\n Audio = \"Audio\"\r\n VideoAudio = \"Video + Audio\"\r\n Still = \"Still\"\r\n Multicam = \"Multicam\"\r\n Timeline = \"Timeline\"\r\n Compound = \"Compound\"\r\n Matte = \"Matte\"\r\n RefClip = \"Ref Clip\"\r\n Stereo = \"Stereo\"\r\n VFXConnect = \"VFX Connect\"\r\n Generator = \"Generator\"\r\n Fusion = \"Fusion\"\r\n \r\n def isAnyType(clip, *types):\r\n for type in types:\r\n if(clip.GetClipProperty(\"Type\") == type.value):\r\n return True\r\n \r\n return False\r\n \r\n def getImportedTypes():\r\n return [ResolveClipTypes.Video, ResolveClipTypes.Audio, ResolveClipTypes.VideoAudio, ResolveClipTypes.Still]\r\n \r\n def isImported(clip):\r\n return ResolveClipTypes.isAnyType(clip, *ResolveClipTypes.getImportedTypes())", "id": "971182", "language": "Python", "matching_score": 0.4553464949131012, "max_stars_count": 7, "path": "clipTypes.py" }, { "content": "import sys\r\nimport imp\r\nimport os\r\nimport json\r\nimport config as c\r\n\r\ndef GetResolve():\r\n scriptModule = None\r\n try:\r\n import fusionscript as scriptModule\r\n except ImportError:\r\n resolvePath = c.getResolvePath()\r\n # Look for an auto importer config path\r\n if resolvePath:\r\n try:\r\n scriptModule = imp.load_dynamic(\"fusionscript\", resolvePath)\r\n except ImportError:\r\n print(\"[Resolve Importer] Failed to load resolve at config path: \" + resolvePath)\r\n pass\r\n\r\n if not scriptModule:\r\n # Look for installer based environment variables:\r\n libPath=os.getenv(\"RESOLVE_SCRIPT_LIB\")\r\n if libPath:\r\n try:\r\n scriptModule = imp.load_dynamic(\"fusionscript\", libPath)\r\n except ImportError:\r\n pass\r\n\r\n if not scriptModule:\r\n # Look for default install locations:\r\n ext=\".so\"\r\n if sys.platform.startswith(\"darwin\"):\r\n path = \"/Applications/DaVinci Resolve/DaVinci Resolve.app/Contents/Libraries/Fusion/\"\r\n elif sys.platform.startswith(\"win\") or sys.platform.startswith(\"cygwin\"):\r\n ext = \".dll\"\r\n path = \"C:\\\\Program Files\\\\Blackmagic Design\\\\DaVinci Resolve\\\\\"\r\n elif sys.platform.startswith(\"linux\"):\r\n path = \"/opt/resolve/libs/Fusion/\"\r\n try:\r\n scriptModule = imp.load_dynamic(\"fusionscript\", path + \"fusionscript\" + ext)\r\n except ImportError:\r\n pass\r\n\r\n if scriptModule:\r\n sys.modules[\"DaVinciResolveScript\"] = scriptModule\r\n import DaVinciResolveScript as bmd\r\n else:\r\n raise ImportError(\"Could not locate module dependencies\")\r\n\r\n return bmd.scriptapp(\"Resolve\")\r\n\r\nresolve = GetResolve()\r\nprojectManager = resolve.GetProjectManager()\r\nproject = projectManager.GetCurrentProject()\r\nmediaPool = project.GetMediaPool()\r\nmediaStorage = resolve.GetMediaStorage()", "id": "6743495", "language": 
"Python", "matching_score": 0.11771082878112793, "max_stars_count": 7, "path": "resolve.py" }, { "content": "import tkinter as tk\r\nfrom tkinter import ttk\r\nimport tkinter.font as tkfont\r\nfrom resolveBinTree import ResolveBinTree\r\n\r\nclass BinSelector(ttk.Combobox):\r\n \r\n def __init__(self, master, selectedBinLabel, selectBinFunction, allowNoSelection = True, noneLabel = \"None\", **kw) -> None:\r\n self.allowNoSelection = allowNoSelection\r\n self.selectBinFunction = selectBinFunction\r\n self.noneLabel = noneLabel\r\n self.binPaths = self.generateBinPaths()\r\n \r\n self.selectedBin, selectedBinLabel = self.findSelectedBinFromPath(selectedBinLabel)\r\n \r\n self.selectedBinLabelVar = tk.StringVar(value = selectedBinLabel)\r\n \r\n self.setSelectedBin(self.selectedBin, selectedBinLabel)\r\n \r\n super().__init__(master, textvariable=self.selectedBinLabelVar, **kw)\r\n \r\n self[\"values\"] = self.binPaths\r\n \r\n self.bind('<FocusIn>', self.onFocusIn)\r\n self.bind('<<ComboboxSelected>>', self.onItemSelected)\r\n self.bind('<ButtonPress>', self.onConfigure)\r\n self[\"state\"] = \"readonly\"\r\n \r\n def getSelectedBin(self):\r\n return self.selectedBin\r\n \r\n def getSelectedBinPath(self):\r\n return self.selectedBinLabel\r\n \r\n def getMasterBin(self):\r\n return ResolveBinTree.get()\r\n \r\n def getDefaultBin(self):\r\n return None if self.allowNoSelection else self.getMasterBin()\r\n \r\n def findSelectedBinFromPath(self, selectedBinPath):\r\n if self.allowNoSelection and selectedBinPath == self.noneLabel:\r\n return None, self.noneLabel\r\n \r\n masterBin = ResolveBinTree.get()\r\n \r\n bin = masterBin.findBinFromPath(selectedBinPath, None if self.allowNoSelection else masterBin)\r\n \r\n if bin == None:\r\n print(f\"[Bin Selector] Failed to find bin from path {selectedBinPath}\")\r\n selectedBinPath = masterBin.getPath() if masterBin else \"\"\r\n \r\n return bin, selectedBinPath\r\n \r\n \r\n def setSelectedBin(self, selectedBin, selectedBinLabel = None):\r\n \r\n self.selectedBin = selectedBin\r\n if selectedBin:\r\n self.selectedBinLabel = selectedBinLabel\r\n \r\n self.selectedBinLabelVar.set(selectedBinLabel)\r\n \r\n def generateBinPaths(self):\r\n labels = ResolveBinTree.get().getBinPathsRecursive()\r\n \r\n if self.allowNoSelection:\r\n labels.insert(0, self.noneLabel)\r\n \r\n return labels\r\n \r\n def onConfigure(self, event):\r\n style = ttk.Style()\r\n\r\n long = max(self.cget('values'), key=len)\r\n\r\n font = tkfont.nametofont(str(self.cget('font')))\r\n width = max(0,font.measure(long.strip() + '0') - self.winfo_width())\r\n\r\n style.configure('TCombobox', postoffset=(0,0,width,0))\r\n \r\n def onFocusIn(self, event):\r\n self[\"values\"] = self.binPaths = self.generateBinPaths()\r\n \r\n def onItemSelected(self, event):\r\n selectedBinPath = self.selectedBinLabelVar.get()\r\n \r\n bin, selectedBinPath = self.findSelectedBinFromPath(selectedBinPath)\r\n \r\n self.setSelectedBin(bin, selectedBinPath)\r\n self.selectBinFunction(event)", "id": "2649818", "language": "Python", "matching_score": 2.723161458969116, "max_stars_count": 7, "path": "binSelector.py" }, { "content": "import re\nimport tkinter as tk\nimport textwrap\nimport webbrowser\nfrom init import (mainWindow, InitializeTkWindow)\nfrom resolve import mediaPool\nimport config as c\nfrom binSelector import BinSelector\nfrom resolveBinTree import ResolveBinTree\nfrom resolveImporter import ResolveImporter\nfrom tkinter import ttk\nfrom tkinter import Grid\nfrom tkinter import messagebox\nfrom 
tkinter import filedialog\nfrom tkinter.messagebox import showerror, askokcancel, WARNING\n\nmainFrame = ttk.Frame(mainWindow)\nfolderPathFrame = ttk.LabelFrame(mainFrame, text=\"Auto Importer\")\ncontrolFrame = ttk.Frame(mainFrame)\nconfigFrame = ttk.LabelFrame(mainFrame, text=\"Auto Importer Configuration\")\nextraFunctionsFrame = ttk.LabelFrame(mainFrame, text=\"Extra Functions\")\n\n# for during importing\ndisabledControlsDuringImport = []\n\ndef initializeFrames():\n mainFrame.grid(column=0, row=0, padx = 10, pady = 10)\n\n for i in range(10):\n mainFrame.grid_rowconfigure(i, weight=1)\n mainFrame.grid_columnconfigure(i, weight=1)\n\n folderPathFrame.grid(row = 0, sticky=\"nsew\", pady=(0, 10), ipady=3)\n configFrame.grid(row = 1, sticky=\"nsew\", pady=(0, 10), ipady=3)\n extraFunctionsFrame.grid(row = 2, sticky=\"nsew\", pady=(0, 10), ipady=3)\n controlFrame.grid(row = 3, sticky=\"nsew\", pady=(0, 10), ipady=3)\n\n Grid.columnconfigure(controlFrame, 0, weight = 1)\n\ndef initializeFolderPathFrame():\n importFolderLabel = ttk.Label(folderPathFrame, text=\"Select Target Directory:\")\n importFolderLabel.grid(row = 0, columnspan=3, sticky=tk.W)\n\n folderPathEntry = ttk.Entry(folderPathFrame, textvariable = c.folderPath)\n folderPathEntry.grid(row = 1, columnspan=2, ipadx = 172, pady = 10, sticky=tk.EW)\n disabledControlsDuringImport.append(folderPathEntry)\n\n def folderPathEntry_FocusOut(e):\n ResolveImporter.validateImportPath()\n\n folderPathEntry.bind('<FocusOut>', folderPathEntry_FocusOut)\n\n def openFolderButton_Click():\n path = filedialog.askdirectory(\n title='Select Target Directory to auto-import',\n initialdir='/')\n\n if path:\n c.folderPath.set(path)\n\n openFolderButton = ttk.Button(folderPathFrame, text='Select Folder', command=openFolderButton_Click, width=25)\n disabledControlsDuringImport.append(openFolderButton)\n openFolderButton.grid(column = 2, row = 1, columnspan=1, padx=(10, 0))\n\n rootBinLabel = ttk.Label(folderPathFrame, text=\"Select a root bin:\")\n rootBinLabel.grid(row = 2, column=0, columnspan=2, sticky=tk.W)\n\n def selectRootBin(e):\n c.importToBin = rootBinSelector.getSelectedBin()\n print(f\"Initialized Root bin: {c.importToBin}\")\n\n rootBinSelector = BinSelector(folderPathFrame, c.importToBinPath, selectRootBin, False, width=23)\n rootBinSelector.grid(row=2, column=2, columnspan=1, sticky=tk.W, padx=(10, 0))\n disabledControlsDuringImport.append(rootBinSelector)\n\n # Update the selected bin\n selectRootBin(None)\n\ndef initializeConfigFrame():\n currentRow = 0\n\n ignoredFileExtensionsLabel = ttk.Label(configFrame, text=\"Ignored File Extensions:\")\n ignoredFileExtensionsLabel.grid(row=currentRow, column=0, columnspan=1, sticky=tk.EW)\n\n ignoredFileExtensionsEntry = ttk.Entry(configFrame, textvariable = c.ignoredFileExtensions)\n ignoredFileExtensionsEntry.grid(row=currentRow, column=1, columnspan=2, sticky=tk.EW, padx=(20, 0), ipadx=97)\n disabledControlsDuringImport.append(ignoredFileExtensionsEntry)\n\n def ignoredFileExtensionsEntry_FocusOut(e):\n extensionsMatcher = re.compile(r'^[\\w\\-,]+$')\n ignoredExtensionsString = c.ignoredFileExtensions.get().replace('.', '').replace(' ', '')\n if not re.fullmatch(extensionsMatcher, ignoredExtensionsString):\n showerror(title=\"Error\", message=\"Invalid ignored extensions. 
Please list one or more file extensions separated by commas.\")\n return\n\n c.ignoredFileExtensions.set(ignoredExtensionsString)\n\n\n ignoredFileExtensionsEntry.bind('<FocusOut>', ignoredFileExtensionsEntry_FocusOut)\n\n currentRow += 1\n\n removeFilesLabel = ttk.Label(configFrame, text=\"Automatically remove from Resolve:\")\n removeFilesLabel.grid(row=currentRow, column=0, sticky=tk.EW, pady = (10,0))\n\n def removeExtraFiles_Toggle():\n isOn = removeExtraFilesCheckboxString.get() == \"1\"\n\n c.removeExtraFiles.set(isOn)\n\n if isOn:\n messagebox.showwarning(title=\"Read before starting the Auto Importer\", message=textwrap.dedent(\"\"\"\\\n This will make the Auto Importer remove any unused file that is in Resolve but not in the Target Directory in Resolve.\n\n This is designed to allow you to manage your files only in your filesystem and Resolve to match your changes.\n\n Note: This will never delete files that are already in use.\"\"\"))\n\n removeExtraFilesCheckboxString = tk.StringVar(value=\"1\" if c.removeExtraFiles.get() else \"0\")\n removeExtraFilesCheckbox = ttk.Checkbutton(configFrame, text=\"Remove Extra Files\", command=removeExtraFiles_Toggle,\n variable=removeExtraFilesCheckboxString, onvalue=\"1\", offvalue=\"0\")\n removeExtraFilesCheckbox.grid(row=currentRow, column=1, sticky=tk.W, padx=(20, 0), pady = (10,0))\n disabledControlsDuringImport.append(removeExtraFilesCheckbox)\n\n def removeEmptyBinsArchives_Toggle():\n isOn = removeEmptyBinsCheckboxString.get() == \"1\"\n\n c.removeEmptyBins.set(isOn)\n\n if isOn:\n messagebox.showwarning(title=\"Read before starting the Auto Importer\", message=textwrap.dedent(\"\"\"\\\n This will make the Auto Importer look for empty bins in Resolve and remove them.\n\n This is designed to allow you to manage your files only in your filesystem and Resolve to match your changes.\n\n Note: This will never delete folders with items in them.\n Note: If you enabled the \\\"Remove Extra Files\\\" option — the Auto Importer will delete any folder that has only unused files in it.\"\"\"))\n\n removeEmptyBinsCheckboxString = tk.StringVar(value=\"1\" if c.removeEmptyBins.get() else \"0\")\n removeEmptyBinsCheckbox = ttk.Checkbutton(configFrame, text=\"Delete Empty Bins\", command=removeEmptyBinsArchives_Toggle,\n variable=removeEmptyBinsCheckboxString, onvalue=\"1\", offvalue=\"0\")\n removeEmptyBinsCheckbox.grid(row=currentRow, column=2, sticky=tk.W, padx=(10, 0), pady = (10,0))\n disabledControlsDuringImport.append(removeEmptyBinsCheckbox)\n\n currentRow += 1\n\n keepBinsLabel = ttk.Label(configFrame, text=\"Ignored Bins (never delete them):\")\n keepBinsLabel.grid(row=currentRow, column=0, sticky=tk.EW, pady = (10,0))\n\n currentRow += 1\n\n ignoredBinsEntry = ttk.Entry(configFrame, textvariable = c.ignoredBinsEntry)\n ignoredBinsEntry.grid(row = currentRow, columnspan=2, ipadx = 172, pady = 10, sticky=tk.EW)\n ignoredBinsEntry[\"state\"] = \"readonly\"\n\n def updateIgnoredBins(keepBinLabel):\n c.ignoredBins.clear()\n\n addedKeepBin = \",\" + ignoredBinSelector.getSelectedBin().getPath() if keepBinLabel else \"\"\n binsEntry = c.ignoredBinsEntry.get() + addedKeepBin\n\n binPaths = list(dict.fromkeys(binsEntry.split(\",\")))\n binPaths.reverse()\n\n masterBin = ResolveBinTree.get()\n finalPaths = \"\"\n\n for binPath in binPaths:\n if not binPath:\n continue\n\n bin = masterBin.findBinFromPath(binPath)\n\n if bin:\n c.ignoredBins.insert(0, bin)\n finalPaths = \",\" + bin.getPath() + finalPaths\n\n if len(finalPaths) > 1:\n finalPaths = 
finalPaths[1:]\n\n c.ignoredBinsEntry.set(finalPaths)\n\n clearLabel = \"Clear\"\n\n def addIgnoredBin(e):\n if ignoredBinSelector.getSelectedBin():\n updateIgnoredBins(ignoredBinSelector.getSelectedBin().getPath())\n else:\n c.ignoredBins.clear()\n c.ignoredBinsEntry.set(\"\")\n\n ignoredBinSelector.set(clearLabel)\n\n ignoredBinSelector = BinSelector(configFrame, clearLabel, addIgnoredBin, True, clearLabel, width=23)\n ignoredBinSelector.grid(row=currentRow, column=2, columnspan=1, sticky=tk.W, padx=(10, 0))\n disabledControlsDuringImport.append(ignoredBinSelector)\n\n updateIgnoredBins(None)\n\n currentRow += 1\n\n unzipArchivesLabel = ttk.Label(configFrame, text=\"Unzip & Delete archives:\")\n unzipArchivesLabel.grid(row=currentRow, column=0, sticky=tk.EW)\n\n def unzipArchives_Toggle():\n isOn = unzipArchivesCheckboxString.get() == \"1\"\n\n c.unzipArchives.set(isOn)\n\n if isOn:\n messagebox.showwarning(title=\"Read before starting the Auto Importer\", message=\"This will make the Auto Importer unzip all archives in the specified directory and then import the files inside. Useful when working with B-Roll websites.\")\n\n unzipArchivesCheckboxString = tk.StringVar(value=\"1\" if c.unzipArchives.get() else \"0\")\n unzipArchivesCheckbox = ttk.Checkbutton(configFrame, text=\"Unzip Archives\", command=unzipArchives_Toggle,\n variable=unzipArchivesCheckboxString, onvalue=\"1\", offvalue=\"0\")\n unzipArchivesCheckbox.grid(row=currentRow, column=1, sticky=tk.W, padx=(20, 0))\n disabledControlsDuringImport.append(unzipArchivesCheckbox)\n\n def deleteUnzippedArchives_Toggle():\n isOn = deleteUnzippedArchivesCheckboxString.get() == \"1\"\n c.deleteUnzippedArchives.set(isOn)\n\n if isOn:\n messagebox.showwarning(title=\"Read before starting the Auto Importer\", message=\"This will make the Auto Importer delete all unzipped archives after extraction from the import folder.\")\n\n deleteUnzippedArchivesCheckboxString = tk.StringVar(value=\"1\" if c.deleteUnzippedArchives.get() else \"0\")\n deleteUnzippedArchivesCheckbox = ttk.Checkbutton(configFrame, text=\"Delete Unzipped Archives\", command=deleteUnzippedArchives_Toggle,\n variable=deleteUnzippedArchivesCheckboxString, onvalue=\"1\", offvalue=\"0\")\n deleteUnzippedArchivesCheckbox.grid(row=currentRow, column=2, sticky=tk.W, padx=(10, 0))\n disabledControlsDuringImport.append(deleteUnzippedArchivesCheckbox)\n\ndef initializeExtraFunctionsFrame():\n currentRow = 0\n\n timelineBinLabel = ttk.Label(extraFunctionsFrame, text = \"Automatically move all timelines to bin:\")\n timelineBinLabel.grid(row = currentRow, columnspan=2, pady = (10, 10), sticky=tk.EW)\n\n def selectTimelinesBin(e):\n c.timelinesBin = timelineBinSelector.getSelectedBin()\n print(f\"Initialized Timelines bin: {c.timelinesBin}\")\n\n timelineBinSelector = BinSelector(extraFunctionsFrame, c.timelinesBinPath, selectTimelinesBin, True, width=23)\n timelineBinSelector.grid(row=currentRow, column=2, columnspan=3, pady = (10, 10), sticky=tk.E)\n disabledControlsDuringImport.append(timelineBinSelector)\n\n selectTimelinesBin(None)\n\n currentRow += 1\n\n compoundClipsBinLabel = ttk.Label(extraFunctionsFrame, text = \"Automatically move all compound clips to bin:\")\n compoundClipsBinLabel.grid(row = currentRow, columnspan=2, pady = (0, 10), sticky=tk.EW)\n\n def selectCompoundClipsBin(e):\n c.compoundClipsBin = compounClipsBinSelector.getSelectedBin()\n print(f\"Initialized Compound clips bin: {c.compoundClipsBin}\")\n\n\n compounClipsBinSelector = BinSelector(extraFunctionsFrame, 
c.compoundClipsBinPath, selectCompoundClipsBin, True, width=23)\n compounClipsBinSelector.grid(row=currentRow, column=2, columnspan=3, pady = (0, 10), sticky=tk.E)\n disabledControlsDuringImport.append(compounClipsBinSelector)\n\n selectCompoundClipsBin(None)\n\n currentRow += 1\n\n fusionCompsBinLabel = ttk.Label(extraFunctionsFrame, text = \"Automatically move all fusion comps to bin:\")\n fusionCompsBinLabel.grid(row = currentRow, columnspan=2, pady = (0, 10), sticky=tk.EW)\n\n def selectFusionCompsBin(e):\n c.fusionCompsBin = fusionCompsBinSelector.getSelectedBin()\n print(f\"Initialized Fusion comps bin: {c.fusionCompsBin}\")\n\n fusionCompsBinSelector = BinSelector(extraFunctionsFrame, c.fusionCompsBinPath, selectFusionCompsBin, True, width=23)\n fusionCompsBinSelector.grid(row=currentRow, column=2, columnspan=3, pady = (0, 10), sticky=tk.E)\n disabledControlsDuringImport.append(fusionCompsBinSelector)\n\n selectFusionCompsBin(None)\n\n currentRow += 1\n\n manuallyRemoveLabel = ttk.Label(extraFunctionsFrame, text=\"Manually Delete:\")\n manuallyRemoveLabel.grid(row=currentRow, column=0, sticky=tk.EW, padx=(0, 172))\n\n def deleteUnusedFilesButton_Click():\n unusedFiles = ResolveBinTree.get().getUnusedFiles()\n\n filePaths = \"\"\n i = 0\n for unusedFile in unusedFiles:\n if i >= 2:\n filePaths += f\"\\nand {len(unusedFiles) - i} more...\\n\"\n break\n filePaths += f\"{unusedFile.GetClipProperty()['File Path']}\\n\"\n i += 1\n\n confirm = askokcancel(title=\"Confirm Deletion of Unused Files\",\n message=f\"{len(unusedFiles)} unused files will be deleted from both Resolve and your filesystem:\\n\\n{filePaths}\\nThose can easily be found using a Smart Bin.\\n\\nNote: Use the help button to go to a page that explains that and more.\", icon = WARNING)\n\n if confirm:\n print(f\"[{c.importToBin.getName()}] Deleting {len(unusedFiles)} unused files.\")\n c.importToBin.deleteClips(unusedFiles, deleteFiles=True, refresh=True)\n\n removeUnusedFilesButton = ttk.Button(extraFunctionsFrame, text=\"Unused Files\", command=deleteUnusedFilesButton_Click)\n\n removeUnusedFilesButton.grid(row=currentRow, column=1, sticky=tk.W, padx=(20, 0), ipadx=14)\n disabledControlsDuringImport.append(removeUnusedFilesButton)\n\n def removeMissingClipsButton_Click():\n missingClips = ResolveBinTree.get().getMissingClips()\n\n clipPaths = \"\"\n i = 0\n for clip in missingClips:\n if i >= 9:\n clipPaths += f\"\\nand {len(missingClips) - i} more...\\n\"\n break\n clipPaths += f\"{clip.GetClipProperty()['File Path']}\\n\"\n i += 1\n\n confirm = askokcancel(title=\"Confirm Deletion of Missing Clips\",\n message=f\"{len(missingClips)} missing clips will be removed from Resolve:\\n\\n{clipPaths}\\nThose can easily be found using a Smart Bin.\\n\\nNote: Use the help button to go to a page that explains that and more.\", icon = WARNING)\n\n if confirm:\n print(f\"[{c.importToBin.getName()}] Deleting {len(missingClips)} missing clips.\")\n c.importToBin.deleteClips(missingClips, refresh=True)\n\n removeMissingClipsButton = ttk.Button(extraFunctionsFrame, text=\"Missing Clips\", command=removeMissingClipsButton_Click)\n removeMissingClipsButton.grid(row=currentRow, column=2, sticky=tk.W, padx=(20, 0), ipadx=14)\n disabledControlsDuringImport.append(removeMissingClipsButton)\n\n def removeEmptyBinsButton_Click():\n emptyBins = ResolveBinTree.get().getEmptyChildBins([c.importToBin], recursive=True, delete=False)\n\n binPaths = \"\"\n i = 0\n for bin in emptyBins:\n if i >= 9:\n binPaths += f\"\\nand {len(emptyBins) - i} 
more...\\n\"\n break\n binPaths += f\"{bin.getPath()}\\n\"\n i += 1\n\n confirm = askokcancel(title=\"Confirm Deletion of Empty Bins\",\n message=f\"{len(emptyBins)} empty bins will be removed from Resolve. \\n\\n{binPaths}\\nRemoving empty bins should not have any significant impact on your project.\", icon = WARNING)\n\n if confirm:\n initialBin = mediaPool.GetCurrentFolder()\n ResolveBinTree.get().getEmptyChildBins([c.importToBin], recursive=True, delete=True)\n mediaPool.SetCurrentFolder(initialBin)\n\n removeEmptyBinsButton = ttk.Button(extraFunctionsFrame, text=\"Empty Bins\", command=removeEmptyBinsButton_Click)\n removeEmptyBinsButton.grid(row=currentRow, column=3, sticky=tk.W, padx=(20, 0), ipadx=14)\n disabledControlsDuringImport.append(removeEmptyBinsButton)\n\ndef initializeControlFrame():\n tk.Label(controlFrame).grid(row = 0) # spacer\n tk.Label(controlFrame, textvariable=c.importedMessage).grid(row = 1, sticky=tk.W) # message\n\n progressIndicator = ttk.Progressbar(\n controlFrame,\n orient='horizontal',\n mode='indeterminate',\n length=260,\n )\n progressIndicator.grid(row = 2, column=0, columnspan=2, sticky=tk.W)\n progressIndicator.grid_remove()\n progressIndicator.start()\n\n def helpButton_Click():\n webbrowser.open(c.DOCUMENTATION_URL)\n\n buttonWidth = 28\n\n helpButton = ttk.Button(controlFrame, text=\"Help\", command=helpButton_Click, width = buttonWidth)\n helpButton.grid(row = 2, column=1, padx=(0,15))\n\n startButton = ttk.Button(controlFrame, text='Start Importer', command=ResolveImporter.toggleImport, width=buttonWidth)\n startButton.grid(row = 2, column=2)\n\n def updateStartButtonText(*args):\n if c.importing.get() == True:\n for control in disabledControlsDuringImport:\n control[\"state\"] = \"disabled\"\n text = \"Stop Auto Importer\"\n progressIndicator.grid()\n else:\n for control in disabledControlsDuringImport:\n control[\"state\"] = \"readonly\" if isinstance(control, BinSelector) else \"normal\"\n text = \"Start Auto Importer\"\n progressIndicator.grid_remove()\n\n startButton[\"text\"] = text\n\n c.importing.trace('w', updateStartButtonText)\n\ndef showWindow():\n InitializeTkWindow()\n\n def mainWindow_onClose():\n if c.importing.get() == True:\n messagebox.showerror(\"Quit\", \"Please stop the importer before quitting\")\n elif messagebox.askokcancel(\"Quit\", \"Do you want to quit?\"):\n c.saveCache()\n mainWindow.destroy()\n\n mainWindow.protocol(\"WM_DELETE_WINDOW\", mainWindow_onClose)\n mainWindow.mainloop()\n\ndef initializeWindow():\n initializeFrames()\n initializeFolderPathFrame()\n initializeConfigFrame()\n initializeExtraFunctionsFrame()\n initializeControlFrame()\n", "id": "10001070", "language": "Python", "matching_score": 6.578732013702393, "max_stars_count": 7, "path": "layout.py" }, { "content": "import sys\r\nimport os\r\nimport json\r\nimport tkinter as tk\r\n\r\nDOCUMENTATION_URL = \"https://neverproductive.notion.site/Resolve-Advanced-Importer-50f1a8a6241d4264824602054c499b31\"\r\n\r\nsleepBetweenChecks = 5\r\n\r\nfolderPath = tk.StringVar(value=\"C:\\\\Users\\\\jorda\\\\Downloads\")\r\n\r\nignoredBinsEntry = tk.StringVar(value = \"Master/Video Assets\")\r\nremoveExtraFiles = tk.BooleanVar(value = True)\r\nremoveEmptyBins = tk.BooleanVar(value = True)\r\nignoredFileExtensions = tk.StringVar(value=\"json,ini\")\r\nunzipArchives = tk.BooleanVar(value = True)\r\ndeleteUnzippedArchives = tk.BooleanVar(value = True)\r\n\r\nimportToBinPath = \"Master/Test\"\r\ntimelinesBinPath = \"Master/Timelines\"\r\ncompoundClipsBinPath = 
\"Master/Compound Clips\"\r\nfusionCompsBinPath = \"Master/Fusion Comps\"\r\n\r\n# Runtime\r\nimporting = tk.BooleanVar(value = False)\r\nimportedMessage = tk.StringVar(value = \"\")\r\nimportToBin = None\r\ntimelinesBin = None\r\ncompoundClipsBin = None\r\nfusionCompsBin = None\r\nignoredBins = []\r\n\r\ndef getConfigPath():\r\n if getattr(sys, 'frozen', False):\r\n application_path = os.path.dirname(sys.executable)\r\n else:\r\n application_path = os.path.dirname(os.path.abspath(__file__))\r\n\r\n return os.path.join(application_path, 'config.json')\r\n\r\ndef loadCache():\r\n global sleepBetweenChecks, folderPath, ignoredBinsEntry, ignoredFileExtensions, removeExtraFiles, removeEmptyBins, unzipArchives, deleteUnzippedArchives, importToBinPath, timelinesBinPath, compoundClipsBinPath, fusionCompsBinPath\r\n with open(getConfigPath(), \"r\") as configFile:\r\n data = json.load(configFile)\r\n sleepBetweenChecks = data[\"sleepBetweenChecks\"]\r\n folderPath.set(data[\"folderPath\"])\r\n ignoredBinsEntry.set(data[\"ignoredBinsEntry\"])\r\n removeExtraFiles.set(data[\"removeExtraFiles\"])\r\n removeEmptyBins.set(data[\"removeEmptyBins\"])\r\n ignoredFileExtensions.set(data[\"ignoredFileExtensions\"])\r\n unzipArchives.set(data[\"unzipArchives\"])\r\n deleteUnzippedArchives.set(data[\"deleteUnzippedArchives\"])\r\n importToBinPath = data[\"importToBinPath\"]\r\n timelinesBinPath = data[\"timelinesBinPath\"]\r\n compoundClipsBinPath = data[\"compoundClipsBinPath\"]\r\n fusionCompsBinPath = data[\"fusionCompsBinPath\"]\r\n\r\ndef saveCache():\r\n data = None\r\n with open(getConfigPath(), \"r\") as configFile:\r\n data = json.load(configFile)\r\n data[\"folderPath\"] = folderPath.get()\r\n data[\"ignoredBinsEntry\"] = ignoredBinsEntry.get()\r\n data[\"removeExtraFiles\"] = removeExtraFiles.get()\r\n data[\"removeEmptyBins\"] = removeEmptyBins.get()\r\n data[\"ignoredFileExtensions\"] = ignoredFileExtensions.get()\r\n data[\"unzipArchives\"] = unzipArchives.get()\r\n data[\"deleteUnzippedArchives\"] = deleteUnzippedArchives.get()\r\n data[\"importToBinPath\"] = importToBin.getPath() if importToBin else \"Master\"\r\n data[\"timelinesBinPath\"] = timelinesBin.getPath() if timelinesBin else \"None\"\r\n data[\"compoundClipsBinPath\"] = compoundClipsBin.getPath() if compoundClipsBin else \"None\"\r\n data[\"fusionCompsBinPath\"] = fusionCompsBin.getPath() if fusionCompsBin else \"None\"\r\n\r\n with open(getConfigPath(), \"w\") as configFile:\r\n json.dump(data, configFile, indent=4)\r\n\r\ndef getResolvePath():\r\n with open(getConfigPath(), \"r\") as file:\r\n data = json.load(file)\r\n resolvePath = data[\"resolvePath\"]\r\n\r\n return resolvePath\r\n\r\nloadCache()\r\n\r\n\r\n", "id": "456393", "language": "Python", "matching_score": 3.849011182785034, "max_stars_count": 7, "path": "config.py" }, { "content": "import os\r\nimport threading\r\nimport config as c\r\nfrom resolve import (mediaPool)\r\nfrom time import sleep\r\nfrom tkinter.messagebox import showerror\r\n\r\nfrom resolveBinTree import ResolveBinTree\r\n\r\nclass ResolveImporter(threading.Thread):\r\n \r\n IMPORTED_MESSAGE_DURATION = 0.7\r\n \r\n importerThread = None\r\n \r\n def __init__(self, directory) -> None:\r\n super().__init__()\r\n \r\n self._stop = threading.Event()\r\n self.directory = directory\r\n \r\n def stop(self):\r\n self._stop.set()\r\n \r\n def stopped(self):\r\n return self._stop.isSet()\r\n \r\n def run(self):\r\n while True:\r\n sleepDuration = c.sleepBetweenChecks - self.IMPORTED_MESSAGE_DURATION\r\n if not 
self.updateMessage(\"Importing\"): return\r\n sleep(sleepDuration/3)\r\n if not self.updateMessage(\"Importing.\"): return\r\n sleep(sleepDuration/3)\r\n if not self.updateMessage(\"Importing..\"): return\r\n sleep(sleepDuration/3)\r\n if not self.updateMessage(\"Importing...\"): return\r\n \r\n self.importDir()\r\n \r\n if c.timelinesBin or c.compoundClipsBin or c.fusionCompsBin:\r\n master = ResolveBinTree.get()\r\n \r\n if c.timelinesBin:\r\n timelines = master.getTimelines()\r\n \r\n timelinesToMove = []\r\n for timeline in timelines:\r\n if not c.timelinesBin.hasClip(timeline):\r\n timelinesToMove.append(timeline)\r\n \r\n if len(timelinesToMove) > 0:\r\n c.timelinesBin.moveClipsToBin(timelinesToMove)\r\n print(f\"[Resolve Importer] Moved {[t.GetClipProperty('Clip Name') for t in timelinesToMove]} timelines to {c.timelinesBin.getPath()}\")\r\n \r\n if c.compoundClipsBin:\r\n compoundClips = master.getCompoundClips()\r\n \r\n compoundClipsToMove = []\r\n for clip in compoundClips:\r\n if not c.compoundClipsBin.hasClip(clip):\r\n compoundClipsToMove.append(clip)\r\n \r\n if len(compoundClipsToMove) > 0:\r\n c.compoundClipsBin.moveClipsToBin(compoundClipsToMove)\r\n print(f\"[Resolve Importer] Moved {[c.GetClipProperty('Clip Name') for c in compoundClipsToMove]} compound clips to {c.compoundClipsBin.getPath()}\")\r\n \r\n if c.fusionCompsBin:\r\n fusionComps = master.getFusionComps()\r\n \r\n fusionCompsToMove = []\r\n for clip in fusionComps:\r\n if not c.fusionCompsBin.hasClip(clip):\r\n fusionCompsToMove.append(clip)\r\n \r\n if len(fusionCompsToMove) > 0:\r\n c.fusionCompsBin.moveClipsToBin(fusionCompsToMove)\r\n print(f\"[Resolve Importer] Moved {[c.GetClipProperty('Clip Name') for c in fusionComps]} fusion comps to {c.fusionCompsBin.getPath()}\")\r\n \r\n master.refresh()\r\n \r\n if not self.updateMessage(\"Importing... Finished Import\"): return\r\n sleep(self.IMPORTED_MESSAGE_DURATION)\r\n \r\n # returns false if stopped\r\n def updateMessage(self, message):\r\n if self.stopped():\r\n c.importedMessage.set(\"\")\r\n return False\r\n \r\n c.importedMessage.set(message)\r\n return True\r\n \r\n def importDir(self):\r\n print(f\"[Resolve Importer] Importing from {self.directory} to {c.importToBin.getPath()}\")\r\n \r\n c.importToBin.refresh()\r\n \r\n c.importToBin.syncBinWithFolder(self.directory, recursive = True)\r\n \r\n def toggleImport():\r\n if(ResolveImporter.importerThread):\r\n print(f\"[Resolve Importer] Stopping to Import from {c.folderPath.get()} to bin {c.importToBin.getPath()}\")\r\n c.importing.set(False)\r\n ResolveImporter.importerThread.stop()\r\n ResolveImporter.importerThread = None\r\n else:\r\n if not ResolveImporter.validateImportPath():\r\n return\r\n \r\n c.saveCache()\r\n \r\n print(f\"[Resolve Importer] Starting to Import from {c.folderPath.get()} to bin {c.importToBin.getPath()}\")\r\n c.importing.set(True)\r\n c.importedMessage.set(\"Importing\")\r\n ResolveImporter.importerThread = ResolveImporter(c.folderPath.get())\r\n ResolveImporter.importerThread.daemon = True\r\n ResolveImporter.importerThread.start()\r\n \r\n def validateImportPath():\r\n if not os.path.isdir(c.folderPath.get()):\r\n showerror(title=\"Error\", message=\"Invalid import path. 
Please check your path config and try again.\")\r\n return False\r\n return True", "id": "8242264", "language": "Python", "matching_score": 3.3743155002593994, "max_stars_count": 7, "path": "resolveImporter.py" }, { "content": "import os\r\nimport zipfile\r\nimport config as c\r\nfrom os.path import normpath\r\nfrom pathHelpers import *\r\nfrom resolve import (mediaPool)\r\nfrom clipTypes import ResolveClipTypes\r\n\r\n# Clip Name: c.GetClipProperty('Clip Name') # GetName doesn't work for all types\r\n# Clip Path: c.GetClipProperty('File Path')\r\n\r\nclass ResolveBinTree:\r\n BIN_PATH_SEPARATOR = \"/\"\r\n Instance = None\r\n \r\n def __init__(self, bin, parent = None) -> None:\r\n self.bin = bin\r\n self.parent = parent\r\n self.name = self.bin.GetName()\r\n self.childBins = []\r\n self.notAddedFiles = set()\r\n \r\n # get path\r\n currentBin = self\r\n \r\n path = \"\"\r\n \r\n while currentBin:\r\n path = self.BIN_PATH_SEPARATOR + currentBin.getName() + path\r\n currentBin = currentBin.getParent()\r\n \r\n # remove the first slash\r\n path = path[1:]\r\n \r\n self.path = path\r\n \r\n self.refresh()\r\n \r\n def __str__(self) -> str:\r\n return self.getPath()\r\n \r\n def __repr__(self):\r\n return self.__str__()\r\n \r\n def __eq__(self, other: object) -> bool:\r\n if self is None or other is None:\r\n return False\r\n \r\n if not isinstance(other, ResolveBinTree):\r\n return False\r\n \r\n return self.getPath() == other.getPath() and len(self.getChildBins()) == len(other.getChildBins())\r\n \r\n def __hash__(self) -> int:\r\n return hash(self.getPath()) * len(self.getChildBins())\r\n \r\n def refresh(self):\r\n self.clips = self.bin.GetClipList()\r\n \r\n foundBinTrees = []\r\n noBinTreeFolders = []\r\n \r\n for folder in self.bin.GetSubFolderList():\r\n found = False\r\n for childBin in self.childBins:\r\n if self.isChildBinResolveFolder(childBin, folder):\r\n found = True\r\n foundBinTrees.append(childBin)\r\n break\r\n \r\n if not found:\r\n noBinTreeFolders.append(folder)\r\n \r\n extraBinTrees = [bt for bt in self.childBins if bt not in foundBinTrees]\r\n \r\n for extraBinTree in extraBinTrees:\r\n self.childBins.remove(extraBinTree)\r\n \r\n for folder in noBinTreeFolders:\r\n self.childBins.append(ResolveBinTree(folder, self))\r\n \r\n for childBin in self.childBins:\r\n childBin.refresh()\r\n \r\n def get():\r\n if not ResolveBinTree.Instance:\r\n ResolveBinTree.Instance = ResolveBinTree(mediaPool.GetRootFolder())\r\n \r\n ResolveBinTree.Instance.refresh()\r\n return ResolveBinTree.Instance\r\n \r\n def getName(self):\r\n return self.name\r\n \r\n def getParent(self):\r\n return self.parent\r\n \r\n def getPath(self):\r\n return self.path\r\n \r\n def getBinPathsRecursive(self):\r\n paths = []\r\n path = self.getPath()\r\n paths.append(path)\r\n \r\n for childBin in self.getChildBins():\r\n childBinPaths = childBin.getBinPathsRecursive()\r\n paths.extend(childBinPaths)\r\n \r\n return paths\r\n \r\n def findBinFromPath(self, path, default = None):\r\n if not isinstance(path, list):\r\n if not path:\r\n path = []\r\n else:\r\n path = path.split(self.BIN_PATH_SEPARATOR)\r\n \r\n if len(path) == 0:\r\n print(f\"[{self.getName()}] Error finding bin: path is empty\")\r\n return default\r\n \r\n if path[0] == self.getName():\r\n path.pop(0)\r\n \r\n if len(path) == 0:\r\n return self\r\n \r\n for childBin in self.getChildBins():\r\n pathInChildBin = childBin.findBinFromPath(path, default)\r\n if pathInChildBin != default:\r\n return pathInChildBin\r\n \r\n return default\r\n \r\n 
def hasClips(self):\r\n return not not self.clips\r\n \r\n def hasClip(self, clip):\r\n hasClip = False\r\n \r\n for childClip in self.getBin().GetClipList():\r\n if childClip.GetMediaId() == clip.GetMediaId():\r\n hasClip = True\r\n break\r\n \r\n return hasClip\r\n \r\n def hasChildBins(self):\r\n return not not self.childBins\r\n \r\n def getBin(self):\r\n return self.bin\r\n \r\n def getClips(self, recursive = False):\r\n return self.clips\r\n \r\n def getChildBins(self, recursive = False):\r\n return self.childBins\r\n \r\n def isEmpty(self):\r\n if self.hasClips():\r\n return False\r\n \r\n if self.hasChildBins():\r\n for bin in self.getChildBins():\r\n if not bin.isEmpty():\r\n return False\r\n \r\n return True\r\n \r\n # Check if child bin matches a resolve folder (because resolve folders don't have an ID :())\r\n def isChildBinResolveFolder(self, childBin, resolveFolder):\r\n if childBin not in self.getChildBins():\r\n return False\r\n \r\n if len(childBin.getChildBins()) != len(resolveFolder.GetSubFolderList()):\r\n return False\r\n \r\n if childBin.getPath() != self.getPath() + self.BIN_PATH_SEPARATOR + resolveFolder.GetName():\r\n return False\r\n \r\n return True\r\n \r\n def isIgnored(self):\r\n ignoredBinPaths = [b.getPath() for b in c.ignoredBins]\r\n \r\n if c.timelinesBin:\r\n ignoredBinPaths.append(c.timelinesBin.getPath())\r\n if c.compoundClipsBin:\r\n ignoredBinPaths.append(c.compoundClipsBin.getPath())\r\n if c.fusionCompsBin:\r\n ignoredBinPaths.append(c.fusionCompsBin.getPath())\r\n \r\n for ignoredBinPath in ignoredBinPaths:\r\n if ignoredBinPath in self.getPath() and len(self.getPath()) >= len(ignoredBinPath):\r\n return True\r\n \r\n return False\r\n \r\n def getTimelines(self, recursive = True, respectIgnore = True):\r\n timelines = []\r\n \r\n timelines.extend(self.getClipsByType(recursive, respectIgnore, ResolveClipTypes.Timeline))\r\n \r\n return timelines\r\n \r\n def getCompoundClips(self, recursive = True, respectIgnore = True):\r\n compoundClips = []\r\n \r\n compoundClips.extend(self.getClipsByType(recursive, respectIgnore, ResolveClipTypes.Compound))\r\n \r\n return compoundClips\r\n \r\n def getFusionComps(self, recursive = True, respectIgnore = True):\r\n comps = []\r\n \r\n comps.extend(self.getClipsByType(recursive, respectIgnore, ResolveClipTypes.Generator, ResolveClipTypes.Fusion))\r\n \r\n return comps\r\n \r\n def getClipsByType(self, recursive = True, respectIgnore = False, *clipTypes):\r\n clips = []\r\n \r\n if not respectIgnore or not self.isIgnored():\r\n for clip in self.getClips():\r\n if ResolveClipTypes.isAnyType(clip, *clipTypes):\r\n clips.append(clip)\r\n \r\n if recursive:\r\n for childBin in self.getChildBins():\r\n clips.extend(childBin.getClipsByType(recursive, respectIgnore, *clipTypes)) \r\n \r\n return clips\r\n \r\n def getMissingClips(self):\r\n files = []\r\n missingFiles = []\r\n \r\n files.extend(self.getClipsByType(True, True, *ResolveClipTypes.getImportedTypes()))\r\n \r\n for file in files:\r\n if not os.path.exists(normpath(file.GetClipProperty()['File Path'])):\r\n missingFiles.append(file)\r\n \r\n return missingFiles\r\n \r\n def getUnusedFiles(self):\r\n files = []\r\n unusedFiles = []\r\n \r\n files.extend(self.getClipsByType(True, True, *ResolveClipTypes.getImportedTypes()))\r\n \r\n for file in files:\r\n if int(file.GetClipProperty()['Usage']) == 0:\r\n unusedFiles.append(file)\r\n \r\n return unusedFiles\r\n \r\n def getEmptyChildBins(self, skipBins = [], recursive = True, delete = False):\r\n emptyBins = 
[]\r\n \r\n for bin in self.getChildBins():\r\n if bin in skipBins or bin.isIgnored():\r\n continue\r\n \r\n if bin.isEmpty():\r\n emptyBins.append(bin)\r\n \r\n if delete:\r\n self.deleteChildBins([bin])\r\n \r\n continue\r\n \r\n elif recursive:\r\n emptyChildBins = bin.getEmptyChildBins(skipBins, recursive, False)\r\n emptyBins.extend(emptyChildBins)\r\n \r\n if delete:\r\n bin.deleteChildBins(emptyChildBins)\r\n \r\n return emptyBins\r\n \r\n def moveClipsToBin(self, clips, refresh = True):\r\n mediaPool.MoveClips(clips, self.getBin())\r\n \r\n if refresh:\r\n self.refresh()\r\n \r\n def deleteClips(self, files, deleteFiles = False, refresh = False):\r\n if not files:\r\n return\r\n \r\n action = \"Deleting\" if deleteFiles else \"Removing\"\r\n \r\n print(f\"[{self.getName()}] {action} clips: \" + str([file.GetClipProperty()['File Path'] for file in files]))\r\n \r\n if deleteFiles:\r\n for file in files:\r\n os.remove(normpath(file.GetClipProperty()['File Path']))\r\n \r\n mediaPool.DeleteClips(files)\r\n \r\n for file in files:\r\n if file in self.clips:\r\n self.clips.remove(file)\r\n \r\n if refresh:\r\n self.refresh()\r\n \r\n def deleteChildBins(self, bins):\r\n if not bins:\r\n return\r\n \r\n print(f\"[{self.getName()}] Deleting bins: \" + str(bins))\r\n \r\n for bin in bins:\r\n mediaPool.DeleteFolders(bin.getBin())\r\n self.childBins.remove(bin)\r\n \r\n def syncBinWithFolder(self, folder, recursive = True):\r\n ignoredFileExtensions = ['.' + x for x in c.ignoredFileExtensions.get().split(',') if x]\r\n importedFiles = []\r\n indexedChildBins = []\r\n \r\n if not folder:\r\n return\r\n \r\n for root, dirs, files in os.walk(folder):\r\n # add missing files\r\n for file in files:\r\n filePath = normpath(os.path.join(root, file))\r\n \r\n # handle archives\r\n if zipfile.is_zipfile(filePath):\r\n # unzip archives\r\n if c.unzipArchives.get():\r\n zipPath = getPathWithoutFileExtension(filePath)\r\n if not os.path.exists(zipPath):\r\n print(f\"[{self.getName()}] Unzipping archive {filePath}\")\r\n with zipfile.ZipFile(filePath,\"r\") as zip:\r\n zip.extractall(zipPath)\r\n dirs.append(getFolderNameFromPath(zipPath))\r\n \r\n if c.deleteUnzippedArchives.get():\r\n print(f\"[{self.getName()}] Deleting unzipped archive {filePath}\")\r\n os.remove(filePath)\r\n \r\n # no need to import zip files\r\n continue\r\n \r\n if filePath in self.notAddedFiles:\r\n continue\r\n \r\n # is the file ignored\r\n if getFileExtensionFromPath(filePath) in ignoredFileExtensions:\r\n print(f\"[{self.getName()}] Skipping ignored file (by extension) {filePath}\")\r\n self.notAddedFiles.add(filePath)\r\n continue\r\n \r\n # is the file already imported\r\n importedFile = next((f for f in self.getClips() if f and normpath(f.GetClipProperty()['File Path']) == filePath), None)\r\n \r\n if not importedFile:\r\n importedFilesList = mediaPool.ImportMedia(filePath)\r\n print(f\"[{self.getName()}] Adding File {filePath}\")\r\n \r\n if not importedFilesList:\r\n if(getFileNameFromPath(file).startswith(\".\")):\r\n print(f\"[{self.getName()}] Skipping file that starts with . 
{filePath} (Resolve can't import those)\")\r\n else:\r\n print(f\"[{self.getName()}] Failed Adding File {file}\")\r\n self.notAddedFiles.add(filePath)\r\n continue\r\n \r\n importedFile = importedFilesList[0]\r\n \r\n if not self.hasClip(importedFile):\r\n self.moveClipsToBin([importedFile], False)\r\n \r\n self.clips.append(importedFile)\r\n \r\n importedFiles.append(importedFile.GetClipProperty()['File Path'])\r\n \r\n # add missing folders\r\n for dir in dirs:\r\n dirPath = os.path.join(root, dir)\r\n dirName = getFolderNameFromPath(dir)\r\n childBin = next((b for b in self.getChildBins() if b.getName() == dirName), None)\r\n \r\n if not childBin:\r\n newBin = mediaPool.AddSubFolder(self.getBin(), dirName)\r\n \r\n if not newBin:\r\n print(f\"[{self.getName()}] Failed adding bin {dirName}\")\r\n \r\n print(f\"[{self.getName()}] Adding new bin {dirName}\")\r\n childBin = ResolveBinTree(newBin, self)\r\n self.childBins.append(childBin)\r\n \r\n indexedChildBins.append(childBin)\r\n \r\n if recursive:\r\n childBin.syncBinWithFolder(dirPath)\r\n \r\n # we only want to process the current directory; let the bin tree handle the recursion\r\n break\r\n \r\n # bins that are not in the folder \r\n extraBins = list(set(self.getChildBins()) - set(indexedChildBins))\r\n \r\n # iterate over the extra bins for the delete operations\r\n for extraBin in extraBins:\r\n extraBin.syncBinWithFolder(None)\r\n \r\n # remove files from Resolve that don't exist in the folder\r\n if c.removeExtraFiles.get() and not self.isIgnored():\r\n for clip in self.getClips():\r\n if(ResolveClipTypes.isImported(clip) and # is a file type we can import\r\n not clip.GetClipProperty()['File Path'] in importedFiles and # is not already imported\r\n int(clip.GetClipProperty(\"Usage\")) == 0): # is not used\r\n print(f\"[{self.getName()}] Removing unused file {clip.GetClipProperty()['File Path']}\")\r\n self.deleteClips([clip], deleteFiles=False, refresh=False)\r\n \r\n # remove empty bins\r\n if c.removeEmptyBins.get() and not self.isIgnored():\r\n self.getEmptyChildBins(indexedChildBins, recursive=False, delete=True)", "id": "9805378", "language": "Python", "matching_score": 4.885073661804199, "max_stars_count": 7, "path": "resolveBinTree.py" }, { "content": "from pathlib import Path\r\n \r\ndef getFileNameFromPath(path):\r\n return Path(path).resolve().stem\r\n\r\ndef getFolderNameFromPath(path):\r\n return Path(path).resolve().name\r\n\r\ndef getFileExtensionFromPath(path):\r\n return Path(path).resolve().suffix\r\n\r\ndef getPathWithoutFileExtension(path):\r\n return Path(path).with_suffix('')\r\n", "id": "8470005", "language": "Python", "matching_score": 0.8237123489379883, "max_stars_count": 7, "path": "pathHelpers.py" } ]
1.95122
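The ResolveImporter class in the entry above runs its import loop on a background thread that polls the watched folder and is stopped through a threading.Event. A condensed sketch of that stoppable-worker pattern, with hypothetical names (PollingWorker, work); it uses Event.wait() as an interruptible sleep instead of the entry's sleep-between-status-messages approach.

import threading

class PollingWorker(threading.Thread):
    """Run work() every `interval` seconds until stop() is called."""

    def __init__(self, interval: float, work) -> None:
        super().__init__(daemon=True)
        self._stop_event = threading.Event()
        self.interval = interval
        self.work = work  # callable executed on every pass

    def stop(self) -> None:
        self._stop_event.set()

    def run(self) -> None:
        # Event.wait() returns True once the event is set, so it doubles as the sleep.
        while not self._stop_event.wait(self.interval):
            self.work()

Usage mirrors the importer: worker = PollingWorker(5, do_import); worker.start(); ...; worker.stop().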
jamesooo
[ { "content": "import OpenSSL, sendgrid\nimport ssl, socket, os, smtplib, time, datetime\nfrom sendgrid.helpers.mail import *\nfrom datetime import timedelta\n\ndef sendMail(domain):\n email = os.environ.get('EMAIL_ADDRESS')\n sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n from_email = Email(email)\n to_email = Email(email)\n subject = \"SSL Cert for \" + domain + \" will expire soon\"\n content = Content(\"text/plain\", \"SSL Cert for \" + domain + \" will expire soon\")\n mail = Mail(from_email, subject, to_email, content)\n response = sg.client.mail.send.post(request_body=mail.get())\n print(response.status_code)\n print(response.body)\n print(response.headers)\n\nnumDays = 30\nnow = time.time()\nwarnAfter = now + 86400*numDays\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nwith open(dir_path + '/domains') as f:\n domains = f.readlines()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\ndomains = [x.strip() for x in domains]\n\nexpired = []\ntimestamp = time.strftime(\"%c\")\nprint (\"Current time %s\" % timestamp )\n\nfor domain in domains:\n print(\"%s - OK\" % domain)\n cert=ssl.get_server_certificate((domain, 443))\n x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)\n notAfter = x509.get_notAfter()\n exp = time.mktime(datetime.datetime.strptime(notAfter, \"%Y%m%d%H%M%SZ\").timetuple())\n\n if(exp < warnAfter):\n sendMail(domain)\n", "id": "3026682", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ssl_expiry_checker.py" } ]
0
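The ssl_expiry_checker.py entry above parses the certificate's notAfter field through pyOpenSSL; the same timestamp can be read with only the standard library, as in this sketch (the name cert_expiry and the 10-second timeout are assumptions, not taken from the entry).

import socket
import ssl
from datetime import datetime, timedelta, timezone

def cert_expiry(hostname: str, port: int = 443) -> datetime:
    # Fetch the verified peer certificate and return its notAfter as an aware UTC datetime.
    context = ssl.create_default_context()
    with socket.create_connection((hostname, port), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as tls:
            cert = tls.getpeercert()  # parsed dict, e.g. {'notAfter': 'Jun  1 12:00:00 2025 GMT', ...}
    return datetime.strptime(cert["notAfter"], "%b %d %H:%M:%S %Y %Z").replace(tzinfo=timezone.utc)

# Same 30-day warning window as the entry:
# if cert_expiry("example.com") < datetime.now(timezone.utc) + timedelta(days=30): send the warning mail.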
HulewiczKamil
[ { "content": "import json\n\nfrom django.db import models\n\n\n# Create your models here.\n\nclass Calendar(models.Model):\n id = models.CharField(max_length=100, primary_key=True)\n etag = models.CharField(max_length=100)\n summary = models.CharField(max_length=1000)\n accessRole = models.CharField(max_length=100)\n timeZone = models.CharField(max_length=100)\n description = models.CharField(max_length=1000)\n\n def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)\n", "id": "320080", "language": "Python", "matching_score": 2.9894344806671143, "max_stars_count": 3, "path": "meeting_scheduler/scheduler_api/models.py" }, { "content": "# Generated by Django 3.2 on 2021-05-06 20:40\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Calendar',\n fields=[\n ('id', models.CharField(max_length=100, primary_key=True, serialize=False)),\n ('etag', models.CharField(max_length=100)),\n ('summary', models.CharField(max_length=1000)),\n ('accessRole', models.CharField(max_length=100)),\n ('timeZone', models.CharField(max_length=100)),\n ('description', models.CharField(max_length=1000)),\n ],\n ),\n ]\n", "id": "5158932", "language": "Python", "matching_score": 0.2976056933403015, "max_stars_count": 3, "path": "meeting_scheduler/scheduler_api/migrations/0001_initial.py" }, { "content": "from rest_framework import serializers\n\nclass PostSerializer(serializers.ModelSerializer):\n class Meta:\n pass", "id": "1151929", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "meeting_scheduler/scheduler_api/serializers.py" }, { "content": "class Block:\n def __init__(self, start_time, end_time) -> None:\n self.starts = start_time\n self.ends= end_time\n \n def extend_block_ending(self, new_end_time):\n print(\"extednded block time of ending\")\n self.ends = new_end_time\n print(self)\n\n def extend_block_beginning(self, new_start_time):\n print(\"extednded block time of start\")\n self.starts = new_start_time\n\n def __str__(self) -> str:\n return f\"Meeting block from: {self.starts} to {self.ends}\"", "id": "1748426", "language": "Python", "matching_score": 0.9663184285163879, "max_stars_count": 3, "path": "meeting_scheduler/scheduler_api/Blocks.py" }, { "content": "import datetime\nfrom google_auth_httplib2 import Request\nimport requests\nfrom typing import List, Dict\nfrom copy import deepcopy\nfrom .Blocks import Block\nfrom rest_framework.response import Response\n\n\ndef get_free_blocks(\n request,\n service,\n calendars: List[str],\n beginning_date: str,\n ending_date: str,\n beginning_hours: str,\n beginning_minutes: str,\n ending_hours: int,\n ending_minutes: int,\n duration_hours: int,\n duration_minutes: int,\n ) -> Dict:\n meetings = []\n meetings_blocks = []\n all_events = dict()\n\n\n\n bdate = f\"{beginning_date}T{beginning_hours}:{beginning_minutes}:00.000000Z\"\n edate = f\"{ending_date}T{ending_hours}:{ending_minutes}:00.000000Z\"\n datetime_bdate = datetime.datetime.strptime(bdate[:-8],\"%Y-%m-%dT%H:%M:%S\")\n datetime_edate = datetime.datetime.strptime(edate[:-8],\"%Y-%m-%dT%H:%M:%S\")\n bdate_tmp = (datetime_bdate - datetime.timedelta(hours=7)).strftime(\"%Y-%m-%dT%H:%M:%S\") + \".000000Z\"\n delta_time = datetime.timedelta(hours=duration_hours, minutes=duration_minutes)\n\n for calendar in calendars:\n events_result = service.events().list(calendarId=calendar, \n timeMin=bdate_tmp,\n 
timeMax=edate,\n singleEvents=True,\n orderBy='startTime').execute()\n all_events[calendar] = events_result.get('items', [])\n\n for e in all_events:\n for listing in all_events[e]:\n meetings.append({\n 'starts':datetime.datetime.strptime(listing[\"start\"][\"dateTime\"][:-6],\"%Y-%m-%dT%H:%M:%S\"),\n 'ends':datetime.datetime.strptime(listing[\"end\"][\"dateTime\"][:-6],\"%Y-%m-%dT%H:%M:%S\")\n }) \n sorted_meetings = sorted(meetings, key=lambda x: x['starts'])\n print(sorted_meetings)\n if len(sorted_meetings) > 0:\n meetings_blocks.append(Block(sorted_meetings[0]['starts'],sorted_meetings[0]['ends']))\n print(\"XD\")\n for i in range(1, len(sorted_meetings)):\n if meetings_blocks[-1].ends >= sorted_meetings[i]['starts']:\n print(f\"X {meetings_blocks} : {sorted_meetings[i]}\\n\")\n if meetings_blocks[-1].ends < sorted_meetings[i]['ends']:\n meetings_blocks[-1].extend_block_ending(sorted_meetings[i]['ends'])\n else:\n pass\n else:\n meetings_blocks.append(Block(sorted_meetings[i]['starts'], sorted_meetings[i]['ends']))\n \n meetings_blocks.append(Block(datetime_edate,datetime_edate))\n print(meetings_blocks)\n meetings_blocks_a = select_viable_blocks(meeting_blocks=meetings_blocks, complete_starting_date=datetime_bdate)\n\n for b in meetings_blocks:\n print(f\"Start:{b.starts}, end: {b.ends}\")\n\n generated_dates = generate_viable_starting_dates_a(meetings_blocks_a, datetime_bdate, datetime_edate, delta_time)\n print(type(generated_dates),\"==============================\")\n\n v_dates=limit_hours(generated_dates,int(beginning_hours),int(beginning_minutes), int(ending_hours), int(ending_minutes),duration_hours, duration_minutes)\n\n return {\n \"calendars\": calendars,\n \"dates\": v_dates,\n \"duration_hours\": duration_hours,\n \"duration_minutes\": duration_minutes,\n }\n\n\ndef select_viable_blocks(meeting_blocks: List, complete_starting_date):\n \n return [block for block in meeting_blocks \n if (block.starts<complete_starting_date \n and block.ends>complete_starting_date) \n or block.starts > complete_starting_date]\n\n\ndef generate_viable_starting_dates(\n meeting_blocks: List,\n complete_starting_date,\n complete_ending_date,\n meeting_duration\n ) -> str:\n\n _ = []\n tmp_date = complete_starting_date\n if(meeting_blocks[0].starts > complete_starting_date \n and meeting_blocks[0].starts - complete_starting_date > meeting_duration):\n while not tmp_date + meeting_duration > meeting_blocks[0].starts:\n _.append((tmp_date).strftime(\"%Y-%m-%dT%H:%M:%S\"))\n tmp_date += datetime.timedelta(minutes=5)\n #_.append((tmp_date + meeting_duration).str)\n if len(meeting_blocks) > 0:\n for i in range(1,len(meeting_blocks)):\n tmp_date = deepcopy(meeting_blocks[i-1].ends)\n while not tmp_date + meeting_duration > meeting_blocks[i].starts:\n _.append((tmp_date).strftime(\"%Y-%m-%dT%H:%M:%S\"))\n tmp_date += datetime.timedelta(minutes=5)\n\n if(meeting_blocks[-1].ends < complete_ending_date\n and complete_ending_date - meeting_blocks[-1].ends > meeting_duration):\n while not tmp_date + meeting_duration > meeting_blocks[0].starts:\n _.append((tmp_date).strftime(\"%Y-%m-%dT%H:%M:%S\"))\n tmp_date += datetime.timedelta(minutes=5)\n\n return _\n\ndef generate_viable_starting_dates_a(\n meeting_blocks: List,\n complete_starting_date,\n complete_ending_date,\n meeting_duration\n ) -> str:\n\n _ = []\n tmp_date = deepcopy(complete_starting_date)\n if(meeting_blocks[0].starts > complete_starting_date \n and meeting_blocks[0].starts - complete_starting_date > meeting_duration):\n while not tmp_date + 
meeting_duration > meeting_blocks[0].starts:\n _.append((tmp_date).strftime(\"%Y-%m-%dT%H:%M:%S\"))\n tmp_date += datetime.timedelta(minutes=5)\n #_.append((tmp_date + meeting_duration).str)\n if len(meeting_blocks) > 0:\n for i in range(1,len(meeting_blocks)):\n tmp_date = deepcopy(meeting_blocks[i-1].ends)\n while not tmp_date + meeting_duration > meeting_blocks[i].starts:\n _.append((tmp_date).strftime(\"%Y-%m-%dT%H:%M:%S\"))\n tmp_date += datetime.timedelta(minutes=5)\n\n if(meeting_blocks[-1].ends < complete_ending_date\n and complete_ending_date - meeting_blocks[-1].ends > meeting_duration):\n while not tmp_date + meeting_duration > meeting_blocks[0].starts:\n _.append((tmp_date).strftime(\"%Y-%m-%dT%H:%M:%S\"))\n tmp_date += datetime.timedelta(minutes=5)\n print(_)\n return _\n\n\ndef limit_hours(array, s_hr=0, s_min=0, e_hr=0, e_min=0, duration_hours=0, duration_minutes=0):\n print(f\"starting_ hours: {array[0][-8:-6]}\")\n print(f\"starting_ mins: {array[0][-5:-3]}\")\n print(array)\n result =[]\n print(\"Duration hrs: \", duration_hours)\n for elem in array:\n mtng_start_h = int(elem[-8:-6])\n mtng_start_m = round(int(elem[-5:-3])/60, 6)\n mtng_end_m = int(elem[-5:-3]) + int(duration_minutes)\n mtng_end_h = int(duration_hours) + mtng_start_h \n add = 0\n if mtng_end_m >= 60:\n add=1\n mtng_end_m = round((mtng_end_m % 60) /60, 6)\n else:\n add = 0\n mtng_end_m = round((mtng_end_m) /60, 6)\n \n if mtng_end_h + add >= 24:\n continue\n else:\n #print(mtng_start_h, \"+\", mtng_start_m, \"\\n\")\n if ( mtng_start_h + mtng_start_m >= round((int(s_hr)+int(s_min)/60), 6)) and mtng_end_h + mtng_end_m + add<= round((int(e_hr)+int(e_min)/60), 6):\n print(elem, \" \", f\"{mtng_end_h + mtng_end_m} <= {round((int(e_hr)+int(e_min)/60), 6)}\")\n result.append(elem)\n print(result)\n return result\n", "id": "11411247", "language": "Python", "matching_score": 4.07838773727417, "max_stars_count": 3, "path": "meeting_scheduler/scheduler_api/alg.py" }, { "content": "from rest_framework.decorators import parser_classes, api_view, permission_classes\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nimport datetime\nimport os.path\nimport json\nimport requests\nfrom googleapiclient.discovery import build\nfrom google.oauth2.credentials import Credentials\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly, AllowAny, IsAuthenticated\nfrom dj_rest_auth.registration.views import SocialLoginView, SocialConnectView\n\nfrom allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter\n\nfrom .models import Calendar\nfrom .Blocks import Block\nfrom .alg import get_free_blocks \n\ndef get_google_token(access_token, refresh_token, \n client_id=os.environ.get('CLIENT_ID'), \n client_secret=os.environ.get('SECRET_GOOGLE'),\n uri=os.environ.get(\"REDIRECT_URI\")):\n \"\"\"\n Custom function responsible for generating credentials object\n based on the data provided by the user in request.\n \"\"\"\n creds = Credentials(token=access_token,\n refresh_token=refresh_token,\n #token_uri=request.data[\"token_uri\"],\n token_uri=uri,\n client_id=client_id,\n client_secret=client_secret,\n scopes=[\n \"https://www.googleapis.com/auth/calendar.app.created\",\n \"https://www.googleapis.com/auth/calendar.events.freebusy\",\n \"https://www.googleapis.com/auth/calendar.freebusy\",\n \"https://www.googleapis.com/auth/calendar\",\n \"https://www.googleapis.com/auth/calendar.events\",\n \"https://www.googleapis.com/auth/calendar.events.owned\",\n 
\"https://www.googleapis.com/auth/calendar.calendarlist\",\n \"https://www.googleapis.com/auth/calendar.calendars\",\n \"https://www.googleapis.com/auth/calendar.acls\"])\n return creds\n\ndef get_google_credentials():\n creds = None\n path = 'token.json'\n\n SCOPES = [\n \"https://www.googleapis.com/auth/calendar.app.created\",\n \"https://www.googleapis.com/auth/calendar.events.freebusy\",\n \"https://www.googleapis.com/auth/calendar.freebusy\",\n \"https://www.googleapis.com/auth/calendar\",\n \"https://www.googleapis.com/auth/calendar.events\",\n \"https://www.googleapis.com/auth/calendar.events.owned\",\n \"https://www.googleapis.com/auth/calendar.calendarlist\",\n \"https://www.googleapis.com/auth/calendar.calendars\",\n \"https://www.googleapis.com/auth/calendar.acls\"\n ]\n\n if os.path.exists(path):\n creds = Credentials.from_authorized_user_file(path, SCOPES)\n else:\n print('No creds available')\n\n return creds\n\n@api_view(['POST'])\n#@api_view(['GET'])\n@parser_classes([JSONParser])\ndef get_calendars_list(request, format=None):\n \"\"\"\n :param request:{\n access_token : string -> google API access token\n refresh_token : string -> google API refresh token\n }\n :param format:\n :return: calendars list in json\n \"\"\"\n service = build('calendar', 'v3', credentials=get_google_token(request.data[\"access_token\"], request.data[\"refresh_token\"]))\n\n #service = build('calendar', 'v3', credentials=tmp_creds())\n\n calendar_list = service.calendarList().list().execute()\n\n calendars = []\n # # example how to get calendars objects\n # for item in calendar_list['items']: calendar = Calendar(id=item['id'],\n # etag=item['etag'], summary=item['summary'], accessRole=item['accessRole'], timeZone=item['timeZone'])\n # calendars.append(calendar.toJSON())\n print(calendar_list.get('items', []))\n\n return Response(calendar_list.get('items', []))\n\n@api_view(['GET'])\n#@api_view(['POST'])\n@parser_classes([JSONParser])\ndef get_events_from_calendar(request, calendar, format=None):\n \"\"\"\n return events in calendar from given time interval\n :param request:{\n time_min : datetime\n time_max : datetime\n }\n :param calendar: calendar id\n :param format:\n :return:\n \"\"\"\n service = build('calendar', 'v3', credentials=get_google_credentials())\n\n today = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n time_min = request.GET.get('time_min', today)\n\n today_plus_ten = (datetime.datetime.utcnow() + datetime.timedelta(days=10)).isoformat() + 'Z' # 'Z' indicates UTC time\n time_max = request.GET.get('time_max', today_plus_ten)\n\n events_result = service.events().list(calendarId=calendar, timeMin=time_min, timeMax=time_max, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n if not events:\n return Response('No results', status=204)\n\n return Response(events)\n\n\n@api_view(['POST'])\n@parser_classes([JSONParser])\ndef insert_event_to_calendar(request, calendar, format=None):\n \"\"\"\n Insert Event to calendar\n :param request: {\n summary: string\n location: string\n description: string\n start: datetime\n end: datetime\n attendees: list\n }\n :param calendar: calendar id\n :param format:\n :return:\n \"\"\"\n service = build('calendar', 'v3', credentials=get_google_credentials())\n\n event = {\n 'summary': request.GET.get('summary', ''),\n 'location': request.GET.get('location', ''),\n 'description': request.GET.get('description', ''),\n 'start': request.GET.get('start', ''),\n 'end': request.GET.get('end', 
''),\n 'attendees': request.GET.get('attendees', ''),\n 'reminders': {\n 'useDefault': True,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n # event = {\n # 'summary': 'Google I/O 2015',\n # 'location': '800 Howard St., San Francisco, CA 94103',\n # 'description': 'A chance to hear more about Google\\'s developer products.',\n # 'start': {\n # 'dateTime': '2021-05-01T09:00:00-07:00',\n # 'timeZone': 'America/Los_Angeles',\n # },\n # 'end': {\n # 'dateTime': '2021-05-01T17:00:00-07:00',\n # 'timeZone': 'America/Los_Angeles',\n # },\n # 'recurrence': [\n # 'RRULE:FREQ=DAILY;COUNT=2'\n # ],\n # 'attendees': [\n # {'email': '<EMAIL>'},\n # {'email': '<EMAIL>'},\n # ],\n # 'reminders': {\n # 'useDefault': False,\n # 'overrides': [\n # {'method': 'email', 'minutes': 24 * 60},\n # {'method': 'popup', 'minutes': 10},\n # ],\n # },\n # }\n\n event = service.events().insert(calendarId=calendar, body=event).execute()\n\n return Response({'Event created:': event.get('htmlLink')})\n\n\n@api_view(['GET', 'POST'])\n@parser_classes([JSONParser])\ndef is_free_or_busy(request, format=None):\n \"\"\"\n check if user is busy at given time\n :param request:{\n time_min : datetime\n time_max : datetime\n items : list\n }\n :param format:\n :return:\n \"\"\"\n service = build('calendar', 'v3', credentials=get_google_credentials())\n\n today = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n time_min = request.GET.get('time_min', today)\n\n today_plus_ten = (datetime.datetime.utcnow() + datetime.timedelta(\n days=10)).isoformat() + 'Z' # 'Z' indicates UTC time\n time_max = request.GET.get('time_max', today_plus_ten)\n\n # List of calendars and/or groups to query.\n items = request.GET.get('items', [{\"id\": 'primary'}])\n\n body = {\n \"timeMin\": time_min,\n \"timeMax\": time_max,\n \"items\": items\n }\n\n eventsResult = service.freebusy().query(body=body).execute()\n cal_dict = eventsResult[u'calendars']\n\n return Response(cal_dict)\n\n@api_view(['GET', 'POST'])\n# @permission_classes([IsAuthenticated])\n@parser_classes([JSONParser])\ndef google_data(request, format=None):\n \"\"\"\n Provide necessary data to generate google API access and refresh token\n :param request: {\n code : string -> Code generated by Google Oauth \n }\n :param format:\n :return:\n \"\"\"\n\n if request.method == \"GET\":\n content ={\n 'status':'request was permitted'\n }\n return Response(content)\n elif request.method == \"POST\":\n #print (f'=========================== received data: {request.data[\"x\"]}')\n #print(f'token: {request.data[\"token\"]}')\n print(os.environ.get('CLIENT_ID'))\n print(os.environ.get('SECRET_GOOGLE'))\n print(os.environ.get('REDIRECT_URI'))\n data = {\n \"code\": request.data['code'],\n \"client_id\": os.environ.get('CLIENT_ID'),\n \"client_secret\": os.environ.get('SECRET_GOOGLE'),\n \"redirect_uri\": os.environ.get('REDIRECT_URI'),\n \"grant_type\": \"authorization_code\"\n }\n\n #print(data)\n req = requests.post(\"https://oauth2.googleapis.com/token\", data=data)\n #print(req)\n json_data = json.loads(req.text)\n\n return Response(data=json_data, status=200)\n\n# @permission_classes([IsAuthenticated])\n@api_view([\"POST\"])\n@parser_classes([JSONParser]) \ndef simple_algorithm(request):\n \"\"\"\n Provide necessary data to generate google API access and refresh token\n :param request: {\n \"access_token\": string,\n \"refresh_token\": string,\n \"calendars\": list[str],\n \"beginning_date\": \"2021-06-15 
15:50:52.236664Z\",\n \"ending_date\": \"2021-06-16 15:50:52.236664Z\",\n \"beggining_hours\": 12,\n \"beggining_minutes\": 30,\n \"ending_hours\": 16,\n \"ending_minutes\": 00,\n \"meeting_duration_hours\": 2,\n \"meeting_duration_minutes\": 30,\n \"meeting_name\": \"meeting_name\"\n }\n :param format:\n :return:\n \"\"\"\n service = build('calendar', 'v3', credentials=get_google_token(request.data[\"access_token\"], request.data[\"refresh_token\"]))\n meetings_date = []\n meetings_blocks = []\n all_members_events = dict()\n \n today = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n time_min = request.GET.get('time_min', today)\n\n today_plus_ten = (datetime.datetime.utcnow() + datetime.timedelta(days=1)).isoformat() + 'Z' # 'Z' indicates UTC time\n time_max = request.GET.get('time_max', today_plus_ten)\n \n for calendar in request.data[\"calendars\"]:\n events_result = service.events().list(calendarId=calendar, \n timeMin=time_min,\n timeMax=time_max, \n singleEvents=True,\n orderBy='startTime').execute()\n all_members_events[calendar] = events_result.get('items', [])\n #if not events:\n # return Response('No results', status=204)\n for e in all_members_events:\n for listing in all_members_events[e]:\n meetings_date.append(\n {\n 'starts':datetime.datetime.strptime(listing[\"start\"][\"dateTime\"][:-6],\"%Y-%m-%dT%H:%M:%S\"),\n 'ends':datetime.datetime.strptime(listing[\"end\"][\"dateTime\"][:-6],\"%Y-%m-%dT%H:%M:%S\")\n }\n ) \n sorted_meetings = sorted(\n meetings_date,\n key=lambda x: x['starts']\n )\n meetings_blocks.append(Block(sorted_meetings[0]['starts'],sorted_meetings[0]['ends']))\n\n for i in range(1, len(sorted_meetings)):\n found_overlaping = False\n for block in meetings_blocks:\n if sorted_meetings[i]['starts'] <= block.ends:\n found_overlaping = True\n if sorted_meetings[i]['ends'] >= block.ends:\n block.extend_block_ending(sorted_meetings[i]['ends'])\n else:\n pass\n if not found_overlaping:\n meetings_blocks.append(Block(sorted_meetings[i]['starts'],sorted_meetings[i]['ends']))\n\n today = (datetime.datetime.utcnow() + datetime.timedelta(hours=2)).isoformat() + 'Z'\n print(today)\n today = datetime.datetime.strptime(today[:-8],\"%Y-%m-%dT%H:%M:%S\")\n\n today_in_5hrs = (datetime.datetime.utcnow() + datetime.timedelta(hours=7)).isoformat()\n today_in_5hrs = datetime.datetime.strptime(today_in_5hrs[:-8],\"%Y-%m-%dT%H:%M:%S\")\n\n duration = datetime.timedelta(hours=2)\n \n for m in meetings_blocks:\n print(m)\n if meetings_blocks[0].starts - today > duration:\n\n return Response(f\"Jest taki termin, przed wszystkimi, {meetings_blocks[0].starts - today}\")\n else:\n for idx in range(1,len(meetings_blocks)):\n if meetings_blocks[idx-1].ends - meetings_blocks[idx-1].starts > duration:\n return Response(f\"znaleziono termin między blokami, o godzinie {meetings_blocks[idx-1].ends}\")\n if meetings_blocks[-1].ends - today_in_5hrs > duration:\n return Response(\"Jest taki termin po wszystkich blokach\")\n\n return Response(\"Nie znaleziono nic\")\n\n\n print(sorted_meetings)\n events_json = json.dumps(all_members_events)\n\n return Response(events_json)\n\n\n@api_view([\"POST\"])\n@parser_classes([JSONParser]) \ndef simple_algorithm_v2(request):\n service = build('calendar', 'v3', credentials=get_google_token(request.data[\"access_token\"], request.data[\"refresh_token\"]))\n today = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n time_min = request.GET.get('beginning_date', today)\n\n today_plus_seven = (datetime.datetime.utcnow() 
+ datetime.timedelta(days=7)).isoformat() + 'Z' # 'Z' indicates UTC time\n time_max = request.GET.get('ending_date', today_plus_seven)\n data = get_free_blocks(\n request=request,\n service=service,\n calendars=request.data[\"calendars\"],\n #beginning_date=\"2021-06-15T07:40:00.000000Z\",\n beginning_date=request.data[\"beginning_date\"],\n ending_date=request.data[\"ending_date\"],\n beginning_hours=int(request.data[\"beginning_hours\"]),\n beginning_minutes=int(request.data[\"beginning_minutes\"]),\n ending_hours=int(request.data[\"ending_hours\"]),\n ending_minutes=int(request.data[\"ending_minutes\"]),\n duration_hours=int(request.data[\"meeting_duration_hours\"]),\n duration_minutes=int(request.data[\"meeting_duration_minutes\"]),\n )\n \n # return Response(f\"{today} --- type --- {type(today)}\")\n \n print(data)\n \n return Response(data,status=200)\n\n@api_view([\"POST\"])\n@parser_classes([JSONParser]) \ndef insert_meetings(request):\n\n service = build('calendar', 'v3', credentials=get_google_token(request.data[\"access_token\"], request.data[\"refresh_token\"]))\n\n hours = request.data[\"duration_hours\"]\n minutes = request.data[\"duration_minutes\"]\n start = request.data[\"date\"][:19]\n end = datetime.datetime.strptime(start,\"%Y-%m-%dT%H:%M:%S\")+datetime.timedelta(hours=int(hours),minutes=int(minutes))\n end = datetime.datetime.strftime(end,\"%Y-%m-%dT%H:%M:%S\") + \"+02:00\"\n start = request.data[\"date\"][:19] + \"+02:00\"\n print(request.data)\n print(start)\n print(end)\n event = {\n 'summary': request.data[\"title\"],\n #'location': 'Wroclaw, Poland',\n 'description': request.data[\"description\"],\n 'start': {\n 'dateTime': start,\n 'timeZone': 'Europe/Warsaw',\n },\n 'end': {\n 'dateTime': end,\n 'timeZone': 'Europe/Warsaw',\n },\n # 'recurrence': [\n # 'RRULE:FREQ=DAILY;COUNT=2'\n # ],\n 'attendees': [\n {\"email\": cal} for cal in request.data[\"calendars\"]\n ],\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n # for calendar in request.data[\"calendars\"]:\n print(event)\n event = service.events().insert(calendarId=request.data[\"calendars\"][0], body=event).execute()\n return Response(data=event, status=200)\n\nclass GoogleLogin(SocialLoginView):\n adapter_class = GoogleOAuth2Adapter", "id": "7247093", "language": "Python", "matching_score": 3.4745543003082275, "max_stars_count": 3, "path": "meeting_scheduler/scheduler_api/views.py" }, { "content": "from django.urls import path, include\n\nfrom . 
import views\n\napp_name = 'scheduler_api'\n\nurlpatterns = [\n path('calendars/list', views.get_calendars_list, name='calendars-list'),\n path('calendar/<str:calendar>/events', views.get_events_from_calendar, name='events-list'),\n path('calendar/<str:calendar>/event/insert', views.insert_event_to_calendar, name='insert-event'),\n path('calendar/freebusy', views.is_free_or_busy, name='free-busy'),\n path('custom-tokens/', views.google_data, name=\"googledata\"),\n path('dj-rest-auth/google/', views.GoogleLogin.as_view(), name='google-login'),\n path('calendars/s-algorithm', views.simple_algorithm, name='simple_algorithm'),\n path('calendars/algorithm', views.simple_algorithm_v2, name='algorithm'),\n path('calendars/insert', views.insert_meetings, name=\"Arrange meetings\")\n]\n\n", "id": "10586763", "language": "Python", "matching_score": 1.7270952463150024, "max_stars_count": 3, "path": "meeting_scheduler/scheduler_api/urls.py" }, { "content": "from django.urls import path\nfrom django.views.generic import TemplateView\n\napp_name = 'scheduler'\n\nurlpatterns = [\n path('', TemplateView.as_view(template_name=\"scheduler/index.html\")),\n]\n", "id": "10112881", "language": "Python", "matching_score": 0.08820299804210663, "max_stars_count": 3, "path": "meeting_scheduler/scheduler/urls.py" }, { "content": "#!/usr/bin/env python\n\nimport threading\nfrom time import sleep\nfrom gym import Gym\nfrom flask import Flask, request, render_template\nfrom flask_socketio import SocketIO, emit\n#from threading import Lock\nfrom members import GenericMember, GymChad, CallisctenicsEnjoyer, CardioFreak\nfrom random import random, sample, randint\nfrom random import uniform\n\ngym = Gym()\n\nclass Host(threading.Thread):\n def __init__(self) -> None:\n super().__init__(target=self.invite_following_customers)\n self.MEMBERS = list()\n global gym\n self.gym = gym\n self.condition = threading.Condition()\n self.gym_member_id = 0\n\n def add_member(self, m):\n with self.condition:\n self.MEMBERS.append(m)\n self.MEMBERS[-1].start()\n \n def remove_member(self, m):\n with self.condition:\n self.MEMBERS.remove(m)\n \n def invite_following_customers(self):\n global gym\n sleep(20)\n while True:\n sleep(uniform(2,3))\n rnd = randint(0, 100)\n if rnd % 3 == 0:\n self.add_member(CallisctenicsEnjoyer(self.gym_member_id, gym, self))\n self.gym_member_id+=1\n elif rnd % 3 == 1:\n self.add_member(CardioFreak(self.gym_member_id,gym, self))\n self.gym_member_id+=1\n else:\n self.add_member(GymChad(self.gym_member_id,gym, self))\n self.gym_member_id+=1\n \n def gym_members_statuses_to_dict(self) -> dict:\n with self.condition:\n members =[{\n \"status\": m.status,\n \"type\": m.type,\n \"id\": m.pid,\n \"time_rem\": m.remaining_sleep, \n \"total_sleep\": m.total_sleep,\n } for m in self.MEMBERS]\n \n \n resources = [\n gym.total_weights.avaliable_weight,\n gym.total_weights.total,\n gym.threadmills.generate_string(),\n gym.ergometers.generate_string(),\n gym.pullup_bars.generate_string(),\n gym.crunch_machines.generate_string(),\n gym.elipticals.generate_string(),\n gym.bicycles.generate_string(),\n gym.benchpresses.generate_string(),\n gym.smith.generate_string(),\n gym.deadlift.generate_string(),\n ]\n\n x = {\n \"currentMembersCount\": len(self.MEMBERS),\n \"members\": members,\n \"res\": resources,\n }\n\n return x\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"key\"\nsocketio = SocketIO(app)\nMEMBERS_LIMIT = 10\nMEMBERS = list()\nTRIGGER_END_EVENT = False\ngym_member_id = 0\n\ngym_host = 
Host()\ngym_host.start()\n\[email protected]('/')\ndef index():\n return render_template('index.html', context={'members': gym_host.MEMBERS})\n\[email protected]('run')\ndef run():\n global MEMBERS_LIMIT, MEMBERS, TRIGGER_END_EVENT, gym_member_id, gym, gym_host\n for i in range(0, MEMBERS_LIMIT+5, 3):\n gym_host.add_member(CallisctenicsEnjoyer(i, gym, host=gym_host))\n gym_host.add_member(CardioFreak(i+1, gym, host=gym_host))\n gym_host.add_member(GymChad(i+2, gym, host=gym_host))\n gym_host.MEMBERS = sample(gym_host.MEMBERS, len(gym_host.MEMBERS))\n gym_host.gym_member_id = i+3\n socketio.run(app)\n \[email protected]('connection')\ndef connected(json):\n print(\"Initialized connection\")\n print(json)\n socketio.emit('updating', {\n \"start\":\"gym\",\n \"gymMembers\": len(gym_host.MEMBERS)\n })\n\[email protected](\"update_received\")\ndef request_update():\n socketio.emit('request_update')\n\[email protected]('request_update_gym')\ndef update_view():\n global gym_host\n socketio.emit(\"updating\", gym_host.gym_members_statuses_to_dict())\n\nif (__name__ == \"__main__\"):\n run()\n\n", "id": "1143180", "language": "Python", "matching_score": 3.1419124603271484, "max_stars_count": 1, "path": "flaskr/app.py" }, { "content": "from threading import Thread, Condition\nfrom time import sleep\nfrom datetime import datetime\nfrom random import randint, uniform, sample\n\nclass GenericMember(Thread):\n def __init__(self, pid, gym, dw, gm_type, host):\n super().__init__(target=self.run)\n self.pid = pid\n self.condition = Condition()\n self.locker_number = -1\n self.attempts = 0\n self.eid = -1\n self.desired_weight = dw\n self.gym = gym\n self.list_of_excercises=[]\n self.type = gm_type\n self.host = host\n self.status = \"Entering gym\"\n self.total_sleep = 0\n self.remaining_sleep = 0\n self.is_remaiming_time_set = False\n\n if randint(0,1) == 0:\n self.sex = \"m\"\n else: \n self.sex = \"f\"\n\n def gsleep(self, sleeping_period):\n self.total_sleep = round(sleeping_period,2)\n self.remaining_sleep = round(sleeping_period,2)\n while not self.remaining_sleep <= 0:\n if self.remaining_sleep > 0.2:\n sleep(0.2)\n self.remaining_sleep -= 0.2\n self.remaining_sleep = round(self.remaining_sleep,2)\n else:\n sleep(self.remaining_sleep)\n self.remaining_sleep -= self.remaining_sleep\n self.total_sleep = 0\n\n def run(self):\n self.enter_gym()\n if self.locker_number != -1:\n self.gsleep(uniform(2,3))\n self.execute_training_plan()\n self.gsleep(uniform(3,4))\n self.exit_gym()\n\n def execute_training_plan(self):\n for ex in self.list_of_excercises:\n self.train(ex,8,10)\n\n def train(self, equipment, min_occupation_time, max_occupation_time):\n equipment.start_training(self)\n self.gsleep(uniform(min_occupation_time,max_occupation_time))\n equipment.stop_training(self)\n\n def exit_gym(self):\n if self.locker_number != -1:\n self.gym.reception.release_locker(self)\n print(f\"[{datetime.now()}] {self}: Wychodzę po treningu\")\n self.status = \"Leaving after training\"\n self.gsleep(5)\n else:\n print(f\"[{datetime.now()}] {self}: Nie chce mi się czekać\")\n self.status = \"I got bored waiting for locker... 
quitting\"\n self.gsleep(5)\n self.host.remove_member(self)\n\n def enter_gym(self):\n self.locker_number = self.gym.reception.ask_to_enter(self)\n\n def __str__(self):\n return f\"Member {self.type} {self.pid}\"\n\n\nclass CardioFreak(GenericMember):\n def __init__(self, pid, gym, host):\n super().__init__(pid, gym, randint(40,70), \"CardioFreak\", host=host)\n l = [\n self.gym.bicycles,\n self.gym.ergometers,\n self.gym.threadmills,\n self.gym.elipticals,\n self.gym.deadlift\n ]\n self.list_of_excercises = sample(l, len(l))\n\nclass CallisctenicsEnjoyer(GenericMember):\n def __init__(self, pid, gym, host):\n super().__init__(pid, gym, randint(85,100), \"CallistenicsEnj\", host=host)\n l = [\n self.gym.threadmills,\n self.gym.pullup_bars,\n self.gym.ergometers,\n self.gym.benchpresses,\n self.gym.crunch_machines,\n ]\n self.list_of_excercises = sample(l, len(l))\n\nclass GymChad(GenericMember):\n def __init__(self, pid, gym, host):\n super().__init__(pid, gym, randint(100,200), \"GymChad\", host=host)\n l = [\n self.gym.pullup_bars,\n self.gym.crunch_machines,\n self.gym.benchpresses,\n self.gym.deadlift,\n self.gym.smith\n ]\n self.list_of_excercises = sample(l, len(l))", "id": "2855700", "language": "Python", "matching_score": 2.261976718902588, "max_stars_count": 1, "path": "flaskr/members.py" }, { "content": "from threading import Condition\nimport time\nfrom datetime import datetime\n\nclass Reception():\n def __init__(self, locker_count):\n self.condition = Condition()\n self.female_lockers = [True for _ in range(locker_count//2)]\n self.male_lockers = [True for _ in range(locker_count//2)]\n self.time = time.time()\n\n def ask_to_enter(self, gym_member):\n if gym_member.sex == \"f\":\n return self.aquire_locker(gym_member,self.female_lockers)\n else:\n return self.aquire_locker(gym_member,self.male_lockers)\n\n def aquire_locker(self, gym_member, lockers):\n with self.condition:\n while not any(lockers):\n if gym_member.attempts > 10:\n return -1\n gym_member.attempts += 1\n self.condition.wait(1)\n\n for i in range(len(lockers)):\n if lockers[i]:\n lockers[i] = False\n if gym_member.sex == \"f\":\n print(f\"[{datetime.now()}] Odebrałam szafkę z szatni {gym_member.sex}: {gym_member.pid}\")\n gym_member.status = f\"I collected locker from {gym_member.sex} locker room\"\n else:\n print(f\"[{datetime.now()}] Odebrałem szafkę z szatni {gym_member.sex}: {gym_member.pid}\")\n gym_member.status = f\"I collected locker from {gym_member.sex} locker room\"\n return i \n\n def release_locker(self, gym_member):\n if gym_member.sex == \"f\":\n self.release(gym_member,self.female_lockers)\n else:\n self.release(gym_member,self.male_lockers)\n\n def release(self, gym_member, lockers):\n with self.condition:\n if gym_member.locker_number == None:\n print(\"Err\")\n else:\n lockers[gym_member.locker_number] = True\n self.condition.notify_all()\n pass", "id": "5784953", "language": "Python", "matching_score": 1.4769686460494995, "max_stars_count": 1, "path": "flaskr/reception.py" }, { "content": "from random import uniform\nfrom threading import Condition\nfrom time import sleep\nfrom datetime import datetime\n\n\nclass CardioEquipement:\n def __init__(self, eq_count, name):\n self.condition = Condition()\n self.array = [True for _ in range(eq_count)]\n self.name = name\n self.taken_by = [\"None\" for _ in range(eq_count)]\n \n def __str__(self) -> str:\n return self.name\n\n def start_training(self, g):\n with self.condition:\n while not any(self.array):\n self.condition.wait()\n g.status = 
f\"Waiting for {self}\"\n \n for eq in range(len(self.array)):\n if self.array[eq]:\n self.take_info(g)\n #print(f\"[{datetime.now()}]{g} took {self}\")\n self.array[eq] = False\n self.taken_by[eq] = g.pid\n g.eid = eq\n return eq\n\n def stop_training(self, g):\n with self.condition:\n #print(g.pid, f\" Zwolnił {self}\")\n self.release_info(g)\n self.array[g.eid] = True\n self.taken_by[g.eid] = \"None\"\n self.condition.notify_all()\n\n def generate_string(self):\n string_info = \"Taken by: [\"\n for i in self.taken_by:\n string_info+=f\"{i},\"\n string_info+=\"]\"\n return string_info\n\n def take_info(self, g):\n g.status = f\"Works out on {self}\"\n print(f\"[{datetime.now()}] {g} took {self}\")\n\n def release_info(self, g):\n g.status = f\"Stopped working out on {self}\"\n print(f\"[{datetime.now()}] {g} released {self}\")\n\n\nclass Threadmill(CardioEquipement):\n def __init__(self, eq_count):\n super().__init__(eq_count, \"Threadmill\")\n\n\nclass Ergometer(CardioEquipement):\n def __init__(self, eq_count):\n super().__init__(eq_count, \"Ergometer\")\n\n\n\nclass FreeWeightEx:\n def __init__(self, eq_count, name, all_plates):\n self.condition = Condition()\n self.array = [True for _ in range(eq_count)]\n self.name = name\n self.weight = all_plates\n self.taken_by = [\"None\" for _ in range(eq_count)]\n \n def __str__(self) -> str:\n return self.name\n\n def start_training(self, g):\n with self.condition:\n while not any(self.array):\n self.condition.wait()\n print(f\"[{datetime.now()}] {g} waiting for {self}\")\n g.status = f\"Waiting for {self}\"\n for eq in range(len(self.array)):\n if self.array[eq]:\n #print(f\"{g.pid} Took {self}\")\n self.taken_by[eq] = g.pid\n self.array[eq] = False\n g.eid = eq\n break\n with self.weight.condition:\n while not self.weight.avaliable_weight - g.desired_weight > 0:\n print(f\"[{datetime.now()}] {g} could not start lifting on {self}, missing plates, AVALIABLE: {self.weight.avaliable_weight} -> WANTED TO TAKE: {g.desired_weight}\")\n g.status = f\"Waiting on plates for {self}\"\n self.weight.condition.wait()\n self.weight.avaliable_weight -= g.desired_weight\n self.take_info(g)\n return eq\n\n def stop_training(self, g):\n with self.weight.condition:\n self.weight.avaliable_weight += g.desired_weight\n self.release_info(g)\n self.weight.condition.notify_all()\n\n with self.condition:\n self.array[g.eid] = True\n self.taken_by[g.eid] = \"None\"\n self.condition.notify_all()\n \n def generate_string(self):\n string_info = \"Taken by: [\"\n for i in self.taken_by:\n string_info+=(f\"{i},\")\n string_info+=\"]\"\n return string_info\n\n def take_info(self, g):\n print(f\"[{datetime.now()}] {g} took {self}, currently avaliable weight: {self.weight.avaliable_weight}\")\n g.status = f\"Works out on {self}\"\n def release_info(self, g):\n print(f\"[{datetime.now()}] {g} released {self}, currently avaliable weight: {self.weight.avaliable_weight}\")\n g.status = f\"Stopped working out on {self}\"\n\n\nclass Benchpress(FreeWeightEx):\n def __init__(self, eq_count, all_plates):\n super().__init__(eq_count, \"Benchpress\", all_plates)\n\n\nclass Weight:\n def __init__(self, total_weight) -> None:\n self.avaliable_weight = total_weight\n self.total = total_weight\n self.condition = Condition()\n\n\nclass Bicycle(CardioEquipement):\n def __init__(self, eq_count):\n super().__init__(eq_count, \"Bicycle\")\n\n\nclass CrunchMachine(CardioEquipement):\n def __init__(self, eq_count):\n super().__init__(eq_count, \"Crunch Machine\")\n\n\nclass 
Eliptical(CardioEquipement):\n def __init__(self, eq_count):\n super().__init__(eq_count, \"Eliptical\")\n\n\nclass Deadlift(FreeWeightEx):\n def __init__(self, eq_count, all_plates):\n super().__init__(eq_count, \"Deadlift\", all_plates)\n\n\nclass Smith_machine(FreeWeightEx):\n def __init__(self, eq_count, all_plates):\n super().__init__(eq_count, \"Smith\", all_plates)\n\n\nclass Fitness_room:\n def __init__(self):\n pass\n\n\nclass PullUpBar(CardioEquipement):\n def __init__(self, eq_count):\n super().__init__(eq_count, \"Pull Up Bar\")", "id": "12472294", "language": "Python", "matching_score": 3.841726064682007, "max_stars_count": 1, "path": "flaskr/resources.py" }, { "content": "from reception import Reception\nfrom resources import Threadmill, Benchpress, Ergometer, PullUpBar, Eliptical, Bicycle, CrunchMachine, Weight, Deadlift, Smith_machine\n\nLOCKER_COUNT = 20\n\nclass Gym:\n def __init__(self) -> None:\n global LOCKER_COUNT\n self.reception = Reception(LOCKER_COUNT)\n\n self.total_weights = Weight(500) \n self.threadmills = Threadmill(2)\n self.ergometers = Ergometer(2)\n self.pullup_bars = PullUpBar(2)\n self.crunch_machines = CrunchMachine(2)\n self.elipticals = Eliptical(2)\n self.bicycles = Bicycle(2)\n self.benchpresses = Benchpress(3, self.total_weights)\n self.smith = Smith_machine(2, self.total_weights)\n self.deadlift = Deadlift(2, self.total_weights)", "id": "6395769", "language": "Python", "matching_score": 1.8916748762130737, "max_stars_count": 1, "path": "flaskr/gym.py" } ]
1.891675
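Illustrative sketch (not part of the dataset record above): the scheduler code in `alg.py` and `views.py` merges each attendee's events into busy `Block` ranges, extending a block when the next meeting overlaps it and appending a sentinel block at the window end, then walks the gaps in five-minute steps to propose meeting start times. Below is a minimal, self-contained version of that merge-then-scan idea, using plain `(start, end)` datetime tuples instead of the repository's `Block` class; it is an editorial illustration, not the project's implementation.

# Minimal sketch of the merge-then-scan scheduling idea:
# collapse overlapping meetings into busy blocks, then emit
# candidate start times from the gaps between blocks.
from datetime import datetime, timedelta
from typing import Iterator, List, Tuple

Interval = Tuple[datetime, datetime]

def merge_busy_blocks(meetings: List[Interval]) -> List[Interval]:
    """Merge overlapping (start, end) intervals into busy blocks."""
    blocks: List[Interval] = []
    for start, end in sorted(meetings):
        if blocks and start <= blocks[-1][1]:
            # Overlaps the previous block: extend its end if needed.
            blocks[-1] = (blocks[-1][0], max(blocks[-1][1], end))
        else:
            blocks.append((start, end))
    return blocks

def candidate_starts(blocks: List[Interval],
                     window_start: datetime,
                     window_end: datetime,
                     duration: timedelta,
                     step: timedelta = timedelta(minutes=5)) -> Iterator[datetime]:
    """Yield start times whose [start, start + duration) fits between blocks."""
    cursor = window_start
    # Sentinel block at the window end, mirroring the repo's
    # meetings_blocks.append(Block(datetime_edate, datetime_edate)).
    for block_start, block_end in blocks + [(window_end, window_end)]:
        while cursor + duration <= block_start:
            yield cursor
            cursor += step
        cursor = max(cursor, block_end)

# Example: one 10:00-11:00 meeting inside a 09:00-12:00 window.
day = datetime(2021, 6, 15)
busy = merge_busy_blocks([(day.replace(hour=10), day.replace(hour=11))])
slots = list(candidate_starts(busy, day.replace(hour=9), day.replace(hour=12),
                              timedelta(minutes=30)))
# slots[0] == 09:00 and slots[-1] == 11:30 for this input.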
nicodv
[ { "content": "import yaml\n\n\ndef read_local_cfg():\n \"\"\"Read local settings from a text file. This file should\n contain secret settings that are not to be checked into Git.\n \"\"\"\n with open('/var/www/local_config.yml') as f:\n return yaml.load(f)\n", "id": "8888037", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "bgg/util/__init__.py" }, { "content": "#!flask/bin/python\n\nfrom flask import Flask, abort\n\nfrom myapp import dosomething\n\napplication = Flask(__name__)\n\n\[email protected]('/', methods=['POST'])\ndef run_calculation():\n try:\n dosomething.print_it(\"I'm doing something!\")\n return 'SUCCESS', 200\n except:\n abort(400)\n", "id": "6589664", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "rest/daemonlistener.py" }, { "content": "\"\"\"\nModule that implements a retry decorator.\n\nYou can, for example, do this:\n\n @retry(5)\n def my_function():\n ...\n\nAnd 'my_function', upon an exception, will be retried 4 more times until\na final exception is raised. 'retry' will wait a little bit longer after each\nfailure before retrying.\n\nVery useful for, for example, retrying a download if timeouts occur frequently.\nCustomization of exceptions and exception handlers is possible.\n\"\"\"\n\nfrom time import sleep\nfrom functools import wraps\n\n\ndef _warning_printer(func, exception, tries_remaining):\n \"\"\"Simple exception handler that prints a warning.\n\n :param exception: The exception instance which was raised\n :param int tries_remaining: The number of tries remaining\n \"\"\"\n print(\"Caught '{0}' in {1}, {2} tries remaining.\".format(\n exception, func.__name__, tries_remaining))\n\n\ndef _error_printer(func, exception, tries):\n \"\"\"Exception handler that prints an error.\n\n :param exception: The exception instance which was raised\n :param int tries: Total number of tries\n \"\"\"\n\n try:\n print(\"{} failed (reason: {}), giving up after {} tries.\".format(\n func.__name__, exception.reason, int(tries)))\n except AttributeError:\n print(\"{} failed, giving up after {} tries.\".format(\n func.__name__, int(tries)))\n\n\ndef retry(max_tries, delay=1, backoff=2, exceptions=(Exception,),\n on_retry=_warning_printer, on_fail=_error_printer):\n \"\"\"Function decorator implementing retry logic.\n\n The decorator will call the function up to max_tries times if it raises\n an exception.\n\n By default it catches instances of the Exception class and subclasses.\n This will recover after all but the most fatal errors. You may specify a\n custom tuple of exception classes with the 'exceptions' argument; the\n function will only be retried if it raises one of the specified\n exceptions.\n\n Additionally you may specify a on_retry function which will be\n called prior to retrying with the number of remaining tries and the\n exception instance. This is primarily intended to give the opportunity to\n log the failure. 
on_fail is another function called after failure if no\n retries remain.\n\n :param int max_tries: Maximum number of retries\n :param int or float delay: Sleep this many seconds * backoff *\n try number after failure\n :param int or float backoff: Multiply delay by this after each failure\n :param tuple exceptions: A tuple of exception classes; default (Exception,)\n :param func on_retry: An on-retry exception handler function\n (args should be: function, exception, tries_remaining)\n :param func on_fail: A final exception handler function\n (args should be: function, exception, tries_remaining)\n \"\"\"\n\n assert max_tries > 0\n\n def dec(func):\n # 'wraps' updates a wrapper function to look like the wrapped function\n @wraps(func)\n def f2(*args, **kwargs):\n mydelay = delay\n tries = reversed(range(max_tries))\n for tries_remaining in tries:\n try:\n return func(*args, **kwargs)\n except exceptions as e:\n if tries_remaining > 0:\n # call on_retry exception handler after an exception\n if on_retry is not None:\n on_retry(func, e, tries_remaining)\n sleep(mydelay)\n mydelay *= backoff\n else:\n # no more retries, call the on_fail exception handler\n if on_fail is not None:\n on_fail(func, e, max_tries)\n else:\n raise e\n return f2\n return dec\n", "id": "8516604", "language": "Python", "matching_score": 1.2828397750854492, "max_stars_count": 1, "path": "bgg/util/retry.py" }, { "content": "import re\nimport time\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom bgg.util.retry import retry\nfrom lxml import etree\n\n# Number of top games to scrape\nN_TOP_GAMES = 5\n# The number of games per page in the BGG rankings\nN_GAMES_PER_PAGE = 100\n# Number of games details to download in a signle API call\nAPI_CHUNK_SIZE = 100\n# Default number of seconds to sleep between API calls\nAPI_SLEEP = 120\n\nBGG_RANKING_URL = 'http://www.boardgamegeek.com/browse/boardgame/page/'\nBGG_API_URL = \"http://www.boardgamegeek.com/xmlapi2/thing?type=boardgame&id={}&stats=1\"\n\n\ndef get_bgg_game_ids(n_games=N_TOP_GAMES):\n \"\"\"Download the IDs of the 'n_games' highest ranked games from BGG.\n\n :param n_games: Number of games to download from BGG rankings\n \"\"\"\n\n # number of pages to scrape\n npages = (n_games - 1) // N_GAMES_PER_PAGE + 1\n\n @retry(3)\n def __get_page(url):\n page = requests.get(url, timeout=120)\n soup = BeautifulSoup(page.content, 'html.parser')\n # items are found by 'id=results_objectname*' attribute in 'div' tag\n game_ids = []\n for ii in range(1, N_GAMES_PER_PAGE + 1):\n # Get the tag of the ii'th game on this page\n item = soup.find('div', {'id': 'results_objectname' + str(ii)})\n # ID can be found in link href\n href = item.a.attrs['href']\n game_id = re.search(r'/boardgame/(.*)/', href).groups()[0]\n game_ids.append(game_id)\n return game_ids\n\n all_ids = []\n for pp in range(1, npages + 1):\n print(\"Reading page {} / {}\".format(pp, npages))\n cur_url = BGG_RANKING_URL + str(pp)\n cur_ids = __get_page(cur_url)\n all_ids.extend(cur_ids)\n\n return all_ids[:n_games]\n\n\ndef get_bgg_game_details(df, sleep=API_SLEEP):\n \"\"\"Queries the BGG API for details about games.\n\n :param df: DataFrame with games, must have a 'game_id' column\n :param int sleep: Number of seconds to sleep between API calls\n :rtype: pd.DataFrame\n \"\"\"\n n_pages = (len(df.index) - 1) // API_CHUNK_SIZE + 1\n for ii in range(1, n_pages + 1):\n print(\"Gettings stats, chunk {} / {}\".format(ii, n_pages))\n\n start_row = (ii - 1) * API_CHUNK_SIZE\n end_row = ii * API_CHUNK_SIZE - 1\n 
selection = df['id'].ix[start_row:end_row]\n url = BGG_API_URL.format(','.join(selection.astype(str)))\n\n result = requests.get(url, timeout=60)\n elem = etree.fromstring(result.content)\n\n items = elem.iterchildren()\n for jj, item in enumerate(items):\n row = start_row + jj\n df.ix[row, 'name'] = [nm.attrib['value']\n for nm in item.findall('name')\n if nm.attrib['type'] == 'primary'][0]\n df.ix[row, 'year'] = item.find('yearpublished').attrib['value']\n df.ix[row, 'minplayers'] = item.find('minplayers').attrib['value']\n df.ix[row, 'maxplayers'] = item.find('maxplayers').attrib['value']\n df.ix[row, 'minplaytime'] = item.find('minplaytime').attrib['value']\n df.ix[row, 'maxplaytime'] = item.find('maxplaytime').attrib['value']\n\n # Use regex to deal with cases like '21 and up'.\n age_str = item.find('minage').attrib['value']\n df.ix[row, 'minage'] = re.findall(r'\\d+', age_str)[0]\n\n df.ix[row, 'boardgamecategory'] = \\\n ','.join(x.attrib['value'] for x in item.findall('link')\n if x.attrib['type'] == 'boardgamecategory')\n df.ix[row, 'boardgamemechanic'] = \\\n ','.join(x.attrib['value'] for x in item.findall('link')\n if x.attrib['type'] == 'boardgamemechanic')\n df.ix[row, 'boardgamedesigner'] = \\\n ','.join(x.attrib['value'] for x in item.findall('link')\n if x.attrib['type'] == 'boardgamedesigner')\n df.ix[row, 'boardgameartist'] = \\\n ','.join(x.attrib['value'] for x in item.findall('link')\n if x.attrib['type'] == 'boardgameartist')\n\n # Statistics\n df.ix[row, 'usersrated'] = item.find('statistics').find('ratings')\\\n .find('usersrated').attrib['value']\n df.ix[row, 'average'] = item.find('statistics').find('ratings')\\\n .find('average').attrib['value']\n df.ix[row, 'bayesaverage'] = item.find('statistics').find('ratings')\\\n .find('bayesaverage').attrib['value']\n df.ix[row, 'stddev'] = item.find('statistics').find('ratings')\\\n .find('stddev').attrib['value']\n df.ix[row, 'averageweight'] = item.find('statistics').find('ratings')\\\n .find('averageweight').attrib['value']\n\n time.sleep(sleep)\n\n # Convert numerical columns to proper dtypes.\n dtypes = {\n 'year': int,\n 'minplayers': int,\n 'maxplayers': int,\n 'minplaytime': int,\n 'maxplaytime': int,\n 'minage': int,\n 'usersrated': int,\n 'average': float,\n 'bayesaverage': float,\n 'stddev': float,\n 'averageweight': float,\n }\n for col, dtype in dtypes.items():\n df[col] = df[col].astype(dtype)\n\n return df\n", "id": "1193874", "language": "Python", "matching_score": 2.705653429031372, "max_stars_count": 1, "path": "bgg/datafetch/bggapi.py" }, { "content": "import pandas as pd\nimport psycopg2\n\nfrom bgg.util.retry import retry\nfrom bgg.util import read_local_cfg\n\nCFG = read_local_cfg()\n\n\nclass GamesDAO(object):\n\n @retry(3)\n def __init__(self):\n\n # define our connection string\n conn_string = 'host={} port={}, dbname={} user={} password={}'.format(\n CFG.get('DB_HOST', 'localhost'),\n CFG.get('DB_PORT', 5432),\n CFG['DB_NAME'],\n CFG['DB_USER'],\n CFG['DB_PASS']\n )\n\n # make connection, which is kept open until class instance is closed\n try:\n self.conn = psycopg2.connect(conn_string)\n except Exception:\n print(\"Database connection could not be made.\")\n raise\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # make sure database connection is closed\n self.conn.commit()\n self.conn.close()\n\n def execute_sql(self, sql, args=None, fetch=None, many=False):\n \"\"\"Execute an SQL statement.\n\n :param str sql: SQL string to execute\n :param 
list args: Data to feed to the cursor.execute(many) statement\n :param fetch: In case of a read statement; 'one' for fetchone, 'all' for fetchall\n :type fetch: None or str\n :param bool many: Whether to use 'execute' or 'executemany'\n \"\"\"\n # 'with' statement takes care of commits/rollbacks, and\n # automatically closes cursor\n with self.conn.cursor() as cur:\n if args is not None:\n if many:\n cur.executemany(sql, args)\n else:\n cur.execute(sql, args)\n else:\n assert many is False\n cur.execute(sql)\n\n # save columns that were read, for later use\n if cur.description:\n # noinspection PyAttributeOutsideInit\n self.readColumns = [col_desc[0] for col_desc in cur.description]\n\n # in case we are fetching things, do this and return the read values\n r = None\n if fetch == 'all':\n r = cur.fetchall()\n elif fetch == 'one':\n r = cur.fetchone()\n return r\n\n def get_all_ids(self):\n return self.load_games(columns=['game_id'])\n\n def load_games(self, columns=None, where=None):\n if columns is None:\n col_str = '*'\n else:\n col_str = ','.join(columns)\n\n if where is None:\n where = ''\n\n with self.conn.cursor() as cur:\n cur.execute(\"SELECT {} FROM boardgames {}}\".format(col_str, where))\n games = cur.fetchall()\n\n return pd.DataFrame(games, columns=columns)\n\n def load_game(self, game_id, columns=None):\n return self.load_games(columns=columns, where='WHERE game_id == {}'.format(game_id))\n\n def update_ids(self, new_ids):\n # Find which of the new IDs to add by differencing with existing IDs.\n old_ids = self.get_all_ids()\n to_add = list(set(new_ids).difference(set(old_ids)))\n # Insert new IDs into table.\n with self.conn.cursor() as cur:\n cur.executemany(\"INSERT INTO boardgames (game_id) VALUES (%s)\", to_add)\n\n def save_games(self, df):\n columns = df.columns\n with self.conn.cursor() as cur:\n cur.executemany(\"INSERT INTO boardgames ({}) VALUES (%s)\"\n .format(columns), df.values)\n", "id": "9244615", "language": "Python", "matching_score": 2.788215398788452, "max_stars_count": 1, "path": "bgg/dao/gamesdao.py" }, { "content": "#!/usr/bin/env python\nimport sys\nimport argparse\n\nfrom bgg.datafetch.bggapi import get_bgg_game_ids, get_bgg_game_details\nfrom bgg.dao.gamesdao import GamesDAO\n\n\ndef update_game_ids():\n \"\"\"Download top games from BGG and stores their IDs in database.\"\"\"\n top_games = get_bgg_game_ids()\n with GamesDAO() as dao:\n dao.update_ids(top_games)\n\n\ndef update_games(new_only=False):\n \"\"\"Update game data in database.\n\n :param bool new_only: Whether or not to only update games that seem\n to be new (i.e., that have an ID but no name).\n \"\"\"\n with GamesDAO() as dao:\n games = dao.load_games()\n if new_only:\n games = games[games['name'] == '']\n sleep = 0\n else:\n sleep = 60\n updated_games = get_bgg_game_details(games, sleep=sleep)\n dao.save_games(updated_games)\n\n\ndef main(task):\n if task == 'update_new_games':\n update_games(new_only=True)\n elif task == 'update_all_games':\n update_games(new_only=False)\n else:\n eval(task + '()')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('task', help='Task to run')\n args = parser.parse_args()\n\n sys.exit(main(args.task))\n", "id": "1925341", "language": "Python", "matching_score": 1.8945152759552002, "max_stars_count": 1, "path": "bgg/datafetch/run_task.py" }, { "content": "#!/usr/bin/env python\nimport os\nfrom argparse import ArgumentParser\n\nfrom moviepy.editor import VideoFileClip\nimport numpy as np\nfrom PIL import Image\nfrom 
tqdm import tqdm\n\n\ndef frange(x, y, inc):\n while x < y:\n yield x\n x += inc\n\n\ndef average_video(filepath, outpath, start=None, end=None, sample_every=1):\n \"\"\"Calculate average of video frames\"\"\"\n\n # Load video\n vid = VideoFileClip(filepath, audio=False)\n width = vid.w\n height = vid.h\n\n if start is None and end is None:\n frame_generator = vid.iter_frames(progress_bar=True, dtype=np.uint8)\n else:\n if start is None:\n start = 0\n if end is None:\n end = vid.duration\n # compute time increment for sampling by frames\n sample_inc = sample_every / vid.fps\n frame_generator = tqdm(vid.get_frame(f) for f in frange(start, end, sample_inc))\n\n # create starting matrix of zeros\n sum_fs = np.zeros(shape=(height, width, 3), dtype=int)\n ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)\n prev_f = np.zeros(shape=(height, width, 3), dtype=int)\n sum_delta_fs = np.zeros(shape=(height, width, 3), dtype=int)\n\n n_frames = 0\n for f in frame_generator:\n delta = f - prev_f\n sum_delta_fs += delta\n sum_fs += f\n\n ma_sum_fs += f\n if divmod(n_frames, 100)[1] == 0 and n_frames > 0:\n ma_f = ma_sum_fs / 100\n Image.fromarray(ma_f.astype(np.uint8))\\\n .save(os.path.join(outpath, 'movavg_{}.png'.format(n_frames)))\n ma_sum_fs = np.zeros(shape=(height, width, 3), dtype=int)\n\n n_frames += 1\n prev_f = f\n\n # average out the values for each frame\n average_delta_f = sum_delta_fs / n_frames\n average_f = sum_fs / n_frames\n\n # Create images\n delta_img = Image.fromarray(average_delta_f.astype(np.uint8))\n delta_img.save(os.path.join(outpath, 'average_delta.png'))\n final_img = Image.fromarray(average_f.astype(np.uint8))\n final_img.save(os.path.join(outpath, 'average.png'))\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description=\"Creates image with averaged pixels\"\n \"for a given movie clip\")\n parser.add_argument(\"-i\", required=True, type=str,\n help=\"filepath to movie clip\")\n parser.add_argument(\"-o\", required=True, type=str,\n help=\"filepath to save image to\")\n parser.add_argument(\"-s\", type=int, required=True,\n help=\"Start time for image processing, in seconds\")\n parser.add_argument(\"-e\", type=int, required=True,\n help=\"End time for image processing, in seconds\")\n parser.add_argument(\"-f\", type=int, default=24,\n help=\"Sample every f frames (default 24)\")\n args = parser.parse_args()\n average_video(args.i, args.o, args.s, args.e, args.f)\n", "id": "5553955", "language": "Python", "matching_score": 0.5609055161476135, "max_stars_count": 1, "path": "film/frameprocessor.py" } ]
1.28284
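Illustrative sketch (not part of the dataset record above): the `retry` decorator in `bgg/util/retry.py` re-calls a failing function up to `max_tries` times, sleeping `delay` seconds and multiplying that sleep by `backoff` after each failure. The usage example below is hedged: `fetch_rankings_page` is a hypothetical helper invented for illustration; only `retry`, its parameters, and the BGG ranking URL and timeout come from the code above.

# Hypothetical usage of bgg.util.retry.retry: the call is retried on
# requests failures, sleeping 2s, then 6s, then 18s between attempts
# (delay=2, backoff=3, max_tries=4 -> at most 3 sleeps before giving up).
import requests
from bgg.util.retry import retry

@retry(4, delay=2, backoff=3, exceptions=(requests.RequestException,))
def fetch_rankings_page(page_number: int) -> str:
    # Any RequestException (timeout, connection error, HTTP error status)
    # raised here triggers another attempt.
    response = requests.get(
        "http://www.boardgamegeek.com/browse/boardgame/page/" + str(page_number),
        timeout=120)
    response.raise_for_status()
    return response.text

html = fetch_rankings_page(1)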
dubreuia
[ { "content": "import os\nimport time\nfrom typing import List\n\nimport numpy as np\nimport tensorflow as tf\nfrom magenta.models.nsynth import utils\nfrom magenta.models.nsynth.wavenet import fastgen\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string(\n \"log\", \"DEBUG\",\n \"The threshold for what messages will be logged. DEBUG, INFO, WARN, ERROR, \"\n \"or FATAL.\")\n\ntf.app.flags.DEFINE_string(\n \"checkpoint\",\n \"checkpoints/wavenet-ckpt/model.ckpt-200000\",\n \"The checkpoint to use, defaults to wavenet\")\n\ntf.app.flags.DEFINE_string(\n \"wav1\",\n \"sounds/160045__jorickhoofd__metal-hit-with-metal-bar-resonance__crop.wav\",\n \"The first sample to mix, defaults to a metal piece hit of 1 second\")\n\ntf.app.flags.DEFINE_string(\n \"wav2\",\n \"sounds/412017__skymary__cat-meow-short__crop.wav\",\n \"The second sample to mix, defaults to a cute cat meow of 1 second\")\n\ntf.app.flags.DEFINE_integer(\n \"sample_length\",\n 16000,\n \"The sample length of the provided samples (and the resulting generation), \"\n \"the samples needs to have the same length, defaults to 1 second\")\n\ntf.app.flags.DEFINE_integer(\n \"sample_rate\",\n 16000,\n \"The sample rate of the provided samples, defaults to 16000 second\")\n\n\ndef encode(paths: List[str],\n sample_length: int = 16000,\n sample_rate: int = 16000,\n checkpoint: str = \"checkpoints/wavenet-ckpt/model.ckpt-200000\") \\\n -> np.ndarray:\n audios = []\n for path in paths:\n audio = utils.load_audio(path,\n sample_length=sample_length,\n sr=sample_rate)\n audios.append(audio)\n audios = np.array(audios)\n encodings = fastgen.encode(audios, checkpoint, sample_length)\n return encodings\n\n\ndef mix(encoding1: np.ndarray,\n encoding2: np.ndarray) \\\n -> np.ndarray:\n encoding_mix = (encoding1 + encoding2) / 2.0\n return encoding_mix\n\n\ndef synthesize(encoding_mix: np.ndarray,\n checkpoint: str = \"checkpoints/wavenet-ckpt/model.ckpt-200000\"):\n os.makedirs(os.path.join(\"output\", \"synth\"), exist_ok=True)\n date_and_time = time.strftime(\"%Y-%m-%d_%H%M%S\")\n output = os.path.join(\"output\", \"synth\", f\"{date_and_time}.wav\")\n encoding_mix = np.array([encoding_mix])\n fastgen.synthesize(encoding_mix,\n checkpoint_path=checkpoint,\n save_paths=[output])\n\n\ndef app(unused_argv):\n encoding1, encoding2 = encode([FLAGS.wav1, FLAGS.wav2],\n sample_length=FLAGS.sample_length,\n sample_rate=FLAGS.sample_rate,\n checkpoint=FLAGS.checkpoint)\n encoding_mix = mix(encoding1, encoding2)\n synthesize(encoding_mix, checkpoint=FLAGS.checkpoint)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(FLAGS.log)\n tf.app.run(app)\n", "id": "10546393", "language": "Python", "matching_score": 6.65944242477417, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/nsynth.py" }, { "content": "import os\nimport time\nfrom typing import List\n\nimport numpy as np\nimport tensorflow as tf\nfrom magenta.models.nsynth.utils import load_audio\nfrom magenta.models.nsynth.wavenet import fastgen\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string(\n \"log\", \"DEBUG\",\n \"The threshold for what messages will be logged. 
DEBUG, INFO, WARN, ERROR, \"\n \"or FATAL.\")\n\ntf.app.flags.DEFINE_string(\n \"checkpoint\",\n \"checkpoints/wavenet-ckpt/model.ckpt-200000\",\n \"The checkpoint to use, defaults to wavenet\")\n\ntf.app.flags.DEFINE_string(\n \"wav1\",\n \"sounds/160045__jorickhoofd__metal-hit-with-metal-bar-resonance__crop.wav\",\n \"The first sample to mix, defaults to a metal piece hit of 1 second\")\n\ntf.app.flags.DEFINE_string(\n \"wav2\",\n \"sounds/412017__skymary__cat-meow-short__crop.wav\",\n \"The second sample to mix, defaults to a cute cat meow of 1 second\")\n\ntf.app.flags.DEFINE_integer(\n \"sample_length\",\n 16000,\n \"The sample length of the provided samples (and the resulting generation), \"\n \"the samples needs to have the same length, defaults to 1 second\")\n\ntf.app.flags.DEFINE_integer(\n \"sample_rate\",\n 16000,\n \"The sample rate of the provided samples, defaults to 16000 second\")\n\n\ndef encode(paths: List[str],\n sample_length: int = 16000,\n sample_rate: int = 16000,\n checkpoint: str = \"checkpoints/wavenet-ckpt/model.ckpt-200000\") \\\n -> np.ndarray:\n # TODO load audio and encode it using nsynth\n pass\n\n\ndef mix(encoding1: np.ndarray,\n encoding2: np.ndarray) \\\n -> np.ndarray:\n # TODO mix the encodings together\n pass\n\n\ndef synthesize(encoding_mix: np.ndarray,\n checkpoint: str = \"checkpoints/wavenet-ckpt/model.ckpt-200000\"):\n os.makedirs(os.path.join(\"output\", \"synth\"), exist_ok=True)\n date_and_time = time.strftime(\"%Y-%m-%d_%H%M%S\")\n output = os.path.join(\"output\", \"synth\", f\"{date_and_time}.wav\")\n # TODO synthetize the encodings into audio with nsynth\n pass\n\n\ndef app(unused_argv):\n encoding1, encoding2 = encode([FLAGS.wav1, FLAGS.wav2],\n sample_length=FLAGS.sample_length,\n sample_rate=FLAGS.sample_rate,\n checkpoint=FLAGS.checkpoint)\n encoding_mix = mix(encoding1, encoding2)\n synthesize(encoding_mix, checkpoint=FLAGS.checkpoint)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(FLAGS.log)\n tf.app.run(app)\n", "id": "9676499", "language": "Python", "matching_score": 0.8751779794692993, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/nsynth_devoxx.py" }, { "content": "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Library functions for creating dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\n\n\ndef generate_mixes(val, num_mixes, sourceid_to_exids, seed=0):\n \"\"\"Generate lists of Example IDs to be mixed.\"\"\"\n del val\n rs = np.random.RandomState(seed=seed) # Make the selection deterministic\n sourceid_to_exids_dict = collections.defaultdict(list)\n for sourceid, exid in sourceid_to_exids:\n sourceid_to_exids_dict[sourceid].append(exid)\n # Generate 5x the number of mixes requested. 
We'll dedup them later, so\n # this helps increase the chance we'll end up with as many as requested.\n # This isn't guaranteed to always work, but it almost always will and is\n # easier than trying to generate every possible mix and sampling from that\n # since the numbers blow up pretty quickly.\n mixes = zip(\n *[rs.choice(k, num_mixes * 5, replace=True).tolist()\n for k in sourceid_to_exids_dict.values()])\n # Retain only mixes with unique lists of examples.\n mixes_unique_examples = [tuple(sorted(mix)) for mix in mixes\n if len(set(mix)) == len(mix)]\n # Retain only unique mixes.\n unique_mixes = list(set(mixes_unique_examples))\n # Limit to only num_mixes.\n rs.shuffle(unique_mixes)\n unique_mixes = unique_mixes[:num_mixes]\n\n keyed_mixes = dict(enumerate(unique_mixes))\n exid_to_mixids = collections.defaultdict(list)\n for mixid, exids in keyed_mixes.items():\n for exid in exids:\n exid_to_mixids[exid].append(mixid)\n return exid_to_mixids\n", "id": "5948999", "language": "Python", "matching_score": 3.7632150650024414, "max_stars_count": 1, "path": "magenta/models/onsets_frames_transcription/create_dataset_lib.py" }, { "content": "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for create_dataset_lib.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nfrom magenta.models.onsets_frames_transcription import create_dataset_lib\n\nimport tensorflow.compat.v1 as tf\n\n\nclass CreateDatasetLibTest(tf.test.TestCase):\n\n def test_generate_unique_mixes(self):\n sourceid_to_exids = [('source1', 'a'), ('source1', 'b'),\n ('source2', 'c'), ('source2', 'd')]\n exid_to_mixids = create_dataset_lib.generate_mixes(\n val='unused', num_mixes=100, sourceid_to_exids=sourceid_to_exids)\n mix_ids = set(itertools.chain(*exid_to_mixids.values()))\n # Requested 100, but there are only 4 unique mixes, so that's how many\n # we should end up with.\n self.assertEqual(4, len(mix_ids))\n\n def test_generate_num_mixes(self):\n sourceid_to_exids = [('source1', 'a'), ('source1', 'b'), ('source1', 'c'),\n ('source2', 'd'), ('source2', 'e'), ('source2', 'f')]\n exid_to_mixids = create_dataset_lib.generate_mixes(\n val='unused', num_mixes=2, sourceid_to_exids=sourceid_to_exids)\n mix_ids = set(itertools.chain(*exid_to_mixids.values()))\n # Ensure we get the number of mixes we requested even when more unique mixes\n # would be possible.\n self.assertEqual(2, len(mix_ids))\n\n def test_unique_mixes_duplicate_sources(self):\n sourceid_to_exids = [('source1', 'a'), ('source1', 'b'), ('source1', 'c'),\n ('source2', 'a'), ('source2', 'b'), ('source2', 'c'),\n ('source3', 'a'), ('source3', 'b'), ('source3', 'c')]\n exid_to_mixids = create_dataset_lib.generate_mixes(\n val='unused', num_mixes=100, sourceid_to_exids=sourceid_to_exids)\n mix_ids = set(itertools.chain(*exid_to_mixids.values()))\n # There are only 3 unique ids, but we're request mixes of 3 items, so only\n # 1 unique 
mix is possible.\n self.assertEqual(1, len(mix_ids))\n\nif __name__ == '__main__':\n tf.test.main()\n", "id": "901745", "language": "Python", "matching_score": 0.24467508494853973, "max_stars_count": 1, "path": "magenta/models/onsets_frames_transcription/create_dataset_lib_test.py" }, { "content": "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data processing pipelines for drum tracks.\"\"\"\n\nfrom magenta.music import drums_lib\nfrom magenta.music import DrumTrack\nfrom magenta.music import events_lib\nfrom magenta.music import sequences_lib\nfrom magenta.music.protobuf import music_pb2\nfrom magenta.pipelines import pipeline\nfrom magenta.pipelines import statistics\nimport tensorflow as tf\n\n\ndef extract_drum_tracks(quantized_sequence,\n search_start_step=0,\n min_bars=7,\n max_steps_truncate=None,\n max_steps_discard=None,\n gap_bars=1.0,\n pad_end=False,\n ignore_is_drum=False):\n \"\"\"Extracts a list of drum tracks from the given quantized NoteSequence.\n\n This function will search through `quantized_sequence` for drum tracks. A drum\n track can span multiple \"tracks\" in the sequence. Only one drum track can be\n active at a given time, but multiple drum tracks can be extracted from the\n sequence if gaps are present.\n\n Once a note-on drum event is encountered, a drum track begins. Gaps of silence\n will be splitting points that divide the sequence into separate drum tracks.\n The minimum size of these gaps are given in `gap_bars`. The size of a bar\n (measure) of music in time steps is computed form the time signature stored in\n `quantized_sequence`.\n\n A drum track is only used if it is at least `min_bars` bars long.\n\n After scanning the quantized NoteSequence, a list of all extracted DrumTrack\n objects is returned.\n\n Args:\n quantized_sequence: A quantized NoteSequence.\n search_start_step: Start searching for drums at this time step. Assumed to\n be the beginning of a bar.\n min_bars: Minimum length of drum tracks in number of bars. Shorter drum\n tracks are discarded.\n max_steps_truncate: Maximum number of steps in extracted drum tracks. If\n defined, longer drum tracks are truncated to this threshold. If pad_end\n is also True, drum tracks will be truncated to the end of the last bar\n below this threshold.\n max_steps_discard: Maximum number of steps in extracted drum tracks. 
If\n defined, longer drum tracks are discarded.\n gap_bars: A drum track comes to an end when this number of bars (measures)\n of no drums is encountered.\n pad_end: If True, the end of the drum track will be padded with empty events\n so that it will end at a bar boundary.\n ignore_is_drum: Whether accept notes where `is_drum` is False.\n\n Returns:\n drum_tracks: A python list of DrumTrack instances.\n stats: A dictionary mapping string names to `statistics.Statistic` objects.\n\n Raises:\n NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length\n (derived from its time signature) is not an integer number of time\n steps.\n \"\"\"\n drum_tracks = []\n stats = dict((stat_name, statistics.Counter(stat_name)) for stat_name in\n ['drum_tracks_discarded_too_short',\n 'drum_tracks_discarded_too_long', 'drum_tracks_truncated'])\n # Create a histogram measuring drum track lengths (in bars not steps).\n # Capture drum tracks that are very small, in the range of the filter lower\n # bound `min_bars`, and large. The bucket intervals grow approximately\n # exponentially.\n stats['drum_track_lengths_in_bars'] = statistics.Histogram(\n 'drum_track_lengths_in_bars',\n [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, min_bars // 2, min_bars,\n min_bars + 1, min_bars - 1])\n\n steps_per_bar = int(\n sequences_lib.steps_per_bar_in_quantized_sequence(quantized_sequence))\n\n # Quantize the track into a DrumTrack object.\n # If any notes start at the same time, only one is kept.\n while 1:\n drum_track = DrumTrack()\n drum_track.from_quantized_sequence(\n quantized_sequence,\n search_start_step=search_start_step,\n gap_bars=gap_bars,\n pad_end=pad_end,\n ignore_is_drum=ignore_is_drum)\n search_start_step = (\n drum_track.end_step +\n (search_start_step - drum_track.end_step) % steps_per_bar)\n if not drum_track:\n break\n\n # Require a certain drum track length.\n if len(drum_track) < drum_track.steps_per_bar * min_bars:\n stats['drum_tracks_discarded_too_short'].increment()\n continue\n\n # Discard drum tracks that are too long.\n if max_steps_discard is not None and len(drum_track) > max_steps_discard:\n stats['drum_tracks_discarded_too_long'].increment()\n continue\n\n # Truncate drum tracks that are too long.\n if max_steps_truncate is not None and len(drum_track) > max_steps_truncate:\n truncated_length = max_steps_truncate\n if pad_end:\n truncated_length -= max_steps_truncate % drum_track.steps_per_bar\n drum_track.set_length(truncated_length)\n stats['drum_tracks_truncated'].increment()\n\n stats['drum_track_lengths_in_bars'].increment(\n len(drum_track) // drum_track.steps_per_bar)\n\n drum_tracks.append(drum_track)\n\n return drum_tracks, stats.values()\n\n\nclass DrumsExtractor(pipeline.Pipeline):\n \"\"\"Extracts drum tracks from a quantized NoteSequence.\"\"\"\n\n def __init__(self, min_bars=7, max_steps=512, gap_bars=1.0, name=None):\n super(DrumsExtractor, self).__init__(\n input_type=music_pb2.NoteSequence,\n output_type=drums_lib.DrumTrack,\n name=name)\n self._min_bars = min_bars\n self._max_steps = max_steps\n self._gap_bars = gap_bars\n\n def transform(self, quantized_sequence):\n try:\n # pylint has a false positive error on this method call for some reason.\n # pylint:disable=redundant-keyword-arg\n drum_tracks, stats = extract_drum_tracks(\n quantized_sequence,\n min_bars=self._min_bars,\n max_steps_truncate=self._max_steps,\n gap_bars=self._gap_bars)\n # pylint:enable=redundant-keyword-arg\n except events_lib.NonIntegerStepsPerBarError as detail:\n tf.logging.warning('Skipped 
sequence: %s', detail)\n drum_tracks = []\n stats = [statistics.Counter('non_integer_steps_per_bar', 1)]\n self._set_stats(stats)\n return drum_tracks\n", "id": "2519942", "language": "Python", "matching_score": 4.581577777862549, "max_stars_count": 1, "path": "magenta/pipelines/drum_pipelines.py" }, { "content": "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data processing pipelines for lead sheets.\"\"\"\nimport copy\n\nfrom magenta.music import chord_symbols_lib\nfrom magenta.music import chords_lib\nfrom magenta.music import events_lib\nfrom magenta.music import lead_sheets_lib\nfrom magenta.music import LeadSheet\nfrom magenta.music import sequences_lib\nfrom magenta.music.protobuf import music_pb2\nfrom magenta.pipelines import chord_pipelines\nfrom magenta.pipelines import melody_pipelines\nfrom magenta.pipelines import pipeline\nfrom magenta.pipelines import statistics\nimport tensorflow as tf\n\n\nclass LeadSheetExtractor(pipeline.Pipeline):\n \"\"\"Extracts lead sheet fragments from a quantized NoteSequence.\"\"\"\n\n def __init__(self, min_bars=7, max_steps=512, min_unique_pitches=5,\n gap_bars=1.0, ignore_polyphonic_notes=False, filter_drums=True,\n require_chords=True, all_transpositions=True, name=None):\n super(LeadSheetExtractor, self).__init__(\n input_type=music_pb2.NoteSequence,\n output_type=lead_sheets_lib.LeadSheet,\n name=name)\n self._min_bars = min_bars\n self._max_steps = max_steps\n self._min_unique_pitches = min_unique_pitches\n self._gap_bars = gap_bars\n self._ignore_polyphonic_notes = ignore_polyphonic_notes\n self._filter_drums = filter_drums\n self._require_chords = require_chords\n self._all_transpositions = all_transpositions\n\n def transform(self, quantized_sequence):\n try:\n lead_sheets, stats = extract_lead_sheet_fragments(\n quantized_sequence,\n min_bars=self._min_bars,\n max_steps_truncate=self._max_steps,\n min_unique_pitches=self._min_unique_pitches,\n gap_bars=self._gap_bars,\n ignore_polyphonic_notes=self._ignore_polyphonic_notes,\n filter_drums=self._filter_drums,\n require_chords=self._require_chords,\n all_transpositions=self._all_transpositions)\n except events_lib.NonIntegerStepsPerBarError as detail:\n tf.logging.warning('Skipped sequence: %s', detail)\n lead_sheets = []\n stats = [statistics.Counter('non_integer_steps_per_bar', 1)]\n except chord_symbols_lib.ChordSymbolError as detail:\n tf.logging.warning('Skipped sequence: %s', detail)\n lead_sheets = []\n stats = [statistics.Counter('chord_symbol_exception', 1)]\n self._set_stats(stats)\n return lead_sheets\n\n\ndef extract_lead_sheet_fragments(quantized_sequence,\n search_start_step=0,\n min_bars=7,\n max_steps_truncate=None,\n max_steps_discard=None,\n gap_bars=1.0,\n min_unique_pitches=5,\n ignore_polyphonic_notes=True,\n pad_end=False,\n filter_drums=True,\n require_chords=False,\n all_transpositions=False):\n \"\"\"Extracts a list of lead sheet fragments from a quantized NoteSequence.\n\n This function first extracts melodies 
using melodies_lib.extract_melodies,\n then extracts the chords underlying each melody using\n chords_lib.extract_chords_for_melodies.\n\n Args:\n quantized_sequence: A quantized NoteSequence object.\n search_start_step: Start searching for a melody at this time step. Assumed\n to be the first step of a bar.\n min_bars: Minimum length of melodies in number of bars. Shorter melodies are\n discarded.\n max_steps_truncate: Maximum number of steps in extracted melodies. If\n defined, longer melodies are truncated to this threshold. If pad_end is\n also True, melodies will be truncated to the end of the last bar below\n this threshold.\n max_steps_discard: Maximum number of steps in extracted melodies. If\n defined, longer melodies are discarded.\n gap_bars: A melody comes to an end when this number of bars (measures) of\n silence is encountered.\n min_unique_pitches: Minimum number of unique notes with octave equivalence.\n Melodies with too few unique notes are discarded.\n ignore_polyphonic_notes: If True, melodies will be extracted from\n `quantized_sequence` tracks that contain polyphony (notes start at the\n same time). If False, tracks with polyphony will be ignored.\n pad_end: If True, the end of the melody will be padded with NO_EVENTs so\n that it will end at a bar boundary.\n filter_drums: If True, notes for which `is_drum` is True will be ignored.\n require_chords: If True, only return lead sheets that have at least one\n chord other than NO_CHORD. If False, lead sheets with only melody will\n also be returned.\n all_transpositions: If True, also transpose each lead sheet fragment into\n all 12 keys.\n\n Returns:\n A python list of LeadSheet instances.\n\n Raises:\n NonIntegerStepsPerBarError: If `quantized_sequence`'s bar length\n (derived from its time signature) is not an integer number of time\n steps.\n \"\"\"\n sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)\n stats = dict([('empty_chord_progressions',\n statistics.Counter('empty_chord_progressions'))])\n melodies, melody_stats = melody_pipelines.extract_melodies(\n quantized_sequence, search_start_step=search_start_step,\n min_bars=min_bars, max_steps_truncate=max_steps_truncate,\n max_steps_discard=max_steps_discard, gap_bars=gap_bars,\n min_unique_pitches=min_unique_pitches,\n ignore_polyphonic_notes=ignore_polyphonic_notes, pad_end=pad_end,\n filter_drums=filter_drums)\n chord_progressions, chord_stats = chord_pipelines.extract_chords_for_melodies(\n quantized_sequence, melodies)\n lead_sheets = []\n for melody, chords in zip(melodies, chord_progressions):\n # If `chords` is None, it's because a chord progression could not be\n # extracted for this particular melody.\n if chords is not None:\n if require_chords and all(chord == chords_lib.NO_CHORD\n for chord in chords):\n stats['empty_chord_progressions'].increment()\n else:\n lead_sheet = LeadSheet(melody, chords)\n if all_transpositions:\n for amount in range(-6, 6):\n transposed_lead_sheet = copy.deepcopy(lead_sheet)\n transposed_lead_sheet.transpose(amount)\n lead_sheets.append(transposed_lead_sheet)\n else:\n lead_sheets.append(lead_sheet)\n return lead_sheets, list(stats.values()) + melody_stats + chord_stats\n", "id": "143568", "language": "Python", "matching_score": 1.252226710319519, "max_stars_count": 1, "path": "magenta/pipelines/lead_sheet_pipelines.py" }, { "content": "\"\"\"\nConverts a pretty midi sequence to a boket plot\n\"\"\"\nimport argparse\nimport ast\nimport math\nimport os\nimport sys\nfrom typing import List\nfrom typing 
import Tuple\n\nimport bokeh\nimport bokeh.plotting\nfrom bokeh.colors.groups import purple as colors\nfrom bokeh.embed import file_html\nfrom bokeh.io import output_file, output_notebook\nfrom bokeh.io import save\nfrom bokeh.io import show\nfrom bokeh.layouts import column\nfrom bokeh.models import BoxAnnotation\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.models import Label\nfrom bokeh.models import Range1d\nfrom bokeh.models import Title\nfrom bokeh.models.callbacks import CustomJS\nfrom bokeh.models.widgets.buttons import Button\nfrom bokeh.resources import CDN\nfrom pretty_midi import PrettyMIDI\nfrom pretty_midi import TimeSignature\n\nfrom .presets import Coloring\nfrom .presets import Preset\n\n\nclass Plotter:\n \"\"\"\n Plotter class with plot size, time scaling and live reload\n configuration.\n \"\"\"\n\n _MAX_PITCH = 127\n _MIN_PITCH = 0\n\n def __init__(self,\n preset: Preset = None,\n qpm: float = None,\n plot_pitch_range_start: int = None,\n plot_pitch_range_stop: int = None,\n plot_bar_range_start: int = None,\n plot_bar_range_stop: int = None,\n plot_max_length_bar: int = 8,\n bar_fill_alphas: List[float] = None,\n coloring: Coloring = Coloring.PITCH,\n show_velocity: bool = False,\n midi_time_signature: str = None,\n live_reload: bool = False):\n if not preset:\n preset = Preset()\n if not bar_fill_alphas:\n bar_fill_alphas = [0.25, 0.05]\n self._preset = preset\n self._qpm = qpm\n self._plot_pitch_range_start = plot_pitch_range_start\n self._plot_pitch_range_stop = plot_pitch_range_stop\n self._plot_bar_range_start = plot_bar_range_start\n self._plot_bar_range_stop = plot_bar_range_stop\n self._plot_max_length_bar = plot_max_length_bar\n self._bar_fill_alphas = bar_fill_alphas\n self._coloring = coloring\n self._show_velocity = show_velocity\n self._midi_time_signature = midi_time_signature\n self._live_reload = live_reload\n self._show_counter = 0\n\n def _get_qpm(self, pm: PrettyMIDI):\n \"\"\"\n Returns the first tempo change that is not zero, raises exception\n if not found or multiple tempo present.\n \"\"\"\n if self._qpm:\n return self._qpm\n qpm = None\n for tempo_change in pm.get_tempo_changes():\n if tempo_change.min() and tempo_change.max() and tempo_change.min() == tempo_change.max():\n if qpm:\n raise Exception(\"Multiple tempo changes are not supported \"\n + str(pm.get_tempo_changes()))\n qpm = tempo_change.min()\n if not qpm:\n raise Exception(\"Unknown qpm in: \"\n + str(pm.get_tempo_changes()))\n return qpm\n\n def _get_color(self, index_instrument, note):\n \"\"\"\n Returns the color for the instrument and the note, depends\n on self._coloring.\n \"\"\"\n if self._coloring is Coloring.PITCH:\n color_index = (note.pitch - 36) % len(colors)\n elif self._coloring is Coloring.INSTRUMENT:\n color_index = ((index_instrument + 1) * 5) % len(colors)\n else:\n raise Exception(\"Unknown coloring: \" + str(self._coloring))\n color = colors[color_index]\n color = color.lighten(0.1)\n return color\n\n def plot(self, pm: PrettyMIDI):\n \"\"\"\n Plots the pretty midi object as a plot object.\n\n :param pm: the PrettyMIDI instance to plot\n :return: the bokeh plot layout\n \"\"\"\n preset = self._preset\n\n # Calculates the QPM from the MIDI file, might raise exception if confused\n qpm = self._get_qpm(pm)\n\n # Initialize the tools, those are present on the right hand side\n plot = bokeh.plotting.figure(\n tools=\"reset,hover,save,wheel_zoom,pan\",\n toolbar_location=preset.toolbar_location)\n\n # Setup the hover and the data dict for bokeh,\n # each 
property must match a property in the data dict\n plot.select(dict(type=bokeh.models.HoverTool)).tooltips = ({\n \"program\": \"@program\",\n \"pitch\": \"@top\",\n \"velocity\": \"@velocity\",\n \"duration\": \"@duration\",\n \"start_time\": \"@left\",\n \"end_time\": \"@right\"})\n data = dict(\n program=[],\n top=[],\n bottom=[],\n left=[],\n right=[],\n duration=[],\n velocity=[],\n color=[])\n\n # Puts the notes in the dict for bokeh and saves first\n # and last note time, bigger and smaller pitch\n pitch_min = None\n pitch_max = None\n first_note_start = None\n last_note_end = None\n index_instrument = 0\n for instrument in pm.instruments:\n for note in instrument.notes:\n pitch_min = min(pitch_min or self._MAX_PITCH, note.pitch)\n pitch_max = max(pitch_max or self._MIN_PITCH, note.pitch)\n color = self._get_color(index_instrument, note)\n note_start = note.start\n note_end = note.start + (note.end - note.start)\n data[\"program\"].append(instrument.program)\n data[\"top\"].append(note.pitch)\n if self._show_velocity:\n data[\"bottom\"].append(note.pitch + (note.velocity / 127))\n else:\n data[\"bottom\"].append(note.pitch + 1)\n data[\"left\"].append(note_start)\n data[\"right\"].append(note_end)\n data[\"duration\"].append(note_end - note_start)\n data[\"velocity\"].append(note.velocity)\n data[\"color\"].append(color)\n first_note_start = min(first_note_start or sys.maxsize, note_start)\n last_note_end = max(last_note_end or 0, note_end)\n index_instrument = index_instrument + 1\n\n # Shows an empty plot even if there are no notes\n if first_note_start is None or last_note_end is None or pitch_min is None or pitch_max is None:\n pitch_min = self._MIN_PITCH\n pitch_max = pitch_min + 5\n first_note_start = 0\n last_note_end = 0\n\n # Gets the pitch range (min, max) from either the provided arguments\n # or min and max values from the notes\n if self._plot_pitch_range_start is not None:\n pitch_min = self._plot_pitch_range_start\n else:\n pitch_min = min(self._MAX_PITCH, pitch_min)\n if self._plot_pitch_range_stop is not None:\n pitch_max = self._plot_pitch_range_stop\n else:\n pitch_max = max(self._MIN_PITCH, pitch_max)\n\n pitch_range = pitch_max + 1 - pitch_min\n\n # Draws the rectangles on the plot from the data\n source = ColumnDataSource(data=data)\n plot.quad(left=\"left\",\n right=\"right\",\n top=\"top\",\n bottom=\"bottom\",\n line_alpha=1,\n line_color=\"black\",\n color=\"color\",\n source=source)\n\n # Draws the y grid by hand, because the grid has label on the ticks, but\n # for a plot like this, the labels needs to fit in between the ticks.\n # Also useful to change the background of the grid each line\n for pitch in range(pitch_min, pitch_max + 1):\n # Draws the background box and contours, on the underlay layer, so\n # that the rectangles and over the box annotations\n fill_alpha = (0.15 if pitch % 2 == 0 else 0.00)\n box = BoxAnnotation(bottom=pitch,\n top=pitch + 1,\n fill_color=\"gray\",\n fill_alpha=fill_alpha,\n line_color=\"black\",\n line_alpha=0.3,\n line_width=1,\n level=\"underlay\")\n plot.add_layout(box)\n label = Label(\n x=preset.label_y_axis_offset_x,\n y=pitch + preset.label_y_axis_offset_y,\n x_units=\"screen\",\n text=str(pitch),\n render_mode=\"css\",\n text_font_size=preset.label_text_font_size,\n text_font_style=preset.label_text_font_style)\n plot.add_layout(label)\n\n # Gets the time signature from pretty midi, or 4/4 if none\n if self._midi_time_signature:\n numerator, denominator = self._midi_time_signature.split(\"/\")\n time_signature = 
TimeSignature(int(numerator), int(denominator), 0)\n else:\n if pm.time_signature_changes:\n if len(pm.time_signature_changes) > 1:\n raise Exception(\"Multiple time signatures are not supported\")\n time_signature = pm.time_signature_changes[0]\n else:\n time_signature = TimeSignature(4, 4, 0)\n\n # Gets seconds per bar and seconds per beat\n if len(pm.get_beats()) >= 2:\n seconds_per_beat = pm.get_beats()[1] - pm.get_beats()[0]\n else:\n seconds_per_beat = 0.5\n if len(pm.get_downbeats()) >= 2:\n seconds_per_bar = pm.get_downbeats()[1] - pm.get_downbeats()[0]\n else:\n seconds_per_bar = 2.0\n\n # Defines the end time of the plot in seconds\n if self._plot_bar_range_stop is not None:\n plot_end_time = self._plot_bar_range_stop * seconds_per_bar\n else:\n # Calculates the plot start and end time, the start time can start after\n # notes or truncate notes if the plot is too long (we left truncate the\n # plot with the bounds)\n # The plot start and plot end are a multiple of seconds per bar (closest\n # smaller value for the start time, closest higher value for the end time)\n plot_end_time = int((last_note_end) / seconds_per_bar) * seconds_per_bar\n # If the last note end is exactly on a multiple of seconds per bar,\n # we don't start a new one\n is_on_bar = math.isclose(last_note_end % seconds_per_bar, seconds_per_bar)\n is_on_bar_exact = math.isclose(last_note_end % seconds_per_bar, 0.0)\n if not is_on_bar and not is_on_bar_exact:\n plot_end_time += seconds_per_bar\n\n # Defines the start time of the plot in seconds\n if self._plot_bar_range_start is not None:\n plot_start_time = self._plot_bar_range_start * seconds_per_bar\n else:\n start_time = int(first_note_start / seconds_per_bar) * seconds_per_bar\n plot_max_length_time = self._plot_max_length_bar * seconds_per_bar\n plot_start_time = max(plot_end_time - plot_max_length_time, start_time)\n\n # Draws the vertical bar grid, with a different background color\n # for each bar\n if preset.show_bar:\n bar_count = 0\n for bar_time in pm.get_downbeats():\n fill_alpha_index = bar_count % len(self._bar_fill_alphas)\n fill_alpha = self._bar_fill_alphas[fill_alpha_index]\n box = BoxAnnotation(left=bar_time,\n right=bar_time + seconds_per_bar,\n fill_color=\"gray\",\n fill_alpha=fill_alpha,\n line_color=\"black\",\n line_width=2,\n line_alpha=0.5,\n level=\"underlay\")\n plot.add_layout(box)\n bar_count += 1\n\n # Draws the vertical beat grid, those are only grid lines\n if preset.show_beat:\n for beat_time in pm.get_beats():\n box = BoxAnnotation(left=beat_time,\n right=beat_time + seconds_per_beat,\n fill_color=None,\n line_color=\"black\",\n line_width=1,\n line_alpha=0.4,\n level=\"underlay\")\n plot.add_layout(box)\n\n # Configure x axis\n plot.xaxis.bounds = (plot_start_time, plot_end_time)\n plot.xaxis.axis_label = \"time (SEC)\"\n plot.xaxis.axis_label_text_font_size = preset.axis_label_text_font_size\n plot.xaxis.ticker = bokeh.models.SingleIntervalTicker(interval=1)\n plot.xaxis.major_tick_line_alpha = 0.9\n plot.xaxis.major_tick_line_width = 1\n plot.xaxis.major_tick_out = preset.axis_x_major_tick_out\n plot.xaxis.minor_tick_line_alpha = 0\n plot.xaxis.major_label_text_font_size = preset.label_text_font_size\n plot.xaxis.major_label_text_font_style = preset.label_text_font_style\n\n # Configure y axis\n plot.yaxis.bounds = (pitch_min, pitch_max + 1)\n plot.yaxis.axis_label = \"pitch (MIDI)\"\n plot.yaxis.axis_label_text_font_size = preset.axis_label_text_font_size\n plot.yaxis.ticker = bokeh.models.SingleIntervalTicker(interval=1)\n 
plot.yaxis.major_label_text_alpha = 0\n plot.yaxis.major_tick_line_alpha = 0.9\n plot.yaxis.major_tick_line_width = 1\n plot.yaxis.major_tick_out = preset.axis_y_major_tick_out\n plot.yaxis.minor_tick_line_alpha = 0\n plot.yaxis.axis_label_standoff = preset.axis_y_label_standoff\n plot.outline_line_width = 1\n plot.outline_line_alpha = 1\n plot.outline_line_color = \"black\"\n\n # The x grid is deactivated because is draw by hand (see x grid code)\n plot.xgrid.grid_line_color = None\n\n # The y grid is deactivated because is draw by hand (see y grid code)\n plot.ygrid.grid_line_color = None\n\n # Configure the plot size and range\n plot_title_text = \"Visual MIDI (%s QPM, %s/%s)\" % (\n str(int(qpm)), time_signature.numerator, time_signature.denominator)\n plot.title = Title(text=plot_title_text,\n text_font_size=preset.title_text_font_size)\n plot.plot_width = preset.plot_width\n if preset.row_height:\n plot.plot_height = pitch_range * preset.row_height\n else:\n plot.plot_height = preset.plot_height\n plot.x_range = Range1d(plot_start_time, plot_end_time)\n plot.y_range = Range1d(pitch_min, pitch_max + 1)\n plot.min_border_right = 50\n\n if self._live_reload and preset.stop_live_reload_button:\n callback = CustomJS(code=\"clearInterval(liveReloadInterval)\")\n button = Button(label=\"stop live reload\")\n button.js_on_click(callback)\n layout = column(button, plot)\n else:\n layout = column(plot)\n\n return layout\n\n def save(self, pm: PrettyMIDI, filepath: str):\n \"\"\"\n Saves the pretty midi object as a plot file (html) in the provided file. If\n the live reload option is activated, the opened page will periodically\n refresh.\n\n :param pm: the PrettyMIDI instance to plot\n :param filepath: the file path to save the resulting plot to\n :return: the bokeh plot layout\n \"\"\"\n plot = self.plot(pm)\n if self._live_reload:\n html = file_html(plot, CDN)\n html = html.replace(\"</head>\", \"\"\"\n <script type=\"text/javascript\">\n var liveReloadInterval = window.setInterval(function(){\n location.reload();\n }, 2000);\n </script>\n </head>\"\"\")\n with open(filepath, 'w') as file:\n file.write(html)\n else:\n output_file(filepath)\n save(plot)\n return plot\n\n def show(self, pm: PrettyMIDI, filepath: str):\n \"\"\"\n Shows the pretty midi object as a plot file (html) in the browser. 
If\n the live reload option is activated, the opened page will periodically\n refresh.\n\n :param pm: the PrettyMIDI instance to plot\n :param filepath: the file path to save the resulting plot to\n :return: the bokeh plot layout\n \"\"\"\n plot = self.plot(pm)\n if self._live_reload:\n html = file_html(plot, CDN)\n html = html.replace(\"</head>\", \"\"\"\n <script type=\"text/javascript\">\n var liveReloadInterval = window.setInterval(function(){\n location.reload();\n }, 2000);\n </script>\n </head>\"\"\")\n with open(filepath, 'w') as file:\n file.write(html)\n if self._show_counter == 0:\n import webbrowser\n webbrowser.open(\"file://\" + os.path.realpath(filepath), new=2)\n else:\n output_file(filepath)\n show(plot)\n self._show_counter += 1\n return plot\n\n def show_notebook(self, pm: PrettyMIDI):\n \"\"\"\n Shows the pretty midi object as a plot file in the notebook.\n\n :param pm: the PrettyMIDI instance to plot\n :return: the bokeh plot layout\n \"\"\"\n plot = self.plot(pm)\n output_notebook()\n show(plot)\n return plot\n\n\ndef console_entry_point():\n flags_plot = [\n (\"qpm\", int),\n (\"plot_pitch_range_start\", int),\n (\"plot_pitch_range_stop\", int),\n (\"plot_bar_range_start\", int),\n (\"plot_bar_range_stop\", int),\n (\"plot_max_length_bar\", int),\n (\"bar_fill_alphas\", str, ast.literal_eval),\n (\"coloring\", str, Coloring.from_name),\n (\"show_velocity\", str, ast.literal_eval),\n (\"midi_time_signature\", str),\n (\"live_reload\", str, ast.literal_eval),\n ]\n flags_preset = [\n (\"plot_width\", int),\n (\"plot_height\", int),\n (\"row_height\", int),\n (\"show_bar\", str, ast.literal_eval),\n (\"show_beat\", str, ast.literal_eval),\n (\"title_text_font_size\", str),\n (\"axis_label_text_font_size\", str),\n (\"axis_x_major_tick_out\", int),\n (\"axis_y_major_tick_out\", int),\n (\"label_y_axis_offset_x\", float),\n (\"label_y_axis_offset_y\", float),\n (\"axis_y_label_standoff\", int),\n (\"label_text_font_size\", str),\n (\"label_text_font_style\", str),\n (\"toolbar_location\", str),\n (\"stop_live_reload_button\", str, ast.literal_eval),\n ]\n parser = argparse.ArgumentParser()\n [parser.add_argument(\"--\" + flag[0], type=flag[1]) for flag in flags_plot]\n [parser.add_argument(\"--\" + flag[0], type=flag[1]) for flag in flags_preset]\n parser.add_argument(\"files\", type=str, nargs='+')\n args = parser.parse_args()\n\n def _eval_parser_arg(flag: Tuple):\n value = None if getattr(args, flag[0]) == \"None\" else getattr(args, flag[0])\n if not value:\n return None\n if len(flag) == 3:\n try:\n return flag[2](value)\n except ValueError:\n raise Exception(\"Cannot transform flag '\" + str(flag[0])\n + \"' of type '\" + str(flag[1])\n + \"' with value '\" + str(value) + \"'\")\n return value\n\n kwargs_preset = {flag[0]: _eval_parser_arg(flag)\n for flag in flags_preset\n if getattr(args, flag[0], None)}\n preset = Preset(**kwargs_preset)\n\n kwargs_plotter = {flag[0]: _eval_parser_arg(flag)\n for flag in flags_plot\n if getattr(args, flag[0], None)}\n plotter = Plotter(preset=preset, **kwargs_plotter)\n\n for midi_file in args.files:\n plot_file = midi_file.replace(\".mid\", \".html\")\n print(\"Plotting midi file \" + midi_file + \" to \" + plot_file)\n pretty_midi = PrettyMIDI(midi_file)\n plotter.save(pretty_midi, plot_file)\n sys.exit(0)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "id": "9487691", "language": "Python", "matching_score": 4.572938442230225, "max_stars_count": 32, "path": "visual_midi/visual_midi.py" }, { "content": "from 
pretty_midi import PrettyMIDI\nfrom visual_midi import Plotter\nfrom visual_midi import Preset\n\n# Loading a file on disk using PrettyMidi\npm = PrettyMIDI(\"./example-01.mid\")\n\n# Plot the result using VisualMidi, modifying\n# the presentation with Preset\npreset = Preset(\n plot_width=1200,\n plot_height=500,\n show_beat=False,\n axis_label_text_font_size=\"14px\",\n label_text_font_size=\"12px\",\n toolbar_location=None,\n)\nplotter = Plotter(\n preset=preset,\n plot_max_length_bar=4,\n)\nplotter.show(pm, \"/tmp/example-02.html\")\n", "id": "10737641", "language": "Python", "matching_score": 2.783947467803955, "max_stars_count": 5, "path": "conferences/python-typing/code/example-02.py" }, { "content": "from pretty_midi import PrettyMIDI\nfrom visual_midi import Plotter\n\n# Loading a file on disk using PrettyMidi,\n# and show some information\npm = PrettyMIDI(\"./example-01.mid\")\nprint(pm.instruments)\nprint(pm.instruments[0].notes)\nprint(pm.instruments[0].notes[0])\nprint(pm.get_tempo_changes())\n\n# Plot the result using VisualMidi\nplotter = Plotter()\nplotter.show(pm, \"/tmp/example-01.html\")", "id": "6920646", "language": "Python", "matching_score": 1.2306514978408813, "max_stars_count": 5, "path": "conferences/python-typing/code/example-01.py" }, { "content": "from note_seq import midi_to_note_sequence\nfrom note_seq.midi_io import note_sequence_to_pretty_midi\nfrom note_seq.protobuf import music_pb2\nfrom visual_midi import Plotter\n\nsequence = music_pb2.NoteSequence()\n\n# Add the notes and configuration to the sequence.\nsequence.notes.add(pitch=60, start_time=0.0, end_time=0.5, velocity=80)\nsequence.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=80)\n...\nsequence.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)\nsequence.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)\nsequence.total_time = 8\nsequence.tempos.add(qpm=60)\n\n# Convert \"note-seq\" -> \"pretty-midi\"\npretty_midi = note_sequence_to_pretty_midi(sequence)\nprint(pretty_midi)\n\n# Convert \"pretty-midi\" -> \"note-seq\"\nsequence = midi_to_note_sequence(pretty_midi)\nprint(sequence)\n\n# Convert \"pretty-midi\" -> \"visual-midi\"\nplotter = Plotter()\nplotter.show(pretty_midi, \"/tmp/example-04.html\")\n", "id": "1583774", "language": "Python", "matching_score": 2.6169912815093994, "max_stars_count": 5, "path": "conferences/python-typing/code/example-04.py" }, { "content": "from note_seq.protobuf import music_pb2\n\nsequence = music_pb2.NoteSequence()\n\n# Add the notes and configuration to the sequence.\nsequence.notes.add(pitch=60, start_time=0.0, end_time=0.5, velocity=80)\nsequence.notes.add(pitch=60, start_time=0.5, end_time=1.0, velocity=80)\n...\nsequence.notes.add(pitch=62, start_time=6.5, end_time=7.0, velocity=80)\nsequence.notes.add(pitch=60, start_time=7.0, end_time=8.0, velocity=80)\nsequence.total_time = 8\nsequence.tempos.add(qpm=60)\n\nprint(sequence)\n", "id": "7522885", "language": "Python", "matching_score": 1.020415186882019, "max_stars_count": 5, "path": "conferences/python-typing/code/example-03.py" }, { "content": "import os\n\nimport magenta.music as mm\nfrom magenta.models.drums_rnn import drums_rnn_sequence_generator\nfrom magenta.models.melody_rnn import melody_rnn_sequence_generator\nfrom magenta.music import sequences_lib as ss\nfrom magenta.protobuf import generator_pb2\nfrom magenta.protobuf import music_pb2\nfrom magenta.protobuf.music_pb2 import NoteSequence\n\n\ndef reset(sequence: NoteSequence,\n loop_start_time: float,\n 
loop_end_time: float,\n seconds_per_loop: float):\n sequence = music_pb2.NoteSequence()\n sequence = loop(sequence,\n loop_start_time,\n loop_end_time,\n seconds_per_loop)\n return sequence\n\n\ndef loop(sequence: NoteSequence,\n loop_start_time: float,\n loop_end_time: float,\n seconds_per_loop: float):\n sequence = ss.trim_note_sequence(sequence,\n loop_start_time,\n loop_end_time)\n sequence = ss.shift_sequence_times(sequence,\n seconds_per_loop)\n return sequence\n\n\ndef generate(sequence: NoteSequence,\n name: str,\n bundle_filename: str,\n config_name: str,\n generation_start_time: float,\n generation_end_time: float):\n generator_options = generator_pb2.GeneratorOptions()\n generator_options.args['temperature'].float_value = 1\n generator_options.generate_sections.add(\n start_time=generation_start_time,\n end_time=generation_end_time)\n sequence_generator = get_sequence_generator(name,\n bundle_filename,\n config_name)\n sequence = sequence_generator.generate(sequence,\n generator_options)\n sequence = ss.trim_note_sequence(sequence,\n generation_start_time,\n generation_end_time)\n return sequence\n\n\ndef get_sequence_generator(name: str,\n bundle_filename: str,\n config_name: str):\n if name == \"drums\":\n generator = drums_rnn_sequence_generator\n elif name == \"melody\":\n generator = melody_rnn_sequence_generator\n else:\n raise Exception(f\"Unknown sequence generator {name}\")\n\n mm.notebook_utils.download_bundle(bundle_filename, \"bundles\")\n bundle = mm.sequence_generator_bundle.read_bundle_file(\n os.path.join(\"bundles\", bundle_filename))\n\n generator_map = generator.get_generator_map()\n sequence_generator = generator_map[config_name](\n checkpoint=None, bundle=bundle)\n sequence_generator.initialize()\n\n return sequence_generator\n", "id": "1577932", "language": "Python", "matching_score": 1.9941104650497437, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/sequences.py" }, { "content": "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Polyphonic RNN model.\"\"\"\n\nimport magenta\nfrom magenta.models.polyphony_rnn import polyphony_encoder_decoder\nfrom magenta.models.shared import events_rnn_model\nfrom magenta.music.protobuf import generator_pb2\n\nfrom tensorflow.contrib import training as contrib_training\n\n\nclass PolyphonyRnnModel(events_rnn_model.EventSequenceRnnModel):\n \"\"\"Class for RNN polyphonic sequence generation models.\"\"\"\n\n def generate_polyphonic_sequence(\n self, num_steps, primer_sequence, temperature=1.0, beam_size=1,\n branch_factor=1, steps_per_iteration=1, modify_events_callback=None):\n \"\"\"Generate a polyphonic track from a primer polyphonic track.\n\n Args:\n num_steps: The integer length in steps of the final track, after\n generation. Includes the primer.\n primer_sequence: The primer sequence, a PolyphonicSequence object.\n temperature: A float specifying how much to divide the logits by\n before computing the softmax. 
Greater than 1.0 makes tracks more\n random, less than 1.0 makes tracks less random.\n beam_size: An integer, beam size to use when generating tracks via\n beam search.\n branch_factor: An integer, beam search branch factor to use.\n steps_per_iteration: An integer, number of steps to take per beam search\n iteration.\n modify_events_callback: An optional callback for modifying the event list.\n Can be used to inject events rather than having them generated. If not\n None, will be called with 3 arguments after every event: the current\n EventSequenceEncoderDecoder, a list of current EventSequences, and a\n list of current encoded event inputs.\n Returns:\n The generated PolyphonicSequence object (which begins with the provided\n primer track).\n \"\"\"\n return self._generate_events(num_steps, primer_sequence, temperature,\n beam_size, branch_factor, steps_per_iteration,\n modify_events_callback=modify_events_callback)\n\n def polyphonic_sequence_log_likelihood(self, sequence):\n \"\"\"Evaluate the log likelihood of a polyphonic sequence.\n\n Args:\n sequence: The PolyphonicSequence object for which to evaluate the log\n likelihood.\n\n Returns:\n The log likelihood of `sequence` under this model.\n \"\"\"\n return self._evaluate_log_likelihood([sequence])[0]\n\n\ndefault_configs = {\n 'polyphony': events_rnn_model.EventSequenceRnnConfig(\n generator_pb2.GeneratorDetails(\n id='polyphony',\n description='Polyphonic RNN'),\n magenta.music.OneHotEventSequenceEncoderDecoder(\n polyphony_encoder_decoder.PolyphonyOneHotEncoding()),\n contrib_training.HParams(\n batch_size=64,\n rnn_layer_sizes=[256, 256, 256],\n dropout_keep_prob=0.5,\n clip_norm=5,\n learning_rate=0.001)),\n}\n", "id": "64359", "language": "Python", "matching_score": 2.295386791229248, "max_stars_count": 1, "path": "magenta/models/polyphony_rnn/polyphony_model.py" }, { "content": "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utility functions for working with polyphonic sequences.\"\"\"\n\nfrom __future__ import division\n\nimport collections\nimport copy\n\nfrom magenta.music import constants\nfrom magenta.music import events_lib\nfrom magenta.music import sequences_lib\nfrom magenta.music.protobuf import music_pb2\nfrom magenta.pipelines import statistics\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\nDEFAULT_STEPS_PER_QUARTER = constants.DEFAULT_STEPS_PER_QUARTER\nMAX_MIDI_PITCH = constants.MAX_MIDI_PITCH\nMIN_MIDI_PITCH = constants.MIN_MIDI_PITCH\nSTANDARD_PPQ = constants.STANDARD_PPQ\n\n\nclass PolyphonicEvent(object):\n \"\"\"Class for storing events in a polyphonic sequence.\"\"\"\n\n # Beginning of the sequence.\n START = 0\n # End of the sequence.\n END = 1\n # End of a step within the sequence.\n STEP_END = 2\n # Start of a new note.\n NEW_NOTE = 3\n # Continuation of a note.\n CONTINUED_NOTE = 4\n\n def __init__(self, event_type, pitch):\n if not (PolyphonicEvent.START <= event_type <=\n PolyphonicEvent.CONTINUED_NOTE):\n 
raise ValueError('Invalid event type: %s' % event_type)\n if not (pitch is None or MIN_MIDI_PITCH <= pitch <= MAX_MIDI_PITCH):\n raise ValueError('Invalid pitch: %s' % pitch)\n\n self.event_type = event_type\n self.pitch = pitch\n\n def __repr__(self):\n return 'PolyphonicEvent(%r, %r)' % (self.event_type, self.pitch)\n\n def __eq__(self, other):\n if not isinstance(other, PolyphonicEvent):\n return False\n return (self.event_type == other.event_type and\n self.pitch == other.pitch)\n\n\nclass PolyphonicSequence(events_lib.EventSequence):\n \"\"\"Stores a polyphonic sequence as a stream of single-note events.\n\n Events are PolyphonicEvent tuples that encode event type and pitch.\n \"\"\"\n\n def __init__(self, quantized_sequence=None, steps_per_quarter=None,\n start_step=0):\n \"\"\"Construct a PolyphonicSequence.\n\n Either quantized_sequence or steps_per_quarter should be supplied.\n\n Args:\n quantized_sequence: a quantized NoteSequence proto.\n steps_per_quarter: how many steps a quarter note represents.\n start_step: The offset of this sequence relative to the\n beginning of the source sequence. If a quantized sequence is used as\n input, only notes starting after this step will be considered.\n \"\"\"\n assert (quantized_sequence, steps_per_quarter).count(None) == 1\n\n if quantized_sequence:\n sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)\n self._events = self._from_quantized_sequence(quantized_sequence,\n start_step)\n self._steps_per_quarter = (\n quantized_sequence.quantization_info.steps_per_quarter)\n else:\n self._events = [\n PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]\n self._steps_per_quarter = steps_per_quarter\n\n self._start_step = start_step\n\n @property\n def start_step(self):\n return self._start_step\n\n @property\n def steps_per_quarter(self):\n return self._steps_per_quarter\n\n def trim_trailing_end_events(self):\n \"\"\"Removes the trailing END event if present.\n\n Should be called before using a sequence to prime generation.\n \"\"\"\n while self._events[-1].event_type == PolyphonicEvent.END:\n del self._events[-1]\n\n def _append_silence_steps(self, num_steps):\n \"\"\"Adds steps of silence to the end of the sequence.\"\"\"\n for _ in range(num_steps):\n self._events.append(\n PolyphonicEvent(event_type=PolyphonicEvent.STEP_END, pitch=None))\n\n def _trim_steps(self, num_steps):\n \"\"\"Trims a given number of steps from the end of the sequence.\"\"\"\n steps_trimmed = 0\n for i in reversed(range(len(self._events))):\n if self._events[i].event_type == PolyphonicEvent.STEP_END:\n if steps_trimmed == num_steps:\n del self._events[i + 1:]\n break\n steps_trimmed += 1\n elif i == 0:\n self._events = [\n PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]\n break\n\n def set_length(self, steps, from_left=False):\n \"\"\"Sets the length of the sequence to the specified number of steps.\n\n If the event sequence is not long enough, pads with silence to make the\n sequence the specified length. 
If it is too long, it will be truncated to\n the requested length.\n\n Note that this will append a STEP_END event to the end of the sequence if\n there is an unfinished step.\n\n Args:\n steps: How many quantized steps long the event sequence should be.\n from_left: Whether to add/remove from the left instead of right.\n \"\"\"\n if from_left:\n raise NotImplementedError('from_left is not supported')\n\n # First remove any trailing end events.\n self.trim_trailing_end_events()\n # Then add an end step event, to close out any incomplete steps.\n self._events.append(\n PolyphonicEvent(event_type=PolyphonicEvent.STEP_END, pitch=None))\n # Then trim or pad as needed.\n if self.num_steps < steps:\n self._append_silence_steps(steps - self.num_steps)\n elif self.num_steps > steps:\n self._trim_steps(self.num_steps - steps)\n # Then add a trailing end event.\n self._events.append(\n PolyphonicEvent(event_type=PolyphonicEvent.END, pitch=None))\n assert self.num_steps == steps\n\n def append(self, event):\n \"\"\"Appends the event to the end of the sequence.\n\n Args:\n event: The polyphonic event to append to the end.\n Raises:\n ValueError: If `event` is not a valid polyphonic event.\n \"\"\"\n if not isinstance(event, PolyphonicEvent):\n raise ValueError('Invalid polyphonic event: %s' % event)\n self._events.append(event)\n\n def __len__(self):\n \"\"\"How many events are in this sequence.\n\n Returns:\n Number of events as an integer.\n \"\"\"\n return len(self._events)\n\n def __getitem__(self, i):\n \"\"\"Returns the event at the given index.\"\"\"\n return self._events[i]\n\n def __iter__(self):\n \"\"\"Return an iterator over the events in this sequence.\"\"\"\n return iter(self._events)\n\n def __str__(self):\n strs = []\n for event in self:\n if event.event_type == PolyphonicEvent.START:\n strs.append('START')\n elif event.event_type == PolyphonicEvent.END:\n strs.append('END')\n elif event.event_type == PolyphonicEvent.STEP_END:\n strs.append('|||')\n elif event.event_type == PolyphonicEvent.NEW_NOTE:\n strs.append('(%s, NEW)' % event.pitch)\n elif event.event_type == PolyphonicEvent.CONTINUED_NOTE:\n strs.append('(%s, CONTINUED)' % event.pitch)\n else:\n raise ValueError('Unknown event type: %s' % event.event_type)\n return '\\n'.join(strs)\n\n @property\n def end_step(self):\n return self.start_step + self.num_steps\n\n @property\n def num_steps(self):\n \"\"\"Returns how many steps long this sequence is.\n\n Does not count incomplete steps (i.e., steps that do not have a terminating\n STEP_END event).\n\n Returns:\n Length of the sequence in quantized steps.\n \"\"\"\n steps = 0\n for event in self:\n if event.event_type == PolyphonicEvent.STEP_END:\n steps += 1\n return steps\n\n @property\n def steps(self):\n \"\"\"Return a Python list of the time step at each event in this sequence.\"\"\"\n step = self.start_step\n result = []\n for event in self:\n result.append(step)\n if event.event_type == PolyphonicEvent.STEP_END:\n step += 1\n return result\n\n @staticmethod\n def _from_quantized_sequence(quantized_sequence, start_step=0):\n \"\"\"Populate self with events from the given quantized NoteSequence object.\n\n Sequences start with START.\n\n Within a step, new pitches are started with NEW_NOTE and existing\n pitches are continued with CONTINUED_NOTE. A step is ended with\n STEP_END. 
If an active pitch is not continued, it is considered to\n have ended.\n\n Sequences end with END.\n\n Args:\n quantized_sequence: A quantized NoteSequence instance.\n start_step: Start converting the sequence at this time step.\n Assumed to be the beginning of a bar.\n\n Returns:\n A list of events.\n \"\"\"\n pitch_start_steps = collections.defaultdict(list)\n pitch_end_steps = collections.defaultdict(list)\n\n for note in quantized_sequence.notes:\n if note.quantized_start_step < start_step:\n continue\n pitch_start_steps[note.quantized_start_step].append(note.pitch)\n pitch_end_steps[note.quantized_end_step].append(note.pitch)\n\n events = [PolyphonicEvent(event_type=PolyphonicEvent.START, pitch=None)]\n\n # Use a list rather than a set because one pitch may be active multiple\n # times.\n active_pitches = []\n for step in range(start_step,\n quantized_sequence.total_quantized_steps):\n step_events = []\n\n for pitch in pitch_end_steps[step]:\n active_pitches.remove(pitch)\n\n for pitch in active_pitches:\n step_events.append(\n PolyphonicEvent(event_type=PolyphonicEvent.CONTINUED_NOTE,\n pitch=pitch))\n\n for pitch in pitch_start_steps[step]:\n active_pitches.append(pitch)\n step_events.append(PolyphonicEvent(event_type=PolyphonicEvent.NEW_NOTE,\n pitch=pitch))\n\n events.extend(sorted(step_events, key=lambda e: e.pitch, reverse=True))\n events.append(\n PolyphonicEvent(event_type=PolyphonicEvent.STEP_END, pitch=None))\n events.append(PolyphonicEvent(event_type=PolyphonicEvent.END, pitch=None))\n\n return events\n\n def to_sequence(self,\n velocity=100,\n instrument=0,\n program=0,\n qpm=constants.DEFAULT_QUARTERS_PER_MINUTE,\n base_note_sequence=None):\n \"\"\"Converts the PolyphonicSequence to NoteSequence proto.\n\n Assumes that the sequences ends with a STEP_END followed by an END event. To\n ensure this is true, call set_length before calling this method.\n\n Args:\n velocity: Midi velocity to give each note. Between 1 and 127 (inclusive).\n instrument: Midi instrument to give each note.\n program: Midi program to give each note.\n qpm: Quarter notes per minute (float).\n base_note_sequence: A NoteSequence to use a starting point. 
Must match the\n specified qpm.\n\n Raises:\n ValueError: if an unknown event is encountered.\n\n Returns:\n A NoteSequence proto.\n \"\"\"\n seconds_per_step = 60.0 / qpm / self._steps_per_quarter\n\n sequence_start_time = self.start_step * seconds_per_step\n\n if base_note_sequence:\n sequence = copy.deepcopy(base_note_sequence)\n if sequence.tempos[0].qpm != qpm:\n raise ValueError(\n 'Supplied QPM (%d) does not match QPM of base_note_sequence (%d)'\n % (qpm, sequence.tempos[0].qpm))\n else:\n sequence = music_pb2.NoteSequence()\n sequence.tempos.add().qpm = qpm\n sequence.ticks_per_quarter = STANDARD_PPQ\n\n step = 0\n # Use lists rather than sets because one pitch may be active multiple times.\n pitch_start_steps = []\n pitches_to_end = []\n for i, event in enumerate(self):\n if event.event_type == PolyphonicEvent.START:\n if i != 0:\n tf.logging.debug(\n 'Ignoring START marker not at beginning of sequence at position '\n '%d' % i)\n elif event.event_type == PolyphonicEvent.END and i < len(self) - 1:\n tf.logging.debug(\n 'Ignoring END maker before end of sequence at position %d' % i)\n elif event.event_type == PolyphonicEvent.NEW_NOTE:\n pitch_start_steps.append((event.pitch, step))\n elif event.event_type == PolyphonicEvent.CONTINUED_NOTE:\n try:\n pitches_to_end.remove(event.pitch)\n except ValueError:\n tf.logging.debug(\n 'Attempted to continue pitch %s at step %s, but pitch was not '\n 'active. Ignoring.' % (event.pitch, step))\n elif (event.event_type == PolyphonicEvent.STEP_END or\n event.event_type == PolyphonicEvent.END):\n # Find active pitches that should end. Create notes for them, based on\n # when they started.\n # Make a copy of pitch_start_steps so we can remove things from it while\n # iterating.\n for pitch_start_step in list(pitch_start_steps):\n if pitch_start_step[0] in pitches_to_end:\n pitches_to_end.remove(pitch_start_step[0])\n pitch_start_steps.remove(pitch_start_step)\n\n note = sequence.notes.add()\n note.start_time = (pitch_start_step[1] * seconds_per_step +\n sequence_start_time)\n note.end_time = step * seconds_per_step + sequence_start_time\n note.pitch = pitch_start_step[0]\n note.velocity = velocity\n note.instrument = instrument\n note.program = program\n\n assert not pitches_to_end\n\n # Increment the step counter.\n step += 1\n\n # All active pitches are eligible for ending unless continued.\n pitches_to_end = [ps[0] for ps in pitch_start_steps]\n else:\n raise ValueError('Unknown event type: %s' % event.event_type)\n\n if pitch_start_steps:\n raise ValueError(\n 'Sequence ended, but not all pitches were ended. This likely means '\n 'the sequence was missing a STEP_END event before the end of the '\n 'sequence. To ensure a well-formed sequence, call set_length first.')\n\n sequence.total_time = seconds_per_step * (step - 1) + sequence_start_time\n if sequence.notes:\n assert sequence.total_time >= sequence.notes[-1].end_time\n\n return sequence\n\n\ndef extract_polyphonic_sequences(\n quantized_sequence, start_step=0, min_steps_discard=None,\n max_steps_discard=None):\n \"\"\"Extracts a polyphonic track from the given quantized NoteSequence.\n\n Currently, this extracts only one polyphonic sequence from a given track.\n\n Args:\n quantized_sequence: A quantized NoteSequence.\n start_step: Start extracting a sequence at this time step. Assumed\n to be the beginning of a bar.\n min_steps_discard: Minimum length of tracks in steps. Shorter tracks are\n discarded.\n max_steps_discard: Maximum length of tracks in steps. 
Longer tracks are\n discarded.\n\n Returns:\n poly_seqs: A python list of PolyphonicSequence instances.\n stats: A dictionary mapping string names to `statistics.Statistic` objects.\n \"\"\"\n sequences_lib.assert_is_relative_quantized_sequence(quantized_sequence)\n\n stats = dict((stat_name, statistics.Counter(stat_name)) for stat_name in\n ['polyphonic_tracks_discarded_too_short',\n 'polyphonic_tracks_discarded_too_long',\n 'polyphonic_tracks_discarded_more_than_1_program'])\n\n steps_per_bar = sequences_lib.steps_per_bar_in_quantized_sequence(\n quantized_sequence)\n\n # Create a histogram measuring lengths (in bars not steps).\n stats['polyphonic_track_lengths_in_bars'] = statistics.Histogram(\n 'polyphonic_track_lengths_in_bars',\n [0, 1, 10, 20, 30, 40, 50, 100, 200, 500, 1000])\n\n # Allow only 1 program.\n programs = set()\n for note in quantized_sequence.notes:\n programs.add(note.program)\n if len(programs) > 1:\n stats['polyphonic_tracks_discarded_more_than_1_program'].increment()\n return [], stats.values()\n\n # Translate the quantized sequence into a PolyphonicSequence.\n poly_seq = PolyphonicSequence(quantized_sequence,\n start_step=start_step)\n\n poly_seqs = []\n num_steps = poly_seq.num_steps\n\n if min_steps_discard is not None and num_steps < min_steps_discard:\n stats['polyphonic_tracks_discarded_too_short'].increment()\n elif max_steps_discard is not None and num_steps > max_steps_discard:\n stats['polyphonic_tracks_discarded_too_long'].increment()\n else:\n poly_seqs.append(poly_seq)\n stats['polyphonic_track_lengths_in_bars'].increment(\n num_steps // steps_per_bar)\n\n return poly_seqs, stats.values()\n", "id": "3998435", "language": "Python", "matching_score": 3.019728660583496, "max_stars_count": 1, "path": "magenta/models/polyphony_rnn/polyphony_lib.py" }, { "content": "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Parser for ABC files.\n\nhttp://abcnotation.com/wiki/abc:standard:v2.1\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport fractions\nimport re\n\nfrom magenta.music import constants\nfrom magenta.music.protobuf import music_pb2\nimport six\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow as tf\n\n\nFraction = fractions.Fraction\n\n\nclass ABCParseError(Exception):\n \"\"\"Exception thrown when ABC contents cannot be parsed.\"\"\"\n pass\n\n\nclass MultiVoiceError(ABCParseError):\n \"\"\"Exception when a multi-voice directive is encountered.\"\"\"\n\n\nclass RepeatParseError(ABCParseError):\n \"\"\"Exception when a repeat directive could not be parsed.\"\"\"\n\n\nclass VariantEndingError(ABCParseError):\n \"\"\"Variant endings are not yet supported.\"\"\"\n\n\nclass PartError(ABCParseError):\n \"\"\"ABC Parts are not yet supported.\"\"\"\n\n\nclass InvalidCharacterError(ABCParseError):\n \"\"\"Invalid character.\"\"\"\n\n\nclass ChordError(ABCParseError):\n \"\"\"Chords are not 
supported.\"\"\"\n\n\nclass DuplicateReferenceNumberError(ABCParseError):\n \"\"\"Found duplicate reference numbers.\"\"\"\n\n\nclass TupletError(ABCParseError):\n \"\"\"Tuplets are not supported.\"\"\"\n\n\ndef parse_abc_tunebook_file(filename):\n \"\"\"Parse an ABC Tunebook file.\n\n Args:\n filename: File path to an ABC tunebook.\n\n Returns:\n tunes: A dictionary of reference number to NoteSequence of parsed ABC tunes.\n exceptions: A list of exceptions for tunes that could not be parsed.\n\n Raises:\n DuplicateReferenceNumberError: If the same reference number appears more\n than once in the tunebook.\n \"\"\"\n # 'r' mode will decode the file as utf-8 in py3.\n return parse_abc_tunebook(tf.gfile.Open(filename, 'r').read())\n\n\ndef parse_abc_tunebook(tunebook):\n \"\"\"Parse an ABC Tunebook string.\n\n Args:\n tunebook: The ABC tunebook as a string.\n\n Returns:\n tunes: A dictionary of reference number to NoteSequence of parsed ABC tunes.\n exceptions: A list of exceptions for tunes that could not be parsed.\n\n Raises:\n DuplicateReferenceNumberError: If the same reference number appears more\n than once in the tunebook.\n \"\"\"\n # Split tunebook into sections based on empty lines.\n sections = []\n current_lines = []\n for line in tunebook.splitlines():\n line = line.strip()\n if not line:\n if current_lines:\n sections.append(current_lines)\n current_lines = []\n else:\n current_lines.append(line)\n if current_lines:\n sections.append(current_lines)\n\n # If there are multiple sections, the first one may be a header.\n # The first section is a header if it does not contain an X information field.\n header = []\n if len(sections) > 1 and not any(\n [line.startswith('X:') for line in sections[0]]):\n header = sections.pop(0)\n\n tunes = {}\n exceptions = []\n\n for tune in sections:\n try:\n # The header sets default values for each tune, so prepend it to every\n # tune that is being parsed.\n abc_tune = ABCTune(header + tune)\n except ABCParseError as e:\n exceptions.append(e)\n else:\n ns = abc_tune.note_sequence\n if ns.reference_number in tunes:\n raise DuplicateReferenceNumberError(\n 'ABC Reference number {} appears more than once in this '\n 'tunebook'.format(ns.reference_number))\n tunes[ns.reference_number] = ns\n\n return tunes, exceptions\n\n\nclass ABCTune(object):\n \"\"\"Class for parsing an individual ABC tune.\"\"\"\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#decorations\n DECORATION_TO_VELOCITY = {\n '!pppp!': 30,\n '!ppp!': 30,\n '!pp!': 45,\n '!p!': 60,\n '!mp!': 75,\n '!mf!': 90,\n '!f!': 105,\n '!ff!': 120,\n '!fff!': 127,\n '!ffff!': 127,\n }\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#pitch\n ABC_NOTE_TO_MIDI = {\n 'C': 60,\n 'D': 62,\n 'E': 64,\n 'F': 65,\n 'G': 67,\n 'A': 69,\n 'B': 71,\n 'c': 72,\n 'd': 74,\n 'e': 76,\n 'f': 77,\n 'g': 79,\n 'a': 81,\n 'b': 83,\n }\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#kkey\n SIG_TO_KEYS = {\n 7: ['C#', 'A#m', 'G#Mix', 'D#Dor', 'E#Phr', 'F#Lyd', 'B#Loc'],\n 6: ['F#', 'D#m', 'C#Mix', 'G#Dor', 'A#Phr', 'BLyd', 'E#Loc'],\n 5: ['B', 'G#m', 'F#Mix', 'C#Dor', 'D#Phr', 'ELyd', 'A#Loc'],\n 4: ['E', 'C#m', 'BMix', 'F#Dor', 'G#Phr', 'ALyd', 'D#Loc'],\n 3: ['A', 'F#m', 'EMix', 'BDor', 'C#Phr', 'DLyd', 'G#Loc'],\n 2: ['D', 'Bm', 'AMix', 'EDor', 'F#Phr', 'GLyd', 'C#Loc'],\n 1: ['G', 'Em', 'DMix', 'ADor', 'BPhr', 'CLyd', 'F#Loc'],\n 0: ['C', 'Am', 'GMix', 'DDor', 'EPhr', 'FLyd', 'BLoc'],\n -1: ['F', 'Dm', 'CMix', 'GDor', 'APhr', 'BbLyd', 'ELoc'],\n -2: ['Bb', 'Gm', 'FMix', 'CDor', 'DPhr', 'EbLyd', 
'ALoc'],\n -3: ['Eb', 'Cm', 'BbMix', 'FDor', 'GPhr', 'AbLyd', 'DLoc'],\n -4: ['Ab', 'Fm', 'EbMix', 'BbDor', 'CPhr', 'DbLyd', 'GLoc'],\n -5: ['Db', 'Bbm', 'AbMix', 'EbDor', 'FPhr', 'GbLyd', 'CLoc'],\n -6: ['Gb', 'Ebm', 'DbMix', 'AbDor', 'BbPhr', 'CbLyd', 'FLoc'],\n -7: ['Cb', 'Abm', 'GbMix', 'DbDor', 'EbPhr', 'FbLyd', 'BbLoc'],\n }\n\n KEY_TO_SIG = {}\n for sig, keys in six.iteritems(SIG_TO_KEYS):\n for key in keys:\n KEY_TO_SIG[key.lower()] = sig\n\n KEY_TO_PROTO_KEY = {\n 'c': music_pb2.NoteSequence.KeySignature.C,\n 'c#': music_pb2.NoteSequence.KeySignature.C_SHARP,\n 'db': music_pb2.NoteSequence.KeySignature.D_FLAT,\n 'd': music_pb2.NoteSequence.KeySignature.D,\n 'd#': music_pb2.NoteSequence.KeySignature.D_SHARP,\n 'eb': music_pb2.NoteSequence.KeySignature.E_FLAT,\n 'e': music_pb2.NoteSequence.KeySignature.E,\n 'f': music_pb2.NoteSequence.KeySignature.F,\n 'f#': music_pb2.NoteSequence.KeySignature.F_SHARP,\n 'gb': music_pb2.NoteSequence.KeySignature.G_FLAT,\n 'g': music_pb2.NoteSequence.KeySignature.G,\n 'g#': music_pb2.NoteSequence.KeySignature.G_SHARP,\n 'ab': music_pb2.NoteSequence.KeySignature.A_FLAT,\n 'a': music_pb2.NoteSequence.KeySignature.A,\n 'a#': music_pb2.NoteSequence.KeySignature.A_SHARP,\n 'bb': music_pb2.NoteSequence.KeySignature.B_FLAT,\n 'b': music_pb2.NoteSequence.KeySignature.B,\n }\n\n SHARPS_ORDER = 'FCGDAEB'\n FLATS_ORDER = 'BEADGCF'\n\n INFORMATION_FIELD_PATTERN = re.compile(r'([A-Za-z]):\\s*(.*)')\n\n def __init__(self, tune_lines):\n self._ns = music_pb2.NoteSequence()\n # Standard ABC fields.\n self._ns.source_info.source_type = (\n music_pb2.NoteSequence.SourceInfo.SCORE_BASED)\n self._ns.source_info.encoding_type = (\n music_pb2.NoteSequence.SourceInfo.ABC)\n self._ns.source_info.parser = (\n music_pb2.NoteSequence.SourceInfo.MAGENTA_ABC)\n self._ns.ticks_per_quarter = constants.STANDARD_PPQ\n\n self._current_time = 0\n self._accidentals = ABCTune._sig_to_accidentals(0)\n self._bar_accidentals = {}\n self._current_unit_note_length = None\n self._current_expected_repeats = None\n\n # Default dynamic should be !mf! as per:\n # http://abcnotation.com/wiki/abc:standard:v2.1#decorations\n self._current_velocity = ABCTune.DECORATION_TO_VELOCITY['!mf!']\n\n self._in_header = True\n self._header_tempo_unit = None\n self._header_tempo_rate = None\n for line in tune_lines:\n line = re.sub('%.*$', '', line) # Strip comments.\n line = line.strip() # Strip whitespace.\n if not line:\n continue\n\n # If the lines begins with a letter and a colon, it's an information\n # field. 
Extract it.\n info_field_match = ABCTune.INFORMATION_FIELD_PATTERN.match(line)\n if info_field_match:\n self._parse_information_field(\n info_field_match.group(1), info_field_match.group(2))\n else:\n if self._in_header:\n self._set_values_from_header()\n self._in_header = False\n self._parse_music_code(line)\n if self._in_header:\n self._set_values_from_header()\n\n self._finalize()\n\n if self._ns.notes:\n self._ns.total_time = self._ns.notes[-1].end_time\n\n @property\n def note_sequence(self):\n return self._ns\n\n @staticmethod\n def _sig_to_accidentals(sig):\n accidentals = {pitch: 0 for pitch in 'ABCDEFG'}\n if sig > 0:\n for i in range(sig):\n accidentals[ABCTune.SHARPS_ORDER[i]] = 1\n elif sig < 0:\n for i in range(abs(sig)):\n accidentals[ABCTune.FLATS_ORDER[i]] = -1\n return accidentals\n\n @property\n def _qpm(self):\n \"\"\"Returns the current QPM.\"\"\"\n if self._ns.tempos:\n return self._ns.tempos[-1].qpm\n else:\n # No QPM has been specified, so will use the default one.\n return constants.DEFAULT_QUARTERS_PER_MINUTE\n\n def _set_values_from_header(self):\n # Set unit note length. May depend on the current meter, so this has to be\n # calculated at the end of the header.\n self._set_unit_note_length_from_header()\n\n # Set the tempo if it was specified in the header. May depend on current\n # unit note length, so has to be calculated after that is set.\n # _header_tempo_unit may be legitimately None, so check _header_tempo_rate.\n if self._header_tempo_rate:\n self._add_tempo(self._header_tempo_unit, self._header_tempo_rate)\n\n def _set_unit_note_length_from_header(self):\n \"\"\"Sets the current unit note length.\n\n Should be called immediately after parsing the header.\n\n Raises:\n ABCParseError: If multiple time signatures were set in the header.\n \"\"\"\n # http://abcnotation.com/wiki/abc:standard:v2.1#lunit_note_length\n\n if self._current_unit_note_length:\n # If it has been set explicitly, leave it as is.\n pass\n elif not self._ns.time_signatures:\n # For free meter, the default unit note length is 1/8.\n self._current_unit_note_length = Fraction(1, 8)\n else:\n # Otherwise, base it on the current meter.\n if len(self._ns.time_signatures) != 1:\n raise ABCParseError('Multiple time signatures set in header.')\n current_ts = self._ns.time_signatures[0]\n ratio = current_ts.numerator / current_ts.denominator\n if ratio < 0.75:\n self._current_unit_note_length = Fraction(1, 16)\n else:\n self._current_unit_note_length = Fraction(1, 8)\n\n def _add_tempo(self, tempo_unit, tempo_rate):\n if tempo_unit is None:\n tempo_unit = self._current_unit_note_length\n\n tempo = self._ns.tempos.add()\n tempo.time = self._current_time\n tempo.qpm = float((tempo_unit / Fraction(1, 4)) * tempo_rate)\n\n def _add_section(self, time):\n \"\"\"Adds a new section to the NoteSequence.\n\n If the most recently added section is for the same time, a new section will\n not be created.\n\n Args:\n time: The time at which to create the new section.\n\n Returns:\n The id of the newly created section, or None if no new section was\n created.\n \"\"\"\n if not self._ns.section_annotations and time > 0:\n # We're in a piece with sections, need to add a section marker at the\n # beginning of the piece if there isn't one there already.\n sa = self._ns.section_annotations.add()\n sa.time = 0\n sa.section_id = 0\n\n if self._ns.section_annotations:\n if self._ns.section_annotations[-1].time == time:\n tf.logging.debug('Ignoring duplicate section at time {}'.format(time))\n return None\n new_id = 
self._ns.section_annotations[-1].section_id + 1\n else:\n new_id = 0\n\n sa = self._ns.section_annotations.add()\n sa.time = time\n sa.section_id = new_id\n return new_id\n\n def _finalize(self):\n \"\"\"Do final cleanup. To be called at the end of the tune.\"\"\"\n self._finalize_repeats()\n self._finalize_sections()\n\n def _finalize_repeats(self):\n \"\"\"Handle any pending repeats.\"\"\"\n # If we're still expecting a repeat at the end of the tune, that's an error\n # in the file.\n if self._current_expected_repeats:\n raise RepeatParseError(\n 'Expected a repeat at the end of the file, but did not get one.')\n\n def _finalize_sections(self):\n \"\"\"Handle any pending sections.\"\"\"\n # If a new section was started at the very end of the piece, delete it\n # because it will contain no notes and is meaningless.\n # This happens if the last line in the piece ends with a :| symbol. A new\n # section is set up to handle upcoming notes, but then the end of the piece\n # is reached.\n if (self._ns.section_annotations and\n self._ns.section_annotations[-1].time == self._ns.notes[-1].end_time):\n del self._ns.section_annotations[-1]\n\n # Make sure the final section annotation is referenced in a section group.\n # If it hasn't been referenced yet, it just needs to be played once.\n # This checks that the final section_annotation is referenced in the final\n # section_group.\n # At this point, all of our section_groups have only 1 section, so this\n # logic will need to be updated when we support parts and start creating\n # more complex section_groups.\n if (self._ns.section_annotations and self._ns.section_groups and\n self._ns.section_groups[-1].sections[0].section_id !=\n self._ns.section_annotations[-1].section_id):\n sg = self._ns.section_groups.add()\n sg.sections.add(\n section_id=self._ns.section_annotations[-1].section_id)\n sg.num_times = 1\n\n def _apply_broken_rhythm(self, broken_rhythm):\n \"\"\"Applies a broken rhythm symbol to the two most recently added notes.\"\"\"\n # http://abcnotation.com/wiki/abc:standard:v2.1#broken_rhythm\n\n if len(self._ns.notes) < 2:\n raise ABCParseError(\n 'Cannot apply a broken rhythm with fewer than 2 notes')\n\n note1 = self._ns.notes[-2]\n note2 = self._ns.notes[-1]\n note1_len = note1.end_time - note1.start_time\n note2_len = note2.end_time - note2.start_time\n if note1_len != note2_len:\n raise ABCParseError(\n 'Cannot apply broken rhythm to two notes of different lengths')\n\n time_adj = note1_len / (2 ** len(broken_rhythm))\n if broken_rhythm[0] == '<':\n note1.end_time -= time_adj\n note2.start_time -= time_adj\n elif broken_rhythm[0] == '>':\n note1.end_time += time_adj\n note2.start_time += time_adj\n else:\n raise ABCParseError('Could not parse broken rhythm token: {}'.format(\n broken_rhythm))\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#pitch\n NOTE_PATTERN = re.compile(\n r'(__|_|=|\\^|\\^\\^)?([A-Ga-g])([\\',]*)(\\d*/*\\d*)')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#chords_and_unisons\n CHORD_PATTERN = re.compile(r'\\[(' + NOTE_PATTERN.pattern + r')+\\]')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#broken_rhythm\n BROKEN_RHYTHM_PATTERN = re.compile(r'(<+|>+)')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#use_of_fields_within_the_tune_body\n INLINE_INFORMATION_FIELD_PATTERN = re.compile(r'\\[([A-Za-z]):\\s*([^\\]]+)\\]')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#repeat_bar_symbols\n\n # Pattern for matching variant endings with an associated bar symbol.\n BAR_AND_VARIANT_ENDINGS_PATTERN = 
re.compile(r'(:*)[\\[\\]|]+\\s*([0-9,-]+)')\n # Pattern for matching repeat symbols with an associated bar symbol.\n BAR_AND_REPEAT_SYMBOLS_PATTERN = re.compile(r'(:*)([\\[\\]|]+)(:*)')\n # Pattern for matching repeat symbols without an associated bar symbol.\n REPEAT_SYMBOLS_PATTERN = re.compile(r'(:+)')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#chord_symbols\n # http://abcnotation.com/wiki/abc:standard:v2.1#annotations\n TEXT_ANNOTATION_PATTERN = re.compile(r'\"([^\"]*)\"')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#decorations\n DECORATION_PATTERN = re.compile(r'[.~HLMOPSTuv]')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#ties_and_slurs\n # Either an opening parenthesis (not followed by a digit, since that indicates\n # a tuplet) or a closing parenthesis.\n SLUR_PATTERN = re.compile(r'\\((?!\\d)|\\)')\n TIE_PATTERN = re.compile(r'-')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#duplets_triplets_quadruplets_etc\n TUPLET_PATTERN = re.compile(r'\\(\\d')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#typesetting_line-breaks\n LINE_CONTINUATION_PATTERN = re.compile(r'\\\\$')\n\n def _parse_music_code(self, line):\n \"\"\"Parse the music code within an ABC file.\"\"\"\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#the_tune_body\n pos = 0\n broken_rhythm = None\n while pos < len(line):\n match = None\n for regex in [\n ABCTune.NOTE_PATTERN,\n ABCTune.CHORD_PATTERN,\n ABCTune.BROKEN_RHYTHM_PATTERN,\n ABCTune.INLINE_INFORMATION_FIELD_PATTERN,\n ABCTune.BAR_AND_VARIANT_ENDINGS_PATTERN,\n ABCTune.BAR_AND_REPEAT_SYMBOLS_PATTERN,\n ABCTune.REPEAT_SYMBOLS_PATTERN,\n ABCTune.TEXT_ANNOTATION_PATTERN,\n ABCTune.DECORATION_PATTERN,\n ABCTune.SLUR_PATTERN,\n ABCTune.TIE_PATTERN,\n ABCTune.TUPLET_PATTERN,\n ABCTune.LINE_CONTINUATION_PATTERN]:\n match = regex.match(line, pos)\n if match:\n break\n\n if not match:\n if not line[pos].isspace():\n raise InvalidCharacterError(\n 'Unexpected character: [{}]'.format(line[pos].encode('utf-8')))\n pos += 1\n continue\n\n pos = match.end()\n if match.re == ABCTune.NOTE_PATTERN:\n note = self._ns.notes.add()\n note.velocity = self._current_velocity\n note.start_time = self._current_time\n\n note.pitch = ABCTune.ABC_NOTE_TO_MIDI[match.group(2)]\n note_name = match.group(2).upper()\n\n # Accidentals\n if match.group(1):\n pitch_change = 0\n for accidental in match.group(1).split():\n if accidental == '^':\n pitch_change += 1\n elif accidental == '_':\n pitch_change -= 1\n elif accidental == '=':\n pass\n else:\n raise ABCParseError(\n 'Invalid accidental: {}'.format(accidental))\n note.pitch += pitch_change\n self._bar_accidentals[note_name] = pitch_change\n elif note_name in self._bar_accidentals:\n note.pitch += self._bar_accidentals[note_name]\n else:\n # No accidentals, so modify according to current key.\n note.pitch += self._accidentals[note_name]\n\n # Octaves\n if match.group(3):\n for octave in match.group(3):\n if octave == '\\'':\n note.pitch += 12\n elif octave == ',':\n note.pitch -= 12\n else:\n raise ABCParseError('Invalid octave: {}'.format(octave))\n\n if (note.pitch < constants.MIN_MIDI_PITCH or\n note.pitch > constants.MAX_MIDI_PITCH):\n raise ABCParseError('pitch {} is invalid'.format(note.pitch))\n\n # Note length\n length = self._current_unit_note_length\n # http://abcnotation.com/wiki/abc:standard:v2.1#note_lengths\n if match.group(4):\n slash_count = match.group(4).count('/')\n if slash_count == len(match.group(4)):\n # Handle A// shorthand case.\n length /= 2 ** slash_count\n elif 
match.group(4).startswith('/'):\n length /= int(match.group(4)[1:])\n elif slash_count == 1:\n fraction = match.group(4).split('/', 1)\n # If no denominator is specified (e.g., \"3/\"), default to 2.\n if not fraction[1]:\n fraction[1] = 2\n length *= Fraction(int(fraction[0]), int(fraction[1]))\n elif slash_count == 0:\n length *= int(match.group(4))\n else:\n raise ABCParseError(\n 'Could not parse note length: {}'.format(match.group(4)))\n\n # Advance clock based on note length.\n self._current_time += (1 / (self._qpm / 60)) * (length / Fraction(1, 4))\n\n note.end_time = self._current_time\n\n if broken_rhythm:\n self._apply_broken_rhythm(broken_rhythm)\n broken_rhythm = None\n elif match.re == ABCTune.CHORD_PATTERN:\n raise ChordError('Chords are not supported.')\n elif match.re == ABCTune.BROKEN_RHYTHM_PATTERN:\n if broken_rhythm:\n raise ABCParseError(\n 'Cannot specify a broken rhythm twice in a row.')\n broken_rhythm = match.group(1)\n elif match.re == ABCTune.INLINE_INFORMATION_FIELD_PATTERN:\n self._parse_information_field(match.group(1), match.group(2))\n elif match.re == ABCTune.BAR_AND_VARIANT_ENDINGS_PATTERN:\n raise VariantEndingError(\n 'Variant ending {} is not supported.'.format(match.group(0)))\n elif (match.re == ABCTune.BAR_AND_REPEAT_SYMBOLS_PATTERN or\n match.re == ABCTune.REPEAT_SYMBOLS_PATTERN):\n if match.re == ABCTune.REPEAT_SYMBOLS_PATTERN:\n colon_count = len(match.group(1))\n if colon_count % 2 != 0:\n raise RepeatParseError(\n 'Colon-only repeats must be divisible by 2: {}'.format(\n match.group(1)))\n backward_repeats = forward_repeats = int((colon_count / 2) + 1)\n elif match.re == ABCTune.BAR_AND_REPEAT_SYMBOLS_PATTERN:\n # We're in a new bar, so clear the bar-wise accidentals.\n self._bar_accidentals.clear()\n\n is_repeat = ':' in match.group(1) or match.group(3)\n if not is_repeat:\n if len(match.group(2)) >= 2:\n # This is a double bar that isn't a repeat.\n if not self._current_expected_repeats and self._current_time > 0:\n # There was no previous forward repeat symbol.\n # Add a new section so that if there is a backward repeat later\n # on, it will repeat to this bar.\n new_section_id = self._add_section(self._current_time)\n if new_section_id is not None:\n sg = self._ns.section_groups.add()\n sg.sections.add(\n section_id=self._ns.section_annotations[-2].section_id)\n sg.num_times = 1\n\n # If this isn't a repeat, no additional work to do.\n continue\n\n # Count colons on either side.\n if match.group(1):\n backward_repeats = len(match.group(1)) + 1\n else:\n backward_repeats = None\n\n if match.group(3):\n forward_repeats = len(match.group(3)) + 1\n else:\n forward_repeats = None\n else:\n raise ABCParseError('Unexpected regex. Should not happen.')\n\n if (self._current_expected_repeats and\n backward_repeats != self._current_expected_repeats):\n raise RepeatParseError(\n 'Mismatched forward/backward repeat symbols. 
'\n 'Expected {} but got {}.'.format(\n self._current_expected_repeats, backward_repeats))\n\n # A repeat implies the start of a new section, so make one.\n new_section_id = self._add_section(self._current_time)\n\n if backward_repeats:\n if self._current_time == 0:\n raise RepeatParseError(\n 'Cannot have a backward repeat at time 0')\n sg = self._ns.section_groups.add()\n sg.sections.add(\n section_id=self._ns.section_annotations[-2].section_id)\n sg.num_times = backward_repeats\n elif self._current_time > 0 and new_section_id is not None:\n # There were not backward repeats, but we still want to play the\n # previous section once.\n # If new_section_id is None (implying that a section at the current\n # time was created elsewhere), this is not needed because it should\n # have been done when the section was created.\n sg = self._ns.section_groups.add()\n sg.sections.add(\n section_id=self._ns.section_annotations[-2].section_id)\n sg.num_times = 1\n\n self._current_expected_repeats = forward_repeats\n elif match.re == ABCTune.TEXT_ANNOTATION_PATTERN:\n # Text annotation\n # http://abcnotation.com/wiki/abc:standard:v2.1#chord_symbols\n # http://abcnotation.com/wiki/abc:standard:v2.1#annotations\n annotation = match.group(1)\n ta = self._ns.text_annotations.add()\n ta.time = self._current_time\n ta.text = annotation\n if annotation and annotation[0] in ABCTune.ABC_NOTE_TO_MIDI:\n # http://abcnotation.com/wiki/abc:standard:v2.1#chord_symbols\n ta.annotation_type = (\n music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL)\n else:\n ta.annotation_type = (\n music_pb2.NoteSequence.TextAnnotation.UNKNOWN)\n elif match.re == ABCTune.DECORATION_PATTERN:\n # http://abcnotation.com/wiki/abc:standard:v2.1#decorations\n # We don't currently do anything with decorations.\n pass\n elif match.re == ABCTune.SLUR_PATTERN:\n # http://abcnotation.com/wiki/abc:standard:v2.1#ties_and_slurs\n # We don't currently do anything with slurs.\n pass\n elif match.re == ABCTune.TIE_PATTERN:\n # http://abcnotation.com/wiki/abc:standard:v2.1#ties_and_slurs\n # We don't currently do anything with ties.\n # TODO(fjord): Ideally, we would extend the duration of the previous\n # note to include the duration of the next note.\n pass\n elif match.re == ABCTune.TUPLET_PATTERN:\n raise TupletError('Tuplets are not supported.')\n elif match.re == ABCTune.LINE_CONTINUATION_PATTERN:\n # http://abcnotation.com/wiki/abc:standard:v2.1#typesetting_line-breaks\n # Line continuations are only for typesetting, so we can ignore them.\n pass\n else:\n raise ABCParseError('Unknown regex match!')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#kkey\n KEY_PATTERN = re.compile(\n r'([A-G])\\s*([#b]?)\\s*'\n r'((?:(?:maj|ion|min|aeo|mix|dor|phr|lyd|loc|m)[^ ]*)?)',\n re.IGNORECASE)\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#kkey\n KEY_ACCIDENTALS_PATTERN = re.compile(r'(__|_|=|\\^|\\^\\^)?([A-Ga-g])')\n\n @staticmethod\n def parse_key(key):\n \"\"\"Parse an ABC key string.\"\"\"\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#kkey\n key_match = ABCTune.KEY_PATTERN.match(key)\n if not key_match:\n raise ABCParseError('Could not parse key: {}'.format(key))\n\n key_components = list(key_match.groups())\n\n # Shorten the mode to be at most 3 letters long.\n mode = key_components[2][:3].lower()\n\n # \"Minor\" and \"Aeolian\" are special cases that are abbreviated to 'm'.\n # \"Major\" and \"Ionian\" are special cases that are abbreviated to ''.\n if mode in ('min', 'aeo'):\n mode = 'm'\n elif mode in ('maj', 'ion'):\n mode = ''\n\n 
sig = ABCTune.KEY_TO_SIG[''.join(key_components[0:2] + [mode]).lower()]\n\n proto_key = ABCTune.KEY_TO_PROTO_KEY[''.join(key_components[0:2]).lower()]\n\n if mode == '': # pylint: disable=g-explicit-bool-comparison\n proto_mode = music_pb2.NoteSequence.KeySignature.MAJOR\n elif mode == 'm':\n proto_mode = music_pb2.NoteSequence.KeySignature.MINOR\n elif mode == 'mix':\n proto_mode = music_pb2.NoteSequence.KeySignature.MIXOLYDIAN\n elif mode == 'dor':\n proto_mode = music_pb2.NoteSequence.KeySignature.DORIAN\n elif mode == 'phr':\n proto_mode = music_pb2.NoteSequence.KeySignature.PHRYGIAN\n elif mode == 'lyd':\n proto_mode = music_pb2.NoteSequence.KeySignature.LYDIAN\n elif mode == 'loc':\n proto_mode = music_pb2.NoteSequence.KeySignature.LOCRIAN\n else:\n raise ABCParseError('Unknown mode: {}'.format(mode))\n\n # Match the rest of the string for possible modifications.\n pos = key_match.end()\n exppos = key[pos:].find('exp')\n if exppos != -1:\n # Only explicit accidentals will be used.\n accidentals = ABCTune._sig_to_accidentals(0)\n pos += exppos + 3\n else:\n accidentals = ABCTune._sig_to_accidentals(sig)\n\n while pos < len(key):\n note_match = ABCTune.KEY_ACCIDENTALS_PATTERN.match(key, pos)\n if note_match:\n pos += len(note_match.group(0))\n\n note = note_match.group(2).upper()\n if note_match.group(1):\n if note_match.group(1) == '^':\n accidentals[note] = 1\n elif note_match.group(1) == '_':\n accidentals[note] = -1\n elif note_match.group(1) == '=':\n accidentals[note] = 0\n else:\n raise ABCParseError(\n 'Invalid accidental: {}'.format(note_match.group(1)))\n else:\n pos += 1\n\n return accidentals, proto_key, proto_mode\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#outdated_information_field_syntax\n # This syntax is deprecated but must still be supported.\n TEMPO_DEPRECATED_PATTERN = re.compile(r'C?\\s*=?\\s*(\\d+)$')\n\n # http://abcnotation.com/wiki/abc:standard:v2.1#qtempo\n TEMPO_PATTERN = re.compile(r'(?:\"[^\"]*\")?\\s*((?:\\d+/\\d+\\s*)+)\\s*=\\s*(\\d+)')\n TEMPO_PATTERN_STRING_ONLY = re.compile(r'\"([^\"]*)\"$')\n\n def _parse_information_field(self, field_name, field_content):\n \"\"\"Parses information field.\"\"\"\n # http://abcnotation.com/wiki/abc:standard:v2.1#information_fields\n if field_name == 'A':\n pass\n elif field_name == 'B':\n pass\n elif field_name == 'C':\n # Composer\n # http://abcnotation.com/wiki/abc:standard:v2.1#ccomposer\n self._ns.sequence_metadata.composers.append(field_content)\n\n # The first composer will be set as the primary artist.\n if not self._ns.sequence_metadata.artist:\n self._ns.sequence_metadata.artist = field_content\n elif field_name == 'D':\n pass\n elif field_name == 'F':\n pass\n elif field_name == 'G':\n pass\n elif field_name == 'H':\n pass\n elif field_name == 'I':\n pass\n elif field_name == 'K':\n # Key\n # http://abcnotation.com/wiki/abc:standard:v2.1#kkey\n accidentals, proto_key, proto_mode = ABCTune.parse_key(field_content)\n self._accidentals = accidentals\n ks = self._ns.key_signatures.add()\n ks.key = proto_key\n ks.mode = proto_mode\n ks.time = self._current_time\n elif field_name == 'L':\n # Unit note length\n # http://abcnotation.com/wiki/abc:standard:v2.1#lunit_note_length\n length = field_content.split('/', 1)\n\n # Handle the case of L:1 being equivalent to L:1/1\n if len(length) < 2:\n length.append('1')\n\n try:\n numerator = int(length[0])\n denominator = int(length[1])\n except ValueError as e:\n raise ABCParseError(\n e, 'Could not parse unit note length: {}'.format(field_content))\n\n 
self._current_unit_note_length = Fraction(numerator, denominator)\n elif field_name == 'M':\n # Meter\n # http://abcnotation.com/wiki/abc:standard:v2.1#mmeter\n if field_content.upper() == 'C':\n ts = self._ns.time_signatures.add()\n ts.numerator = 4\n ts.denominator = 4\n ts.time = self._current_time\n elif field_content.upper() == 'C|':\n ts = self._ns.time_signatures.add()\n ts.numerator = 2\n ts.denominator = 2\n ts.time = self._current_time\n elif field_content.lower() == 'none':\n pass\n else:\n timesig = field_content.split('/', 1)\n if len(timesig) != 2:\n raise ABCParseError(\n 'Could not parse meter: {}'.format(field_content))\n\n ts = self._ns.time_signatures.add()\n ts.time = self._current_time\n try:\n ts.numerator = int(timesig[0])\n ts.denominator = int(timesig[1])\n except ValueError as e:\n raise ABCParseError(\n e, 'Could not parse meter: {}'.format(field_content))\n elif field_name == 'm':\n pass\n elif field_name == 'N':\n pass\n elif field_name == 'O':\n pass\n elif field_name == 'P':\n # TODO(fjord): implement part parsing.\n raise PartError('ABC parts are not yet supported.')\n elif field_name == 'Q':\n # Tempo\n # http://abcnotation.com/wiki/abc:standard:v2.1#qtempo\n\n tempo_match = ABCTune.TEMPO_PATTERN.match(field_content)\n deprecated_tempo_match = ABCTune.TEMPO_DEPRECATED_PATTERN.match(\n field_content)\n tempo_string_only_match = ABCTune.TEMPO_PATTERN_STRING_ONLY.match(\n field_content)\n if tempo_match:\n tempo_rate = int(tempo_match.group(2))\n tempo_unit = Fraction(0)\n for beat in tempo_match.group(1).split():\n tempo_unit += Fraction(beat)\n elif deprecated_tempo_match:\n # http://abcnotation.com/wiki/abc:standard:v2.1#outdated_information_field_syntax\n # In the deprecated syntax, the tempo is interpreted based on the unit\n # note length, which is potentially dependent on the current meter.\n # Set tempo_unit to None for now, and the current unit note length will\n # be filled in later.\n tempo_unit = None\n tempo_rate = int(deprecated_tempo_match.group(1))\n elif tempo_string_only_match:\n tf.logging.warning(\n 'Ignoring string-only tempo marking: {}'.format(field_content))\n return\n else:\n raise ABCParseError(\n 'Could not parse tempo: {}'.format(field_content))\n\n if self._in_header:\n # If we're in the header, save these until we've finished parsing the\n # header. The deprecated syntax relies on the unit note length and\n # meter, which may not be set yet. 
At the end of the header, we'll fill\n # in the necessary information and add these.\n self._header_tempo_unit = tempo_unit\n self._header_tempo_rate = tempo_rate\n else:\n self._add_tempo(tempo_unit, tempo_rate)\n elif field_name == 'R':\n pass\n elif field_name == 'r':\n pass\n elif field_name == 'S':\n pass\n elif field_name == 's':\n pass\n elif field_name == 'T':\n # Title\n # http://abcnotation.com/wiki/abc:standard:v2.1#ttune_title\n\n if not self._in_header:\n # TODO(fjord): Non-header titles are used to name parts of tunes, but\n # NoteSequence doesn't currently have any place to put that information.\n tf.logging.warning(\n 'Ignoring non-header title: {}'.format(field_content))\n return\n\n # If there are multiple titles, separate them with semicolons.\n if self._ns.sequence_metadata.title:\n self._ns.sequence_metadata.title += '; ' + field_content\n else:\n self._ns.sequence_metadata.title = field_content\n elif field_name == 'U':\n pass\n elif field_name == 'V':\n raise MultiVoiceError(\n 'Multi-voice files are not currently supported.')\n elif field_name == 'W':\n pass\n elif field_name == 'w':\n pass\n elif field_name == 'X':\n # Reference number\n # http://abcnotation.com/wiki/abc:standard:v2.1#xreference_number\n self._ns.reference_number = int(field_content)\n elif field_name == 'Z':\n pass\n else:\n tf.logging.warning(\n 'Unknown field name {} with content {}'.format(\n field_name, field_content))\n", "id": "6304362", "language": "Python", "matching_score": 2.066175937652588, "max_stars_count": 1, "path": "magenta/music/abc_parser.py" }, { "content": "import threading\nimport time\nfrom typing import List\n\nimport tensorflow as tf\nfrom magenta.music import DEFAULT_QUARTERS_PER_MINUTE\nfrom magenta.music import DEFAULT_STEPS_PER_BAR\nfrom magenta.music import DEFAULT_STEPS_PER_QUARTER\n\n\nclass Timing:\n\n def __init__(self,\n qpm: float = DEFAULT_QUARTERS_PER_MINUTE,\n steps_per_quarter: int = DEFAULT_STEPS_PER_QUARTER,\n steps_per_bar: int = DEFAULT_STEPS_PER_BAR):\n self.qpm = qpm\n self.steps_per_quarter = steps_per_quarter\n self.steps_per_bar = steps_per_bar\n\n def get_seconds_per_step(self) -> float:\n return 60.0 / self.qpm / self.steps_per_quarter\n\n def get_seconds_per_bar(self) -> float:\n return self.steps_per_bar * self.get_seconds_per_step()\n\n def get_expected_start_time(self,\n bar_count: int) -> float:\n return bar_count * self.get_seconds_per_bar()\n\n def get_relative_wall_time(self,\n wall_start_time: float) -> float:\n return time.time() - wall_start_time\n\n def get_diff_time(self,\n wall_start_time: float,\n bar_count: int) -> float:\n expected_start_time = self.get_expected_start_time(bar_count)\n relative_wall_time = self.get_relative_wall_time(wall_start_time)\n return expected_start_time - relative_wall_time\n\n def get_sleep_time(self,\n wall_start_time: float) -> float:\n seconds_per_bar = self.get_seconds_per_bar()\n relative_wall_time = self.get_relative_wall_time(wall_start_time)\n return seconds_per_bar - (relative_wall_time % seconds_per_bar)\n\n def get_timing_args(self,\n wall_start_time: float,\n bar_count: int) -> List[tuple]:\n return [\n (\"qpm\", self.qpm),\n (\"wall_start_time\", wall_start_time),\n (\"bar_count\", bar_count),\n (\"seconds_per_step\", self.get_seconds_per_step()),\n (\"seconds_per_bar\", self.get_seconds_per_bar()),\n (\"expected_start_time\", self.get_expected_start_time(bar_count)),\n (\"relative_wall_time\", self.get_relative_wall_time(wall_start_time)),\n (\"diff_time\", 
self.get_diff_time(wall_start_time, bar_count)),\n (\"sleep_time\", self.get_sleep_time(wall_start_time)),\n ]\n\n\nclass Metronome(threading.Thread):\n\n def __init__(self,\n bar_start_event,\n timing: Timing):\n super(Metronome, self).__init__()\n self._stop_signal = False\n self._bar_start_event = bar_start_event\n self._timing = timing\n\n def stop(self):\n self._stop_signal = True\n\n def run(self):\n wall_start_time = time.time()\n\n bar_count = 0\n while not self._stop_signal:\n tf.logging.debug(\"Waking up \" + str(\n self._timing.get_timing_args(wall_start_time, bar_count)))\n\n # Releases the waiting threads at the beginning of the bar\n self._bar_start_event.set()\n\n # Clears the signal so that threads block when they loop\n self._bar_start_event.clear()\n\n # Sleeps for the proper remaining time by removing the time it\n # took to execute this loop out of the expected next tick\n sleep_time = self._timing.get_sleep_time(wall_start_time)\n time.sleep(sleep_time)\n\n bar_count = bar_count + 1\n\n # Releases the waiting threads to exit\n self._bar_start_event.set()\n", "id": "8917674", "language": "Python", "matching_score": 2.6021502017974854, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/timing.py" }, { "content": "import os\nimport threading\nimport time\n\nimport magenta.music as mm\nimport pretty_midi as pm\nimport tensorflow as tf\nfrom magenta.interfaces.midi.midi_interaction import adjust_sequence_times\nfrom magenta.protobuf import music_pb2\nfrom visual_midi import Plotter, Preset\n\nimport sequences\nfrom timing import Timing\nfrom type import ActionType\nfrom ws import ActionServer\n\n\nclass SequenceLooper(threading.Thread):\n def __init__(self,\n name: str,\n bar_start_event,\n action_server: ActionServer,\n midi_hub,\n bundle_name: str,\n config_name: str,\n timing: Timing,\n midi_channel: int = 1,\n bar_per_loop: int = 2):\n super(SequenceLooper, self).__init__()\n self.name = name\n self._stop_signal = False\n self._bar_start_event = bar_start_event\n self._action_server = action_server\n self._midi_hub = midi_hub\n self._bundle_filename = bundle_name + \".mag\"\n self._config_name = config_name\n self._timing = timing\n self._midi_channel = midi_channel\n self._bar_per_loop = bar_per_loop\n model_dir = os.path.join(\"output\", \"models\")\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n self._output_plot = os.path.join(\"output\", \"models\", self.name + \".html\")\n self._output_midi = os.path.join(\"output\", \"models\", self.name + \".mid\")\n self._plotter = Plotter(plot_max_length_bar=bar_per_loop,\n live_reload=True,\n preset_name=\"PRESET_SMALL\")\n\n def stop(self):\n self._stop_signal = True\n\n def run(self):\n sequence = music_pb2.NoteSequence()\n player = self._midi_hub.start_playback(sequence, allow_updates=True)\n player._channel = self._midi_channel\n\n pretty_midi = pm.PrettyMIDI()\n pretty_midi.instruments.append(pm.Instrument(0))\n\n # Wait for the dreamer and store the time with the delta\n wall_start_time = time.time()\n self._bar_start_event.wait()\n\n bar_count = 0\n while not self._stop_signal:\n # Number of seconds we should be at the beginning of this loop\n expected_start_time = self._timing.get_expected_start_time(bar_count)\n # Number of actual seconds since we started this thread from wall clock,\n # which is smaller then the expected start time\n # The difference is between: the actual wakeup time and the expected\n # (calculated) start time. 
By keeping this we can adjust the sequence\n # according to the drift.\n diff_time = self._timing.get_diff_time(wall_start_time, bar_count)\n\n tf.logging.debug(\"Playing \" + str(self._timing.get_timing_args(\n wall_start_time, bar_count)))\n\n # Player\n sequence_adjusted = music_pb2.NoteSequence()\n sequence_adjusted.CopyFrom(sequence)\n sequence_adjusted = adjust_sequence_times(sequence_adjusted,\n wall_start_time - diff_time)\n player.update_sequence(sequence_adjusted, start_time=expected_start_time)\n\n # Plotter\n pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)\n self._plotter.save(pretty_midi, self._output_plot)\n pretty_midi.write(self._output_midi)\n\n # Sets timing\n seconds_per_bar = self._timing.get_seconds_per_bar()\n seconds_per_loop = self._bar_per_loop * seconds_per_bar\n loop_start_time = expected_start_time\n loop_end_time = loop_start_time + seconds_per_loop\n generation_start_time = loop_end_time\n generation_end_time = generation_start_time + seconds_per_loop\n\n action = self._action_server.context.get(self.name, None)\n\n tf.logging.debug(str(action) + \" \" + str([\n (\"expected_start_time\", expected_start_time),\n (\"loop_start_time\", loop_start_time),\n (\"loop_end_time\", loop_end_time),\n (\"generation_start_time\", generation_start_time),\n (\"generation_end_time\", generation_end_time)]))\n\n if not action:\n pass\n elif action is ActionType.LOOP:\n sequence = sequences.loop(sequence,\n loop_start_time,\n loop_end_time,\n seconds_per_loop)\n elif action is ActionType.GENERATE:\n sequence = sequences.generate(sequence,\n self.name,\n self._bundle_filename,\n self._config_name,\n generation_start_time,\n generation_end_time)\n elif action is ActionType.GENERATE_ONCE:\n sequence = sequences.generate(sequence,\n self.name,\n self._bundle_filename,\n self._config_name,\n generation_start_time,\n generation_end_time)\n self._action_server.context[self.name] = ActionType.LOOP\n elif action is ActionType.RESET_ONCE:\n sequence = sequences.reset(sequence,\n loop_start_time,\n loop_end_time,\n seconds_per_loop)\n self._action_server.context[self.name] = ActionType.LOOP\n elif action is ActionType.RESET_GENERATE:\n sequence = sequences.reset(sequence,\n loop_start_time,\n loop_end_time,\n seconds_per_loop)\n self._action_server.context[self.name] = ActionType.GENERATE_ONCE\n else:\n raise Exception(f\"Unknown action {action}\")\n\n while True:\n # Unlock at the start of the bar\n self._bar_start_event.wait()\n bar_count += 1\n if bar_count % self._bar_per_loop == 0:\n break\n", "id": "12074182", "language": "Python", "matching_score": 3.6264374256134033, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/model.py" }, { "content": "from enum import Enum, auto\n\n\nclass ActionType(Enum):\n LOOP = auto()\n GENERATE = auto()\n GENERATE_ONCE = auto()\n RESET_ONCE = auto()\n RESET_GENERATE = auto()\n STOP = auto()\n", "id": "9567196", "language": "Python", "matching_score": 0.14913995563983917, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/type.py" }, { "content": "import platform\nimport threading\nimport time\n\nimport mido\nimport tensorflow as tf\nfrom magenta.interfaces.midi import midi_hub\nfrom magenta.models.drums_rnn import drums_rnn_sequence_generator\nfrom magenta.models.melody_rnn import melody_rnn_sequence_generator\n\nfrom constants import MIDI_INPUT_PORT, MIDI_OUTPUT_PORT\nfrom model import SequenceLooper\nfrom timing import Metronome\nfrom timing import Timing\nfrom ws import 
ActionServer\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string(\n \"log\", \"INFO\",\n \"The threshold for what messages will be logged. DEBUG, INFO, WARN, ERROR, \"\n \"or FATAL.\")\n\n\ndef app(unused_argv):\n tf.logging.debug(\"Starting app\")\n\n # Start action server\n action_server = ActionServer()\n action_server.start()\n\n # Init midi ports, keep direct references to output_ports for\n # direct sending without the hub player\n if platform.system() == \"Windows\":\n input_ports = [port for port in midi_hub.get_available_input_ports()\n if MIDI_INPUT_PORT in port]\n output_ports = [port for port in midi_hub.get_available_output_ports()\n if MIDI_OUTPUT_PORT in port]\n if len(input_ports) is not 1 or len(output_ports) is not 1:\n raise Exception(f\"Need exactly 1 midi input ({input_ports}) \"\n f\"matching {MIDI_INPUT_PORT}\"\n f\"and 1 midi output port ({output_ports}) \"\n f\"matching {MIDI_OUTPUT_PORT},\"\n f\"you can use LoopMIDI for that\")\n else:\n input_ports = [MIDI_INPUT_PORT]\n output_ports = [MIDI_OUTPUT_PORT]\n hub = midi_hub.MidiHub(input_ports, output_ports, None)\n output_port = hub._outport.ports[0]\n\n # Panic to stop all current messages (note off everywhere)\n [output_port.send(message) for message in mido.ports.panic_messages()]\n\n # Synchronise event for all the loopers, controlled by the metronome\n bar_start_event = threading.Event()\n\n # Common stuff\n qpm = 80\n timing = Timing(qpm)\n\n loopers = []\n try:\n # Init and start the loopers, they block on the event\n drum_looper = SequenceLooper(\n \"drums\", bar_start_event, action_server, hub,\n \"drum_kit_rnn\", \"drum_kit\",\n timing, midi_channel=9, bar_per_loop=2)\n melody_looper = SequenceLooper(\n \"melody\", bar_start_event, action_server, hub,\n \"attention_rnn\", \"attention_rnn\",\n timing, midi_channel=0, bar_per_loop=4)\n\n loopers.append(drum_looper)\n loopers.append(melody_looper)\n [looper.start() for looper in loopers]\n\n tf.logging.debug(\"Loopers started \" + str([\n (\"drum_looper\", drum_looper),\n (\"melody_looper\", melody_looper)]))\n\n # Start metronome (wait to make sure everything is started)\n time.sleep(1)\n metronome = Metronome(bar_start_event, timing)\n loopers.append(metronome)\n metronome.start()\n\n tf.logging.debug(\"Metronome started \" + str([(\"metronome\", metronome)]))\n\n # Wait for the loopers\n [looper.join() for looper in loopers]\n except KeyboardInterrupt:\n print(\"SIGINT received, stopping action server, loopers and stuff\")\n action_server.stop()\n [looper.stop() for looper in loopers]\n return 1\n\n return 0\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(FLAGS.log)\n tf.app.run(app)\n", "id": "10279434", "language": "Python", "matching_score": 3.2649762630462646, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/app.py" }, { "content": "# MIDI input and output ports, on windows you'll have to\n# start them manually using LoopMIDI or another software\nMIDI_INPUT_PORT: str = \"magenta_in\"\nMIDI_OUTPUT_PORT: str = \"magenta_out\"\n\n# The server GUI for the app\nWS_SERVER_HOST: str = \"127.0.0.1\"\nWS_SERVER_PORT: int = 5000\n", "id": "9700419", "language": "Python", "matching_score": 1.875874638557434, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/constants.py" }, { "content": "import threading\nimport webbrowser\n\nfrom eventlet import wsgi\nimport eventlet\nimport socketio\nimport tensorflow as tf\n\nfrom constants import WS_SERVER_HOST, WS_SERVER_PORT\nfrom type import 
ActionType\n\n\nclass ActionServer(threading.Thread):\n\n def __init__(self,\n host: str = WS_SERVER_HOST,\n port: int = WS_SERVER_PORT):\n super(ActionServer, self).__init__()\n self.context = {}\n self._host = host\n self._port = port\n self._socket = socketio.Server()\n self._app = socketio.WSGIApp(self._socket, static_files={\n '/': 'app.html',\n '/static/js/jquery': 'node_modules/jquery/dist',\n '/static/js/socket.io': 'node_modules/socket.io-client/dist',\n '/output/models/drums': 'output/models/drums.html',\n '/output/models/melody': 'output/models/melody.html',\n '/output/models/bass': 'output/models/bass.html',\n })\n namespace = ServerNamespace('/', self.context, self._socket)\n self._socket.register_namespace(namespace)\n\n def run(self):\n wsgi.server(eventlet.listen((self._host, self._port)), self._app,\n log=None, log_output=False)\n webbrowser.open(f\"http://{self._host}:{self._port}\", new=2)\n\n def stop(self):\n eventlet.wsgi.is_accepting = False\n del self._socket\n del self._app\n\nclass ServerNamespace(socketio.Namespace):\n\n def __init__(self, namespace, context, socket):\n super(ServerNamespace, self).__init__(namespace)\n self.context = context\n self.socket = socket\n\n def on_connect(self, sid, environ):\n tf.logging.debug(f\"Client {sid} with {environ} connected\")\n\n def on_disconnect(self, sid):\n tf.logging.debug(f\"Client {sid} disconnected\")\n\n def on_model(self, sid, data):\n tf.logging.info(f\"On model {data} as {sid}\")\n model, value = data[\"model\"], data[\"value\"]\n try:\n value = ActionType[value.upper()]\n except KeyError:\n raise Exception(f\"Unknown action {value}\")\n self.context[model] = value\n", "id": "1521217", "language": "Python", "matching_score": 1.2074776887893677, "max_stars_count": 5, "path": "conferences/music-generation-with-magenta/code/ws.py" }, { "content": "\"\"\"Small example OSC server\n\nThis program listens to several addresses, and prints some information about\nreceived packets.\n\npython3 oscserver.py --port 57120\n\"\"\"\nimport argparse\nimport math\n\nfrom pythonosc import dispatcher\nfrom pythonosc import osc_server\n\ndef print_volume_handler(unused_addr, args, volume):\n print(\"[{0}] ~ {1}\".format(args[0], volume))\n\ndef print_compute_handler(unused_addr, args, volume):\n try:\n print(\"[{0}] ~ {1}\".format(args[0], args[1](volume)))\n except ValueError: pass\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ip\",\n default=\"127.0.0.1\", help=\"The ip to listen on\")\n parser.add_argument(\"--port\",\n type=int, default=5005, help=\"The port to listen on\")\n args = parser.parse_args()\n\n dispatcher = dispatcher.Dispatcher()\n dispatcher.map(\"*\", print)\n\n server = osc_server.ThreadingOSCUDPServer(\n (args.ip, args.port), dispatcher)\n print(\"Serving on {}\".format(server.server_address))\n server.serve_forever()\n\n", "id": "12223657", "language": "Python", "matching_score": 1.0850307941436768, "max_stars_count": 5, "path": "projects/algorave/oscserver.py" } ]
2.180781
minji-o-j
[ { "content": "anchors = [[1.3221, 1.73145], [3.19275, 4.00944], [5.05587, 8.09892], [9.47112, 4.84053], [11.2364, 10.0071]]\n\nobject_scale = 5\nnoobject_scale = 1\nclass_scale = 1\ncoord_scale = 1\n\nsaturation = 1.5\nexposure = 1.5\nhue = .1\n\njitter = 0.3\n\nthresh = .6\n\nbatch_size = 16\n\nlr = 0.0001\n\ndecay_lrs = {\n 60: 0.00001,\n 90: 0.000001\n}\n\nmomentum = 0.9\nweight_decay = 0.0005\n\n\n# multi-scale training:\n# {k: epoch, v: scale range}\nmulti_scale = True\n\n# number of steps to change input size\nscale_step = 40\n\nscale_range = (3, 4)\n\nepoch_scale = {\n 1: (3, 4),\n 15: (2, 5),\n 30: (1, 6),\n 60: (0, 7),\n 75: (0, 9)\n}\n\ninput_sizes = [(320, 320),\n (352, 352),\n (384, 384),\n (416, 416),\n (448, 448),\n (480, 480),\n (512, 512),\n (544, 544),\n (576, 576)]\n\ninput_size = (416, 416)\n\ntest_input_size = (416, 416)\n\nstrides = 32\n\ndebug = False\n\n", "id": "5780193", "language": "Python", "matching_score": 1.2409833669662476, "max_stars_count": 62, "path": "config/config.py" }, { "content": "# -*- coding: utf-8 -*- \n# --------------------------------------------------------\n# Pytorch Yolov2\n# Licensed under The MIT License [see LICENSE for details]\n# Written by <NAME>\n# --------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nfrom util.bbox import generate_all_anchors, xywh2xxyy, box_transform_inv, xxyy2xywh\nfrom util.bbox import box_ious\nimport time\n#from config import config as cfg\nimport config as cfg\n\ndef yolo_filter_boxes(boxes_pred, conf_pred, classes_pred, confidence_threshold=0.6):\n \"\"\"\n Filter boxes whose confidence is lower than a given threshold\n\n Arguments:\n boxes_pred -- tensor of shape (H * W * num_anchors, 4) (x1, y1, x2, y2) predicted boxes\n conf_pred -- tensor of shape (H * W * num_anchors, 1)\n classes_pred -- tensor of shape (H * W * num_anchors, num_classes)\n threshold -- float, threshold used to filter boxes\n\n Returns:\n filtered_boxes -- tensor of shape (num_positive, 4)\n filtered_conf -- tensor of shape (num_positive, 1)\n filtered_cls_max_conf -- tensor of shape (num_positive, num_classes)\n filtered_cls_max_id -- tensor of shape (num_positive, num_classes)\n \"\"\"\n\n # multiply class scores and objectiveness score\n # use class confidence score\n # TODO: use objectiveness (IOU) score or class confidence score\n cls_max_conf, cls_max_id = torch.max(classes_pred, dim=-1, keepdim=True)\n cls_conf = conf_pred * cls_max_conf\n\n pos_inds = (cls_conf > confidence_threshold).view(-1)\n\n filtered_boxes = boxes_pred[pos_inds, :]\n\n filtered_conf = conf_pred[pos_inds, :]\n\n filtered_cls_max_conf = cls_max_conf[pos_inds, :]\n\n filtered_cls_max_id = cls_max_id[pos_inds, :]\n\n return filtered_boxes, filtered_conf, filtered_cls_max_conf, filtered_cls_max_id.float()\n\n\ndef yolo_nms(boxes, scores, threshold):\n \"\"\"\n Apply Non-Maximum-Suppression on boxes according to their scores\n\n Arguments:\n boxes -- tensor of shape (N, 4) (x1, y1, x2, y2)\n scores -- tensor of shape (N) confidence\n threshold -- float. 
NMS threshold\n\n Returns:\n keep -- tensor of shape (None), index of boxes which should be retain.\n \"\"\"\n\n score_sort_index = torch.sort(scores, dim=0, descending=True)[1]\n\n keep = []\n\n while score_sort_index.numel() > 0:\n\n i = score_sort_index[0]\n keep.append(i)\n\n if score_sort_index.numel() == 1:\n break\n\n cur_box = boxes[score_sort_index[0], :].view(-1, 4)\n res_box = boxes[score_sort_index[1:], :].view(-1, 4)\n\n ious = box_ious(cur_box, res_box).view(-1)\n\n inds = torch.nonzero(ious < threshold).squeeze()\n\n score_sort_index = score_sort_index[inds + 1].view(-1)\n\n return torch.LongTensor(keep)\n\n\ndef generate_prediction_boxes(deltas_pred):\n \"\"\"\n Apply deltas prediction to pre-defined anchors\n\n Arguments:\n deltas_pred -- tensor of shape (H * W * num_anchors, 4) σ(t_x), σ(t_y), σ(t_w), σ(t_h)\n\n Returns:\n boxes_pred -- tensor of shape (H * W * num_anchors, 4) (x1, y1, x2, y2)\n \"\"\"\n\n H = int(cfg.test_input_size[0] / cfg.strides)\n W = int(cfg.test_input_size[1] / cfg.strides)\n\n anchors = torch.FloatTensor(cfg.anchors)\n all_anchors_xywh = generate_all_anchors(anchors, H, W) # shape: (H * W * num_anchors, 4), format: (x, y, w, h)\n\n all_anchors_xywh = deltas_pred.new(*all_anchors_xywh.size()).copy_(all_anchors_xywh)\n\n boxes_pred = box_transform_inv(all_anchors_xywh, deltas_pred)\n\n return boxes_pred\n\n\ndef scale_boxes(boxes, im_info):\n \"\"\"\n scale predicted boxes\n\n Arguments:\n boxes -- tensor of shape (N, 4) xxyy format\n im_info -- dictionary {width:, height:}\n\n Returns:\n scaled_boxes -- tensor of shape (N, 4) xxyy format\n\n \"\"\"\n\n h = im_info['height']\n w = im_info['width']\n\n input_h, input_w = cfg.test_input_size\n scale_h, scale_w = input_h / h, input_w / w\n\n # scale the boxes\n boxes *= cfg.strides\n\n boxes[:, 0::2] /= scale_w\n boxes[:, 1::2] /= scale_h\n\n boxes = xywh2xxyy(boxes)\n\n # clamp boxes\n boxes[:, 0::2].clamp_(0, w-1)\n boxes[:, 1::2].clamp_(0, h-1)\n\n return boxes\n\n\ndef yolo_eval(yolo_output, im_info, conf_threshold=0.6, nms_threshold=0.4):\n \"\"\"\n Evaluate the yolo output, generate the final predicted boxes\n\n Arguments:\n yolo_output -- list of tensors (deltas_pred, conf_pred, classes_pred)\n\n deltas_pred -- tensor of shape (H * W * num_anchors, 4) σ(t_x), σ(t_y), σ(t_w), σ(t_h)\n conf_pred -- tensor of shape (H * W * num_anchors, 1)\n classes_pred -- tensor of shape (H * W * num_anchors, num_classes)\n\n im_info -- dictionary {w:, h:}\n\n threshold -- float, threshold used to filter boxes\n\n\n Returns:\n detections -- tensor of shape (None, 7) (x1, y1, x2, y2, cls_conf, cls)\n \"\"\"\n\n deltas = yolo_output[0].cpu()\n conf = yolo_output[1].cpu()\n classes = yolo_output[2].cpu()\n\n num_classes = classes.size(1)\n # apply deltas to anchors\n\n boxes = generate_prediction_boxes(deltas)\n\n if cfg.debug:\n print('check box: ', boxes.view(13*13, 5, 4).permute(1, 0, 2).contiguous().view(-1,4)[0:10,:])\n print('check conf: ', conf.view(13*13, 5).permute(1,0).contiguous().view(-1)[:10])\n\n # filter boxes on confidence score\n boxes, conf, cls_max_conf, cls_max_id = yolo_filter_boxes(boxes, conf, classes, conf_threshold)\n\n # no detection !\n if boxes.size(0) == 0:\n return []\n\n # scale boxes\n boxes = scale_boxes(boxes, im_info)\n\n if cfg.debug:\n all_boxes = torch.cat([boxes, conf, cls_max_conf, cls_max_id], dim=1)\n print('check all boxes: ', all_boxes)\n print('check all boxes len: ', len(all_boxes))\n #\n # apply nms\n # keep = yolo_nms(boxes, conf.view(-1), nms_threshold)\n # 
boxes_keep = boxes[keep, :]\n # conf_keep = conf[keep, :]\n # cls_max_conf = cls_max_conf[keep, :]\n # cls_max_id = cls_max_id.view(-1, 1)[keep, :]\n #\n # if cfg.debug:\n # print('check nms all boxes len: ', len(boxes_keep))\n #\n # seq = [boxes_keep, conf_keep, cls_max_conf, cls_max_id.float()]\n #\n # return torch.cat(seq, dim=1)\n\n detections = []\n\n cls_max_id = cls_max_id.view(-1)\n\n # apply NMS classwise\n for cls in range(num_classes):\n cls_mask = cls_max_id == cls\n inds = torch.nonzero(cls_mask).squeeze()\n\n if inds.numel() == 0:\n continue\n\n boxes_pred_class = boxes[inds, :].view(-1, 4)\n conf_pred_class = conf[inds, :].view(-1, 1)\n cls_max_conf_class = cls_max_conf[inds].view(-1, 1)\n classes_class = cls_max_id[inds].view(-1, 1)\n\n nms_keep = yolo_nms(boxes_pred_class, conf_pred_class.view(-1), nms_threshold)\n\n boxes_pred_class_keep = boxes_pred_class[nms_keep, :]\n conf_pred_class_keep = conf_pred_class[nms_keep, :]\n cls_max_conf_class_keep = cls_max_conf_class.view(-1, 1)[nms_keep, :]\n classes_class_keep = classes_class.view(-1, 1)[nms_keep, :]\n\n seq = [boxes_pred_class_keep, conf_pred_class_keep, cls_max_conf_class_keep, classes_class_keep.float()]\n\n detections_cls = torch.cat(seq, dim=-1)\n detections.append(detections_cls)\n\n return torch.cat(detections, dim=0)\n\n\n\n\n\n\n\n\n\n\n", "id": "11079997", "language": "Python", "matching_score": 1.7623578310012817, "max_stars_count": 1, "path": "yolo_eval.py" }, { "content": "# --------------------------------------------------------\n# Pytorch Yolov2\n# Licensed under The MIT License [see LICENSE for details]\n# Written by <NAME>\n# --------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch.nn as nn\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch\nimport torch.nn.functional as F\nfrom util.network import WeightLoader\n\n\ndef conv_bn_leaky(in_channels, out_channels, kernel_size, return_module=False):\n padding = int((kernel_size - 1) / 2)\n layers = [nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,\n stride=1, padding=padding, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.LeakyReLU(0.1, inplace=True)]\n if return_module:\n return nn.Sequential(*layers)\n else:\n return layers\n\n\nclass GlobalAvgPool2d(nn.Module):\n def __init__(self):\n super(GlobalAvgPool2d, self).__init__()\n\n def forward(self, x):\n N = x.data.size(0)\n C = x.data.size(1)\n H = x.data.size(2)\n W = x.data.size(3)\n x = F.avg_pool2d(x, (H, W))\n x = x.view(N, C)\n return x\n\n\nclass Darknet19(nn.Module):\n\n cfg = {\n 'layer0': [32],\n 'layer1': ['M', 64],\n 'layer2': ['M', 128, 64, 128],\n 'layer3': ['M', 256, 128, 256],\n 'layer4': ['M', 512, 256, 512, 256, 512],\n 'layer5': ['M', 1024, 512, 1024, 512, 1024]\n }\n\n def __init__(self, num_classes=1000):\n super(Darknet19, self).__init__()\n self.in_channels = 3\n\n self.layer0 = self._make_layers(self.cfg['layer0'])\n self.layer1 = self._make_layers(self.cfg['layer1'])\n self.layer2 = self._make_layers(self.cfg['layer2'])\n self.layer3 = self._make_layers(self.cfg['layer3'])\n self.layer4 = self._make_layers(self.cfg['layer4'])\n self.layer5 = self._make_layers(self.cfg['layer5'])\n\n self.conv = nn.Conv2d(self.in_channels, num_classes, kernel_size=1, stride=1)\n self.avgpool = GlobalAvgPool2d()\n self.softmax = nn.Softmax(dim=1)\n\n def forward(self, x):\n x = self.layer0(x)\n x = self.layer1(x)\n x = self.layer2(x)\n 
x = self.layer3(x)\n x = self.layer4(x)\n x = self.layer5(x)\n\n x = self.conv(x)\n x = self.avgpool(x)\n x = self.softmax(x)\n\n return x\n\n def _make_layers(self, layer_cfg):\n layers = []\n\n # set the kernel size of the first conv block = 3\n kernel_size = 3\n for v in layer_cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n layers += conv_bn_leaky(self.in_channels, v, kernel_size)\n kernel_size = 1 if kernel_size == 3 else 3\n self.in_channels = v\n return nn.Sequential(*layers)\n\n # very ugly code !! need to reconstruct\n def load_weights(self, weights_file):\n weights_loader = WeightLoader()\n weights_loader.load(self, weights_file)\n\nif __name__ == '__main__':\n im = np.random.randn(1, 3, 224, 224)\n im_variable = Variable(torch.from_numpy(im)).float()\n model = Darknet19()\n out = model(im_variable)\n print(out.size())\n print(model)\n", "id": "498864", "language": "Python", "matching_score": 1.011307716369629, "max_stars_count": 1, "path": "darknet.py" }, { "content": "\"\"\"Transform a roidb into a trainable roidb by adding a bunch of metadata.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport cv2\nimport PIL\nfrom PIL import Image\nimport numpy as np\nfrom torch.utils.data import Dataset\n#from config import config as cfg\nfrom util.augmentation import augment_img\nimport config as cfg\n\nclass RoiDataset(Dataset):\n def __init__(self, imdb, train=True):\n super(RoiDataset, self).__init__()\n self._imdb = imdb\n self._roidb = imdb.roidb\n self.train = train\n self._image_paths = [self._imdb.image_path_at(i) for i in range(len(self._roidb))]\n\n def roi_at(self, i):\n image_path = self._image_paths[i]\n im_data = Image.open(image_path)\n boxes = self._roidb[i]['boxes']\n gt_classes = self._roidb[i]['gt_classes']\n\n return im_data, boxes, gt_classes\n\n def __getitem__(self, i):\n im_data, boxes, gt_classes = self.roi_at(i)\n # w, h\n im_info = torch.FloatTensor([im_data.size[0], im_data.size[1]])\n\n if self.train:\n im_data, boxes, gt_classes = augment_img(im_data, boxes, gt_classes)\n\n w, h = im_data.size[0], im_data.size[1]\n boxes[:, 0::2] = np.clip(boxes[:, 0::2] / w, 0.001, 0.999)\n boxes[:, 1::2] = np.clip(boxes[:, 1::2] / h, 0.001, 0.999)\n\n # resize image\n input_h, input_w = cfg.input_size\n im_data = im_data.resize((input_w, input_h))\n im_data_resize = torch.from_numpy(np.array(im_data)).float() / 255\n im_data_resize = im_data_resize.permute(2, 0, 1)\n boxes = torch.from_numpy(boxes)\n gt_classes = torch.from_numpy(gt_classes)\n num_obj = torch.Tensor([boxes.size(0)]).long()\n return im_data_resize, boxes, gt_classes, num_obj\n\n else:\n input_h, input_w = cfg.test_input_size\n im_data = im_data.resize((input_w, input_h))\n im_data_resize = torch.from_numpy(np.array(im_data)).float() / 255\n im_data_resize = im_data_resize.permute(2, 0, 1)\n return im_data_resize, im_info\n\n def __len__(self):\n return len(self._roidb)\n\n def __add__(self, other):\n self._roidb = self._roidb + other._roidb\n self._image_paths = self._image_paths + other._image_paths\n return self\n\n\ndef detection_collate(batch):\n \"\"\"\n Collate data of different batch, it is because the boxes and gt_classes have changeable length.\n This function will pad the boxes and gt_classes with zero.\n\n Arguments:\n batch -- list of tuple (im, boxes, gt_classes)\n\n im_data -- tensor of shape (3, H, W)\n boxes -- tensor of shape (N, 4)\n gt_classes -- tensor of shape (N)\n 
num_obj -- tensor of shape (1)\n\n Returns:\n\n tuple\n 1) tensor of shape (batch_size, 3, H, W)\n 2) tensor of shape (batch_size, N, 4)\n 3) tensor of shape (batch_size, N)\n 4) tensor of shape (batch_size, 1)\n\n \"\"\"\n\n # kind of hack, this will break down a list of tuple into\n # individual list\n bsize = len(batch)\n im_data, boxes, gt_classes, num_obj = zip(*batch)\n max_num_obj = max([x.item() for x in num_obj])\n padded_boxes = torch.zeros((bsize, max_num_obj, 4))\n padded_classes = torch.zeros((bsize, max_num_obj,))\n\n for i in range(bsize):\n padded_boxes[i, :num_obj[i], :] = boxes[i]\n padded_classes[i, :num_obj[i]] = gt_classes[i]\n\n return torch.stack(im_data, 0), padded_boxes, padded_classes, torch.stack(num_obj, 0)\n\n\nclass TinyRoiDataset(RoiDataset):\n def __init__(self, imdb, num_roi):\n super(TinyRoiDataset, self).__init__(imdb)\n self._roidb = self._roidb[:num_roi]\n\n\n\n\n\n\n\n", "id": "6898343", "language": "Python", "matching_score": 1.864814281463623, "max_stars_count": 1, "path": "dataset/roidb.py" }, { "content": "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by <NAME>\n# --------------------------------------------------------\n# --------------------------------------------------------\n# Modified by <NAME>\n# --------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport cv2\nimport numpy as np\nimport torch\nimport PIL\nimport PIL.ImageDraw as ImageDraw\nimport PIL.ImageFont as ImageFont\n\nSTANDARD_COLORS = [\n 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',\n 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',\n 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',\n 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',\n 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',\n 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',\n 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',\n 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',\n 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',\n 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',\n 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',\n 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',\n 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',\n 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',\n 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',\n 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',\n 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',\n 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',\n 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',\n 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',\n 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',\n 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',\n 'WhiteSmoke', 'Yellow', 'YellowGreen'\n]\n\nNUM_COLORS = len(STANDARD_COLORS)\n\nfont_path = os.path.join(os.path.dirname(__file__), 'arial.ttf')\n\nFONT = ImageFont.truetype(font_path, 
20)\n\n\ndef _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, font=FONT, color='black', thickness=2):\n draw = ImageDraw.Draw(image)\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n draw.line([(left, top), (left, bottom), (right, bottom),\n (right, top), (left, top)], width=thickness, fill=color)\n text_bottom = bottom\n # Reverse list and print from bottom to top.\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n\n # draw.rectangle(\n # [(left, text_bottom - text_height - 2 * margin), (left + text_width,\n # text_bottom)],fill=color)\n\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str,\n fill=color,\n font=font)\n\n return image\n\n\ndef draw_detection_boxes(image, boxes, gt_classes=None, class_names=None):\n \"\"\"\n Draw bounding boxes via PIL.Image library\n\n Arguments:\n im_data -- PIL.Image object\n boxes -- numpy array of shape (N, 5) N is number of boxes, (x1, y1, x2, y2, cls_score)\n gt_classes -- numpy array of shape (N). ground truth class index 0 ~ (N-1)\n class_names -- list of string. class names\n\n Return:\n im_data -- image data with boxes\n \"\"\"\n\n num_boxes = boxes.shape[0]\n disp_image = image\n for i in range(num_boxes):\n bbox = tuple(np.round(boxes[i, :4]).astype(np.int64))\n score = boxes[i, 4]\n gt_class_ind = gt_classes[i]\n class_name = class_names[gt_class_ind]\n disp_str = '{}: {:.2f}'.format(class_name, score)\n disp_image = _draw_single_box(disp_image,\n bbox[0],\n bbox[1],\n bbox[2],\n bbox[3],\n disp_str,\n FONT,\n color=STANDARD_COLORS[gt_class_ind % NUM_COLORS])\n return disp_image\n\n\ndef plot_boxes(im_data, boxes, gt_classes=None, class_names=None):\n \"\"\"\n Visualize the bounding boxes of objects in a image\n\n Arguments:\n im_data -- PIL.Image object or np.ndarray (read from cv2)\n boxes -- numpy array of shape (N, 4) N is number of boxes, (x1, y1, x2, y2)\n gt_classes -- numpy array of shape (N). ground truth class index 0 ~ (N-1)\n class_names -- list of string. 
class names\n\n Or:\n im_data -- tensor of shape (3, H, W)\n boxes -- tensor\n gt_classes -- tensor\n\n Return:\n\n im_data -- image data with boxes\n \"\"\"\n if isinstance(im_data, torch.Tensor):\n im_data = im_data.permute(1, 2, 0).numpy() * 255\n im_data = im_data.astype(np.uint8)\n boxes = boxes.numpy()\n gt_classes = gt_classes.numpy() if gt_classes is not None else None\n elif isinstance(im_data, PIL.JpegImagePlugin.JpegImageFile):\n im_data = np.copy(np.array(im_data))\n elif isinstance(im_data, np.ndarray):\n im_data = np.copy(np.array(im_data))\n else:\n raise NotImplementedError\n num_boxes = boxes.shape[0]\n for i in range(num_boxes):\n bbox = tuple(np.round(boxes[i, :]).astype(np.int64))\n cv2.rectangle(im_data, bbox[0:2], bbox[2:4], (0, 205, 0), 2)\n if gt_classes is not None:\n class_name = class_names[gt_classes[i]]\n cv2.putText(im_data, '%s' % class_name, (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_PLAIN,\n 2.0, (0, 0, 255), thickness=1)\n return im_data\n", "id": "2223732", "language": "Python", "matching_score": 1.1403276920318604, "max_stars_count": 62, "path": "util/visualize.py" }, { "content": "import os\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nimport json\nimport os\n# npm install vega-lite vega-cli canvas\n\n\nclass BarGraph:\n\n def __init__(self, yearly_data):\n self.yearly_data = yearly_data\n\n def build_graph(self):\n \n with open(os.path.join(os.path.dirname(__file__), 'colors.json')) as f:\n colors = json.load(f)\n allColorsValues = []\n\n # filter data\n max_languages = 5\n top_languages = {}\n for year in self.yearly_data.keys():\n for quarter in self.yearly_data[year].keys():\n for language in sorted(list(self.yearly_data[year][quarter].keys()),\n key=lambda lang: self.yearly_data[year][quarter][lang], reverse=True)[\n 0:max_languages]:\n if 'top' not in self.yearly_data[year][quarter]:\n self.yearly_data[year][quarter]['top'] = {}\n if self.yearly_data[year][quarter][language] != 0:\n self.yearly_data[year][quarter]['top'][language] = self.yearly_data[year][quarter][language]\n\n if language not in top_languages:\n top_languages[language] = 1\n top_languages[language] += 1\n\n # print(self.yearly_data)\n\n all_languages = list(top_languages.keys())\n\n for language in all_languages:\n if colors[language]['color'] is not None:\n allColorsValues.append(colors[language]['color'])\n\n languages_all_loc = {}\n\n for language in all_languages:\n language_year = []\n for year in self.yearly_data.keys():\n language_quarter = [0, 0, 0, 0]\n for quarter in self.yearly_data[year].keys():\n if language in self.yearly_data[year][quarter]['top']:\n language_quarter[quarter - 1] = self.yearly_data[year][quarter]['top'][language]\n else:\n language_quarter[quarter - 1] = 0\n language_year.append(language_quarter)\n languages_all_loc[language] = language_year\n\n # print(languages_all_loc)\n\n language_df = {}\n\n def prep_df(df, name):\n df = df.stack().reset_index()\n df.columns = ['c1', 'c2', 'values']\n df['Language'] = name\n return df\n\n for language in languages_all_loc.keys():\n language_df[language] = pd.DataFrame(languages_all_loc[language], index=list(self.yearly_data.keys()),\n columns=[\"Q1\", \"Q2\", \"Q3\", \"Q4\"])\n\n for language in language_df.keys():\n language_df[language] = prep_df(language_df[language], language)\n\n df = pd.concat(list(language_df.values()))\n\n\n chart = alt.Chart(df).mark_bar().encode(\n\n # tell Altair which field to group columns on\n x=alt.X('c2:N', title=None),\n\n # tell Altair which field to use as Y 
values and how to calculate\n y=alt.Y('sum(values):Q',\n axis=alt.Axis(\n grid=False,\n title='LOC added')),\n\n # tell Altair which field to use to use as the set of columns to be represented in each group\n column=alt.Column('c1:N', title=None),\n\n # tell Altair which field to use for color segmentation\n color=alt.Color('Language:N',\n scale=alt.Scale(\n domain=all_languages,\n # make it look pretty with an enjoyable color pallet\n range=allColorsValues,\n ),\n )) \\\n .configure_view(\n # remove grid lines around column clusters\n strokeOpacity=0\n )\n chart.save('bar_graph.png')\n return 'bar_graph.png'\n", "id": "91338", "language": "Python", "matching_score": 0.03760866820812225, "max_stars_count": 3, "path": "make_bar_graph.py" }, { "content": "import os\nimport torch\nfrom gluonnlp.data import SentencepieceTokenizer\nfrom kogpt2.model.sample import sample_sequence\nfrom kogpt2.utils import get_tokenizer\nfrom kogpt2.utils import download, tokenizer\nfrom kogpt2.model.torch_gpt2 import GPT2Config, GPT2LMHeadModel\nimport gluonnlp\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--temperature', type=float, default=0.7,\n help=\"temperature 를 통해서 글의 창의성을 조절합니다.\")\nparser.add_argument('--top_p', type=float, default=0.9,\n help=\"top_p 를 통해서 글의 표현 범위를 조절합니다.\")\nparser.add_argument('--top_k', type=int, default=40,\n help=\"top_k 를 통해서 글의 표현 범위를 조절합니다.\")\nparser.add_argument('--text_size', type=int, default=250,#20,\n help=\"결과물의 길이를 조정합니다.\")\nparser.add_argument('--loops', type=int, default=-1,\n help=\"글을 몇 번 반복할지 지정합니다. -1은 무한반복입니다.\")\nparser.add_argument('--tmp_sent', type=str, default=\"사랑\",\n help=\"글의 시작 문장입니다.\")\nparser.add_argument('--load_path', type=str, default=\"./checkpoint/KoGPT2_checkpoint_37000.tar\",\n help=\"학습된 결과물을 저장하는 경로입니다.\")\n\nargs = parser.parse_args()\n'''\npytorch_kogpt2 = {\n 'url':\n 'checkpoint/pytorch_kogpt2_676e9bcfa7.params',\n 'fname': 'pytorch_kogpt2_676e9bcfa7.params',\n 'chksum': '676e9bcfa7'\n}\n'''\npytorch_kogpt2 = {\n 'url':\n 'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',\n 'fname': 'pytorch_kogpt2_676e9bcfa7.params',\n 'chksum': '676e9bcfa7'\n}\n\nkogpt2_config = {\n \"initializer_range\": 0.02,\n \"layer_norm_epsilon\": 1e-05,\n \"n_ctx\": 1024,\n \"n_embd\": 768,\n \"n_head\": 12,\n \"n_layer\": 12,\n \"n_positions\": 1024,\n \"vocab_size\": 50000\n}\n\ndef auto_enter(text):\n text = (text.replace(\" \", \"\\n\"))\n text = text.split(\"\\n\")\n\n text = [t.lstrip() for t in text if t != '']\n return \"\\n\\n\".join(text)\n\ndef main(temperature = 0.7, top_p = 0.8, top_k = 40, tmp_sent = \"\", text_size = 100, loops = 0, load_path = \"\"):\n ctx = 'cuda'\n cachedir = '~/kogpt2/'\n save_path = './checkpoint/'\n # download model\n model_info = pytorch_kogpt2\n model_path = download(model_info['url'],\n model_info['fname'],\n model_info['chksum'],\n cachedir=cachedir)\n # download vocab\n vocab_info = tokenizer\n vocab_path = download(vocab_info['url'],\n vocab_info['fname'],\n vocab_info['chksum'],\n cachedir=cachedir)\n # Device 설정\n device = torch.device(ctx)\n # 저장한 Checkpoint 불러오기\n checkpoint = torch.load(load_path, map_location=device)\n\n # KoGPT-2 언어 모델 학습을 위한 GPT2LMHeadModel 선언\n kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))\n kogpt2model.load_state_dict(checkpoint['model_state_dict'])\n\n kogpt2model.eval()\n vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(vocab_path,\n mask_token=None,\n sep_token=None,\n 
cls_token=None,\n unknown_token='<unk>',\n padding_token='<pad>',\n bos_token='<s>',\n eos_token='</s>')\n\n tok_path = get_tokenizer()\n model, vocab = kogpt2model, vocab_b_obj\n tok = SentencepieceTokenizer(tok_path)\n\n if loops:\n num = 1\n else:\n num = 0\n\n try:\n load_path.split(\"/\")[-2]\n except:\n pass\n else:\n load_path = load_path.split(\"/\")[-2]\n\n print(\"ok : \",load_path)\n\n if not(os.path.isdir(\"samples/\"+ load_path)):\n os.makedirs(os.path.join(\"samples/\"+ load_path))\n\n while 1:\n sent =''\n if tmp_sent == \"\":\n tmp_sent = input('input : ')\n sent = sent+tmp_sent\n\n toked = tok(sent)\n\n if len(toked) > 1022:\n break\n\n sent = sample_sequence(model, tok, vocab, sent, text_size, temperature, top_p, top_k)\n sent = sent.replace(\"//\", \"\\n\") # 비효율적이지만 엔터를 위해서 등장\n sent = sent.replace(\"</s>\", \"\") \n sent = auto_enter(sent)\n try:\n int(sent[len(sent)-1]) #마지막글자를 숫자로 바꾸는게 오류가 나지 않는다?? :숫자 // ex) : 신나는 음악모음 2\n sent=sent[:len(sent)-1] #숫자제거\n print('del num')#동작 확인용\n except:\n pass\n \n if(sent[len(sent)-1]==' '): #마지막글자 공백일경우\n sent=sent[:len(sent)-1] #공백제거\n \n print(sent)\n\n now = [int(n) for n in os.listdir(\"./samples/\" + load_path)]\n \n try:\n now = max(now)\n except:\n now = 1\n\n f = open(\"samples/\"+ load_path + \"/\" + str(now + 1), 'w', encoding=\"utf-8\")\n \n head = [load_path, tmp_sent, text_size, temperature, top_p, top_k]\n head = [str(h) for h in head]\n f.write(\",\".join(head))\n f.write(\",\")\n f.write(sent)\n f.close()\n\n #tmp_sent = \"\"\n\n if num != 0:\n num += 1\n if num >= loops:\n print(\"good\")\n return\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main(temperature=args.temperature, top_p=args.top_p, top_k=args.top_k, tmp_sent=args.tmp_sent, text_size=args.text_size, loops=args.loops+1, load_path=args.load_path)", "id": "6339080", "language": "Python", "matching_score": 6.07787561416626, "max_stars_count": 0, "path": "generator.py" }, { "content": "import torch\nfrom torch.utils.data import DataLoader # 데이터로더\nfrom gluonnlp.data import SentencepieceTokenizer \nfrom kogpt2.utils import get_tokenizer\nfrom kogpt2.utils import download, tokenizer\nfrom kogpt2.model.torch_gpt2 import GPT2Config, GPT2LMHeadModel\nfrom kogpt2.data import Read_Dataseimport torch\nfrom torch.utils.data import DataLoader # 데이터로더\nfrom gluonnlp.data import SentencepieceTokenizer \nfrom kogpt2.utils import get_tokenizer\nfrom kogpt2.utils import download, tokenizer\nfrom kogpt2.model.torch_gpt2 import GPT2Config, GPT2LMHeadModel\nfrom kogpt2.data import Read_Dataset\nimport gluonnlp\nfrom kogpt2.model.sample import sample_sequence\nfrom tqdm import tqdm\nimport subprocess\nimport os\nfrom tensorboardX import SummaryWriter\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epoch', type=int, default=200,\n help=\"epoch 를 통해서 학습 범위를 조절합니다.\")\nparser.add_argument('--save_path', type=str, default='./checkpoint/',\n help=\"학습 결과를 저장하는 경로입니다.\")\nparser.add_argument('--load_path', type=str, default='./checkpoint/KoGPT2_checkpoint_100000.tar', #\n help=\"학습된 결과를 불러오는 경로입니다.\")\nparser.add_argument('--samples', type=str, default=\"samples/\",\n help=\"생성 결과를 저장할 경로입니다.\")\nparser.add_argument('--data_file_path', type=str, default='dataset/lyrics_dataset.txt',\n help=\"학습할 데이터를 불러오는 경로입니다.\")\nparser.add_argument('--batch_size', type=int, default=8,\n help=\"batch_size 를 지정합니다.\")\nargs = parser.parse_args()\n\n'''\npytorch_kogpt2 = {\n 'url':\n 'checkpoint/pytorch_kogpt2_676e9bcfa7.params',\n 
'fname': 'pytorch_kogpt2_676e9bcfa7.params',\n 'chksum': '676e9bcfa7'\n}\n'''\npytorch_kogpt2 = {\n 'url':\n 'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',\n 'fname': 'pytorch_kogpt2_676e9bcfa7.params',\n 'chksum': '676e9bcfa7'\n}\n\n\nkogpt2_config = {\n \"initializer_range\": 0.02,\n \"layer_norm_epsilon\": 1e-05,\n \"n_ctx\": 1024,\n \"n_embd\": 768,\n \"n_head\": 12,\n \"n_layer\": 12,\n \"n_positions\": 1024,\n \"vocab_size\": 50000\n}\n\ndef auto_enter(text):\n text = (text.replace(\" \", \"\\n\"))\n text = text.split(\"\\n\")\n\n text = [t.lstrip() for t in text if t != '']\n return \"\\n\\n\".join(text)\n\ndef get_gpu_memory_map():\n \"\"\"Get the current gpu usage.\n\n Returns\n -------\n usage: dict\n Keys are device ids as integers.\n Values are memory usage as integers in MB.\n \"\"\"\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map\n\ndef main(epoch, save_path, load_path, samples, data_file_path, batch_size):\n ctx = 'cuda'\n cachedir = '~/kogpt2/'\n\n summary = SummaryWriter()\n\n # download model\n model_info = pytorch_kogpt2\n model_path = download(model_info['url'],\n model_info['fname'],\n model_info['chksum'],\n cachedir=cachedir)\n # download vocab\n vocab_info = tokenizer\n vocab_path = download(vocab_info['url'],\n vocab_info['fname'],\n vocab_info['chksum'],\n cachedir=cachedir)\n\n # KoGPT-2 언어 모델 학습을 위한 GPT2LMHeadModel 선언\n kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))\n\n # model_path 로부터 다운로드 받은 내용을 load_state_dict 으로 업로드\n kogpt2model.load_state_dict(torch.load(model_path))\n\n device = torch.device(ctx)\n kogpt2model.to(device)\n\n # 불러오기 부분\n try:\n checkpoint = torch.load(load_path, map_location=device)\n\n # KoGPT-2 언어 모델 학습을 위한 GPT2LMHeadModel 선언\n kogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))\n kogpt2model.load_state_dict(checkpoint['model_state_dict'])\n\n kogpt2model.eval()\n except:\n count = 0\n else:\n count = int(re.findall(\"\\d+\", load_path)[1])\n\n print(count)\n # 추가로 학습하기 위해 .train() 사용\n kogpt2model.train()\n vocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(vocab_path,\n mask_token=None,\n sep_token=None,\n cls_token=None,\n unknown_token='<unk>',\n padding_token='<pad>',\n bos_token='<s>',\n eos_token='</s>')\n\n\n tok_path = get_tokenizer()\n model, vocab = kogpt2model, vocab_b_obj\n tok = SentencepieceTokenizer(tok_path)\n\n dataset = Read_Dataset(data_file_path, vocab, tok)\n print(\"Read_Dataset ok\")\n data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)\n\n\n\n learning_rate = 3e-5\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n print('KoGPT-2 Transfer Learning Start')\n avg_loss = (0.0, 0.0)\n\n for epoch in range(epoch):\n for data in data_loader:\n optimizer.zero_grad()\n #print(data) \n data = torch.stack(data) # list of Tensor로 구성되어 있기 때문에 list를 stack을 통해 변환해준다.\n data = data.transpose(1,0)\n data = data.to(ctx)\n model = model.to(ctx)\n\n outputs = model(data, labels=data)\n loss, logits = outputs[:2]\n loss = loss.to(ctx)\n loss.backward()\n avg_loss = (avg_loss[0] * 0.99 + loss, avg_loss[1] * 0.99 + 1.0)\n optimizer.step()\n if count % 10 == 0:\n 
print('epoch no.{0} train no.{1} loss = {2:.5f} avg_loss = {3:.5f}' . format(epoch, count, loss, avg_loss[0] / avg_loss[1]))\n summary.add_scalar('loss/avg_loss', avg_loss[0] / avg_loss[1], count)\n summary.add_scalar('loss/loss', loss, count)\n\n # generator 진행\n if (count > 0 and count % 1000 == 0) or (len(data) < batch_size):\n sent = sample_sequence(model.to(\"cpu\"), tok,vocab, sent=\"추억의\", text_size=100, temperature=0.7, top_p=0.8, top_k=40)\n sent = sent.replace(\"<unused0>\", \"\\n\") # 비효율적이지만 엔터를 위해서 등장\n sent = auto_enter(sent)\n print(sent)\n\n summary.add_text('Text', sent, count)\n\n if count > 20000:\n now = []\n for n in os.listdir(samples): #ipynb.checkpoint 땜에ㅠ\n try:\n #print(int(n))\n now.append(int(n))\n except:\n continue\n if(len(now)==0):\n now=0\n else:\n now = max(now)\n f = open(samples + str(now + 1), 'w', encoding=\"utf-8\")\n f.write(sent)\n f.close()\n #########################################\n count += 1\n\n if (count > 0 and count % 20000 == 0 and count>400000) or (len(data) < batch_size):\n # 모델 저장\n try:\n torch.save({\n 'epoch': epoch,\n 'train_no': count,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss\n }, save_path + 'KoGPT2_checkpoint_' + str(count) + '.tar')\n except:\n pass\n\nif __name__ == \"__main__\":\n main(args.epoch, args.save_path, args.load_path, args.samples, args.data_file_path, args.batch_size)t\nimport gluonnlp\nfrom kogpt2.model.sample import sample_sequence\nfrom tqdm import tqdm\nimport subprocess\nimport os\nfrom tensorboardX import SummaryWriter\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epoch', type=int, default=200,\n\t\t\t\t\thelp=\"epoch 를 통해서 학습 범위를 조절합니다.\")\nparser.add_argument('--save_path', type=str, default='./checkpoint/',\n\t\t\t\t\thelp=\"학습 결과를 저장하는 경로입니다.\")\nparser.add_argument('--load_path', type=str, default='./checkpoint/KoGPT2_checkpoint_100000.tar', #\n\t\t\t\t\thelp=\"학습된 결과를 불러오는 경로입니다.\")\nparser.add_argument('--samples', type=str, default=\"samples/\",\n\t\t\t\t\thelp=\"생성 결과를 저장할 경로입니다.\")\nparser.add_argument('--data_file_path', type=str, default='dataset/lyrics_dataset.txt',\n\t\t\t\t\thelp=\"학습할 데이터를 불러오는 경로입니다.\")\nparser.add_argument('--batch_size', type=int, default=8,\n\t\t\t\t\thelp=\"batch_size 를 지정합니다.\")\nargs = parser.parse_args()\n\n'''\npytorch_kogpt2 = {\n\t'url':\n\t'checkpoint/pytorch_kogpt2_676e9bcfa7.params',\n\t'fname': 'pytorch_kogpt2_676e9bcfa7.params',\n\t'chksum': '676e9bcfa7'\n}\n'''\npytorch_kogpt2 = {\n\t'url':\n\t'https://kobert.blob.core.windows.net/models/kogpt2/pytorch/pytorch_kogpt2_676e9bcfa7.params',\n\t'fname': 'pytorch_kogpt2_676e9bcfa7.params',\n\t'chksum': '676e9bcfa7'\n}\n\n\nkogpt2_config = {\n\t\"initializer_range\": 0.02,\n\t\"layer_norm_epsilon\": 1e-05,\n\t\"n_ctx\": 1024,\n\t\"n_embd\": 768,\n\t\"n_head\": 12,\n\t\"n_layer\": 12,\n\t\"n_positions\": 1024,\n\t\"vocab_size\": 50000\n}\n\ndef auto_enter(text):\n\ttext = (text.replace(\" \", \"\\n\"))\n\ttext = text.split(\"\\n\")\n\n\ttext = [t.lstrip() for t in text if t != '']\n\treturn \"\\n\\n\".join(text)\n\ndef get_gpu_memory_map():\n\t\"\"\"Get the current gpu usage.\n\n\tReturns\n\t-------\n\tusage: dict\n\t\tKeys are device ids as integers.\n\t\tValues are memory usage as integers in MB.\n\t\"\"\"\n\tresult = subprocess.check_output(\n\t\t[\n\t\t\t'nvidia-smi', '--query-gpu=memory.used',\n\t\t\t'--format=csv,nounits,noheader'\n\t\t], encoding='utf-8')\n\t# Convert lines into a 
dictionary\n\tgpu_memory = [int(x) for x in result.strip().split('\\n')]\n\tgpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n\treturn gpu_memory_map\n\ndef main(epoch, save_path, load_path, samples, data_file_path, batch_size):\n\tctx = 'cuda'\n\tcachedir = '~/kogpt2/'\n\n\tsummary = SummaryWriter()\n\n\t# download model\n\tmodel_info = pytorch_kogpt2\n\tmodel_path = download(model_info['url'],\n\t\t\t\t\t\t model_info['fname'],\n\t\t\t\t\t\t model_info['chksum'],\n\t\t\t\t\t\t cachedir=cachedir)\n\t# download vocab\n\tvocab_info = tokenizer\n\tvocab_path = download(vocab_info['url'],\n\t\t\t\t\t\t vocab_info['fname'],\n\t\t\t\t\t\t vocab_info['chksum'],\n\t\t\t\t\t\t cachedir=cachedir)\n\n\t# KoGPT-2 언어 모델 학습을 위한 GPT2LMHeadModel 선언\n\tkogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))\n\n\t# model_path 로부터 다운로드 받은 내용을 load_state_dict 으로 업로드\n\tkogpt2model.load_state_dict(torch.load(model_path))\n\n\tdevice = torch.device(ctx)\n\tkogpt2model.to(device)\n\n\t# 불러오기 부분\n\ttry:\n\t\tcheckpoint = torch.load(load_path, map_location=device)\n\n\t\t# KoGPT-2 언어 모델 학습을 위한 GPT2LMHeadModel 선언\n\t\tkogpt2model = GPT2LMHeadModel(config=GPT2Config.from_dict(kogpt2_config))\n\t\tkogpt2model.load_state_dict(checkpoint['model_state_dict'])\n\n\t\tkogpt2model.eval()\n\texcept:\n\t\tcount = 0\n\telse:\n\t\tcount = int(re.findall(\"\\d+\", load_path)[1])\n\n\tprint(count)\n\t# 추가로 학습하기 위해 .train() 사용\n\tkogpt2model.train()\n\tvocab_b_obj = gluonnlp.vocab.BERTVocab.from_sentencepiece(vocab_path,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t mask_token=None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t sep_token=None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t cls_token=None,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t unknown_token='<unk>',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t padding_token='<pad>',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t bos_token='<s>',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t eos_token='</s>')\n\n\n\ttok_path = get_tokenizer()\n\tmodel, vocab = kogpt2model, vocab_b_obj\n\ttok = SentencepieceTokenizer(tok_path)\n\n\tdataset = Read_Dataset(data_file_path, vocab, tok)\n\tprint(\"Read_Dataset ok\")\n\tdata_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)\n\n\n\n\tlearning_rate = 3e-5\n\tcriterion = torch.nn.CrossEntropyLoss()\n\toptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n\tprint('KoGPT-2 Transfer Learning Start')\n\tavg_loss = (0.0, 0.0)\n\n\tfor epoch in range(epoch):\n\t\tfor data in data_loader:\n\t\t\toptimizer.zero_grad()\n\t\t\t#print(data) \n\t\t\tdata = torch.stack(data) # list of Tensor로 구성되어 있기 때문에 list를 stack을 통해 변환해준다.\n\t\t\tdata = data.transpose(1,0)\n\t\t\tdata = data.to(ctx)\n\t\t\tmodel = model.to(ctx)\n\n\t\t\toutputs = model(data, labels=data)\n\t\t\tloss, logits = outputs[:2]\n\t\t\tloss = loss.to(ctx)\n\t\t\tloss.backward()\n\t\t\tavg_loss = (avg_loss[0] * 0.99 + loss, avg_loss[1] * 0.99 + 1.0)\n\t\t\toptimizer.step()\n\t\t\tif count % 10 == 0:\n\t\t\t\tprint('epoch no.{0} train no.{1} loss = {2:.5f} avg_loss = {3:.5f}' . 
format(epoch, count, loss, avg_loss[0] / avg_loss[1]))\n\t\t\t\tsummary.add_scalar('loss/avg_loss', avg_loss[0] / avg_loss[1], count)\n\t\t\t\tsummary.add_scalar('loss/loss', loss, count)\n\n\t\t\t# generator 진행\n\t\t\tif (count > 0 and count % 1000 == 0) or (len(data) < batch_size):\n\t\t\t\tsent = sample_sequence(model.to(\"cpu\"), tok,vocab, sent=\"추억의\", text_size=100, temperature=0.7, top_p=0.8, top_k=40)\n\t\t\t\tsent = sent.replace(\"<unused0>\", \"\\n\") # 비효율적이지만 엔터를 위해서 등장\n\t\t\t\tsent = auto_enter(sent)\n\t\t\t\tprint(sent)\n\n\t\t\t\tsummary.add_text('Text', sent, count)\n\n\t\t\t\tif count > 20000:\n\t\t\t\t\tnow = []\n\t\t\t\t\tfor n in os.listdir(samples): #ipynb.checkpoint 땜에ㅠ\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t#print(int(n))\n\t\t\t\t\t\t\tnow.append(int(n))\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\tif(len(now)==0):\n\t\t\t\t\t\tnow=0\n\t\t\t\t\telse:\n\t\t\t\t\t\tnow = max(now)\n\t\t\t\t\tf = open(samples + str(now + 1), 'w', encoding=\"utf-8\")\n\t\t\t\t\tf.write(sent)\n\t\t\t\t\tf.close()\n\t\t\t#########################################\n\t\t\tcount += 1\n\n\t\t\tif (count > 0 and count % 20000 == 0 and count>400000) or (len(data) < batch_size):\n\t\t\t\t# 모델 저장\n\t\t\t\ttry:\n\t\t\t\t\ttorch.save({\n\t\t\t\t\t\t'epoch': epoch,\n\t\t\t\t\t\t'train_no': count,\n\t\t\t\t\t\t'model_state_dict': model.state_dict(),\n\t\t\t\t\t\t'optimizer_state_dict': optimizer.state_dict(),\n\t\t\t\t\t\t'loss': loss\n\t\t\t\t\t}, save_path + 'KoGPT2_checkpoint_' + str(count) + '.tar')\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\nif __name__ == \"__main__\":\n\tmain(args.epoch, args.save_path, args.load_path, args.samples, args.data_file_path, args.batch_size)", "id": "9074150", "language": "Python", "matching_score": 2.0028955936431885, "max_stars_count": 0, "path": "main.py" }, { "content": "import os\nimport argparse\nimport time\nimport torch\nfrom torch.autograd import Variable\nfrom PIL import Image\nfrom test import prepare_im_data\nfrom yolov2 import Yolov2\nfrom yolo_eval import yolo_eval\nfrom util.visualize import draw_detection_boxes\nimport matplotlib.pyplot as plt\nfrom util.network import WeightLoader\nimport cv2\nfrom matplotlib.ticker import NullLocator\nimport numpy as np\nimport pandas as pd\n\ndef parse_args():\n\n parser = argparse.ArgumentParser('Yolo v2')\n parser.add_argument('--output_dir', dest='output_dir',\n default='output', type=str)\n parser.add_argument('--model_name', dest='model_name',\n default=False, type=str)\n parser.add_argument('--cuda', dest='use_cuda',\n default=False, type=bool)\n #parser.add_argument(\"--image_folder\", type=str, default=\"images/setframe\", help=\"path to dataset\")\n parser.add_argument(\"--video_path\", type=str, default=False, help=\"video to detect\")\n parser.add_argument(\"--export_video_frame\",default=False,type=bool) #비디오 프레임부터 꺼내야할때\n parser.add_argument(\"--save_video_name\",default=False,type=str) #비디오 이름 지정\n args = parser.parse_args()\n return args\n\n\ndef demo():\n args = parse_args()\n print('call with args: {}'.format(args))\n\n # set model\n\n classes = ('aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n model = Yolov2()\n #weight_loader = WeightLoader()\n #weight_loader.load(model, 'output/'+args.model_name+'.pth')#'yolo-voc.weights')\n \n model_path = os.path.join(args.output_dir, args.model_name+'.pth')\n print('loading model from 
{}'.format(model_path))\n if torch.cuda.is_available():\n checkpoint = torch.load(model_path)\n else:\n checkpoint = torch.load(model_path, map_location='cpu')\n model.load_state_dict(checkpoint['model'])\n\n \n if args.use_cuda:\n model.cuda()\n\n model.eval()\n print('model loaded')\n\n images_dir = f'images/{args.save_video_name}/setframe'\n \n ##-----save video frame\n if(args.export_video_frame==True):\n vidcap = cv2.VideoCapture(args.video_path)\n def getFrame(sec,imgarr):\n vidcap.set(cv2.CAP_PROP_POS_MSEC,sec*1000)\n hasFrames,image = vidcap.read()\n if hasFrames:\n cv2.imwrite(os.path.join(images_dir, str(count) + '.png'), image) # save frame as png file\n return hasFrames\n\n sec = 0\n frameRate = 0.042 #it will capture image in each 0.042 second (**24frame정도?)\n count=1\n imgarr=[]\n success = getFrame(sec,imgarr)\n\n num=0\n while success:\n if num%100==0:\n print(num)\n num+=1\n count = count + 1\n sec = sec + frameRate\n sec = round(sec, 2)\n success = getFrame(sec,imgarr)\n\n print(\"frame: \"+str(num))\n ##-----\n \n \n images_names = os.listdir(images_dir)\n \n #숫자 이름대로 정렬\n #str to int\n for i in range(len(images_names)):\n images_names[i]=int(images_names[i].replace(\".png\",\"\")) #testlist에서 \".png\" 제거, 정수로 변환\n images_names.sort()\n \n for i in range(len(images_names)):\n images_names[i]=str(images_names[i])+\".png\"\n \n framenum=0\n \n list_dict={} #df\n coordinate_dict={}\n len_dict={}\n \n #testid=0 #\n \n for image_name in images_names:\n #test용: 일찍끝내기\n #---\n '''\n testid+=1\n if (testid>5):\n break;\n '''\n #---\n image_path = os.path.join(images_dir, image_name)\n img = Image.open(image_path)\n im_data, im_info = prepare_im_data(img)\n\n if args.use_cuda:\n im_data_variable = Variable(im_data).cuda()\n else:\n im_data_variable = Variable(im_data)\n\n tic = time.time()\n\n yolo_output = model(im_data_variable)\n yolo_output = [item[0].data for item in yolo_output]\n detections = yolo_eval(yolo_output, im_info, conf_threshold=0.6, nms_threshold=0.4)\n\n toc = time.time()\n cost_time = toc - tic\n framenum+=1\n print(framenum,': im detect, cost time {:4f}, FPS: {}'.format(\n toc-tic, int(1 / cost_time)))\n \n \n \n if (len(detections)>0):\n det_boxes = detections[:, :5].cpu().numpy()\n det_classes = detections[:, -1].long().cpu().numpy()\n else:\n det_boxes=np.array([])\n det_classes=None\n \n #---\n #df에 저장\n detect_list=[]\n coordinate_list=[]\n \n detect_list.append(cost_time)\n detect_list.append(int(1 / cost_time))\n for i in range(det_boxes.shape[0]):\n coordinate=[]\n #print(bbox[0],bbox[1],bbox[2],bbox[3])\n bbox=tuple(np.round(det_boxes[i, :4]).astype(np.int64))\n coordinate.append(bbox[0])\n coordinate.append(bbox[1])\n coordinate.append(bbox[2])\n coordinate.append(bbox[3])\n coordinate_list.append(coordinate)\n detect_list.append(classes[det_classes[i]])\n \n #print(coordinate_list)\n coordinate_dict[image_name]=str(coordinate_list) #2차원 배열 dataframe에 유지되기 위함\n #print(coordinate_dict)\n list_dict[image_name]=detect_list\n len_dict[image_name]=det_boxes.shape[0]\n \n # ---\n \n #img 저장\n im2show = draw_detection_boxes(img, det_boxes, det_classes, class_names=classes)\n plt.figure(figsize=(33,19)) #plot size\n plt.gca().xaxis.set_major_locator(NullLocator()) # delete axis\n plt.gca().yaxis.set_major_locator(NullLocator()) # delete axis\n plt.imshow(im2show)\n #plt.show()\n \n \n #save detected img\n path='/images/'\n filename = image_path.split(\"/\")[-1].split('\\\\')[-1].split(\".\")[0]\n 
plt.savefig(f\"images/{args.save_video_name}/detected_img/{filename}.png\", bbox_inches=\"tight\", pad_inches=0.0)\n #plt.savefig(f\"images/testimg.png\", bbox_inches=\"tight\", pad_inches=0.0)\n \n imgarr=[]\n \n #딕셔너리 df 생성\n print(\"making dataframe...\")\n res = pd.DataFrame.from_dict(list_dict, orient='index')\n countimg=list(res.columns)\n \n for i in range(0,len(countimg)):\n countimg[i]=countimg[i]-1\n \n countimg[0]='cost time'\n countimg[1]='FPS' \n res.columns=countimg\n \n res2= pd.DataFrame.from_dict(coordinate_dict, orient='index')\n # 2차원 배열 형태로 넣고 싶음 (dataframe 안나눠지게)\n \n res2=res2.rename(columns={0:'coordinate'}) #초기 설정 0으로 되어있음\n #print(res2)\n #print(coordinate_dict)\n \n res3= pd.DataFrame.from_dict(len_dict, orient='index')\n res3=res3.rename(columns={0:'len'}) #초기 설정 0으로 되어있음\n \n res['coordinate']=None\n res['coordinate']=res2['coordinate']\n \n res['len']=None\n res['len']=res3['len']\n \n #csv 생성\n print(\"making csv file...\")\n res.to_csv(f'csv/{args.model_name}_{args.save_video_name}.csv', mode='w')\n \n '''\n path_dir = 'images/detected_img/'\n file_list = os.listdir(path_dir)\n \n #숫자 이름대로 정렬\n #str to int\n for i in range(len(file_list)):\n file_list[i]=int(file_list[i].replace(\".png\",\"\")) #testlist에서 \".png\" 제거, 정수로 변환\n file_list.sort()\n \n for i in range(len(file_list)):\n file_list[i]=str(file_list[i])+\".png\"\n \n #print(file_list) #숫자 순서대로 정렬된 것 확인함\n \n for png in file_list:\n #print(png)\n #image = Image.open(path_dir + png).convert(\"RGB\")\n image=cv2.imread(path_dir + png)\n #print(image)\n pixel = np.array(image)\n #print(np.shape(pixel))\n #pixel2=np.delete(pixel, 3, axis = 2)\n print(np.shape(pixel))\n if(np.shape(pixel)!=(1443, 2562, 3)):\n #print(\"hello\")\n pixel=pixel[0:1443,0:2562,0:3]\n \n if(np.shape(pixel)!=(283, 500, 3)):\n #print(\"hello\")\n pixel=pixel[0:283,0:500,0:3]\n \n\n #print(np.shape(pixel2))\n imgarr.append(pixel)\n \n #print(np.shape(imgarr))\n\n \n\n fps = 24 #24 #frame per second\n \n pathOut = f'images/{args.model_name}_{args.save_video_name}_fixsize.mp4'\n size=(2562,1443)\n out = cv2.VideoWriter(pathOut,cv2.VideoWriter_fourcc(*'DIVX'), fps, size)\n for i in range(len(imgarr)):\n # writing to a image array\n out.write(imgarr[i])\n #print(imgarr[i])\n \n out.release()\n \nif __name__ == '__main__':\n demo()\n'''\n\nif __name__ == '__main__':\n demo()", "id": "2851204", "language": "Python", "matching_score": 5.743839263916016, "max_stars_count": 1, "path": "video_saveframe.py" }, { "content": "import os\nimport argparse\nimport time\nimport numpy as np\nimport pickle\nimport torch\nfrom torch.autograd import Variable\nfrom PIL import Image\nfrom yolov2 import Yolov2\nfrom dataset.factory import get_imdb\nfrom dataset.roidb import RoiDataset\nfrom yolo_eval import yolo_eval\nfrom util.visualize import draw_detection_boxes\nimport matplotlib.pyplot as plt\nfrom util.network import WeightLoader\nfrom torch.utils.data import DataLoader\n#from config import config as cfg\nimport config as cfg\n\ndef parse_args():\n\n parser = argparse.ArgumentParser('Yolo v2')\n parser.add_argument('--dataset', dest='dataset',\n default='voc07test', type=str)\n parser.add_argument('--output_dir', dest='output_dir',\n default='output', type=str)\n parser.add_argument('--model_name', dest='model_name',\n default='yolov2_epoch_160', type=str)\n parser.add_argument('--nw', dest='num_workers',\n help='number of workers to load training data',\n default=1, type=int)\n parser.add_argument('--bs', dest='batch_size',\n default=2, type=int)\n 
parser.add_argument('--cuda', dest='use_cuda',\n default=False, type=bool)\n parser.add_argument('--vis', dest='vis',\n default=False, type=bool)\n\n args = parser.parse_args()\n return args\n\n\ndef prepare_im_data(img):\n \"\"\"\n Prepare image data that will be feed to network.\n\n Arguments:\n img -- PIL.Image object\n\n Returns:\n im_data -- tensor of shape (3, H, W).\n im_info -- dictionary {height, width}\n\n \"\"\"\n\n im_info = dict()\n im_info['width'], im_info['height'] = img.size\n\n # resize the image\n H, W = cfg.input_size\n im_data = img.resize((H, W))\n \n \n # to torch tensor\n im_data = torch.from_numpy(np.array(im_data)).float() / 255\n\n im_data = im_data.permute(2, 0, 1).unsqueeze(0)\n\n return im_data, im_info\n\n\ndef test():\n args = parse_args()\n args.conf_thresh = 0.005\n args.nms_thresh = 0.45\n if args.vis:\n args.conf_thresh = 0.5\n print('Called with args:')\n print(args)\n\n # prepare dataset\n\n if args.dataset == 'voc07trainval':\n args.imdbval_name = 'voc_2007_trainval'\n\n elif args.dataset == 'voc07test':\n args.imdbval_name = 'voc_2007_test'\n\n else:\n raise NotImplementedError\n\n val_imdb = get_imdb(args.imdbval_name)\n\n val_dataset = RoiDataset(val_imdb, train=False)\n val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)\n\n # load model\n model = Yolov2()\n # weight_loader = WeightLoader()\n # weight_loader.load(model, 'yolo-voc.weights')\n # print('loaded')\n\n model_path = os.path.join(args.output_dir, args.model_name+'.pth')\n print('loading model from {}'.format(model_path))\n if torch.cuda.is_available():\n checkpoint = torch.load(model_path)\n else:\n checkpoint = torch.load(model_path, map_location='cpu')\n model.load_state_dict(checkpoint['model'])\n\n if args.use_cuda:\n model.cuda()\n\n model.eval()\n print('model loaded')\n\n dataset_size = len(val_imdb.image_index)\n\n all_boxes = [[[] for _ in range(dataset_size)] for _ in range(val_imdb.num_classes)]\n\n det_file = os.path.join(args.output_dir, 'detections.pkl')\n\n img_id = -1\n with torch.no_grad():\n for batch, (im_data, im_infos) in enumerate(val_dataloader):\n if args.use_cuda:\n im_data_variable = Variable(im_data).cuda()\n else:\n im_data_variable = Variable(im_data)\n\n yolo_outputs = model(im_data_variable)\n for i in range(im_data.size(0)):\n img_id += 1\n output = [item[i].data for item in yolo_outputs]\n im_info = {'width': im_infos[i][0], 'height': im_infos[i][1]}\n detections = yolo_eval(output, im_info, conf_threshold=args.conf_thresh,\n nms_threshold=args.nms_thresh)\n print('im detect [{}/{}]'.format(img_id+1, len(val_dataset)))\n if len(detections) > 0:\n for cls in range(val_imdb.num_classes):\n inds = torch.nonzero(detections[:, -1] == cls).view(-1)\n if inds.numel() > 0:\n cls_det = torch.zeros((inds.numel(), 5))\n cls_det[:, :4] = detections[inds, :4]\n cls_det[:, 4] = detections[inds, 4] * detections[inds, 5]\n all_boxes[cls][img_id] = cls_det.cpu().numpy()\n\n if args.vis:\n img = Image.open(val_imdb.image_path_at(img_id))\n if len(detections) == 0:\n continue\n det_boxes = detections[:, :5].cpu().numpy()\n det_classes = detections[:, -1].long().cpu().numpy()\n im2show = draw_detection_boxes(img, det_boxes, det_classes, class_names=val_imdb.classes)\n plt.figure()\n plt.imshow(im2show)\n plt.show()\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n\n val_imdb.evaluate_detections(all_boxes, output_dir=args.output_dir)\n\nif __name__ == '__main__':\n test()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", 
"id": "1823090", "language": "Python", "matching_score": 6.378086090087891, "max_stars_count": 1, "path": "test.py" }, { "content": "import os\nimport argparse\nimport time\nimport torch\nfrom torch.autograd import Variable\nfrom PIL import Image\nfrom test import prepare_im_data\nfrom yolov2 import Yolov2\nfrom yolo_eval import yolo_eval\nfrom util.visualize import draw_detection_boxes\nimport matplotlib.pyplot as plt\nfrom util.network import WeightLoader\n\n\ndef parse_args():\n\n parser = argparse.ArgumentParser('Yolo v2')\n parser.add_argument('--output_dir', dest='output_dir',\n default='output', type=str)\n parser.add_argument('--model_name', dest='model_name',\n default='yolov2_epoch_160', type=str)\n parser.add_argument('--cuda', dest='use_cuda',\n default=False, type=bool)\n\n args = parser.parse_args()\n return args\n\n\ndef demo():\n args = parse_args()\n print('call with args: {}'.format(args))\n\n # input images\n images_dir = 'images'\n images_names = ['image1.jpg', 'image2.jpg']\n\n classes = ('aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\n model = Yolov2()\n weight_loader = WeightLoader()\n weight_loader.load(model, 'yolo-voc.weights')\n print('loaded')\n\n # model_path = os.path.join(args.output_dir, args.model_name + '.pth')\n # print('loading model from {}'.format(model_path))\n # if torch.cuda.is_available():\n # checkpoint = torch.load(model_path)\n # else:\n # checkpoint = torch.load(model_path, map_location='cpu')\n # model.load_state_dict(checkpoint['model'])\n\n if args.use_cuda:\n model.cuda()\n\n model.eval()\n print('model loaded')\n\n for image_name in images_names:\n image_path = os.path.join(images_dir, image_name)\n img = Image.open(image_path)\n im_data, im_info = prepare_im_data(img)\n\n if args.use_cuda:\n im_data_variable = Variable(im_data).cuda()\n else:\n im_data_variable = Variable(im_data)\n\n tic = time.time()\n\n yolo_output = model(im_data_variable)\n yolo_output = [item[0].data for item in yolo_output]\n detections = yolo_eval(yolo_output, im_info, conf_threshold=0.6, nms_threshold=0.4)\n\n toc = time.time()\n cost_time = toc - tic\n print('im detect, cost time {:4f}, FPS: {}'.format(\n toc-tic, int(1 / cost_time)))\n\n det_boxes = detections[:, :5].cpu().numpy()\n det_classes = detections[:, -1].long().cpu().numpy()\n im2show = draw_detection_boxes(img, det_boxes, det_classes, class_names=classes)\n plt.figure()\n plt.imshow(im2show)\n plt.show()\n\nif __name__ == '__main__':\n demo()\n", "id": "384950", "language": "Python", "matching_score": 0.47022107243537903, "max_stars_count": 62, "path": "demo.py" }, { "content": "import tensorflow as tf\r\ntf.enable_eager_execution()\r\n\r\n##Data\r\nx_data=[1,3,5,7,9] #y=2x+3\r\ny_data=[5,9,13,17,21]\r\n\r\nW=tf.Variable(3.7) #초기 랜덤값\r\nb=tf.Variable(7.8)\r\n\r\nlearning_rate=0.01 #Gradient 값을 얼마나 반영할것인지\r\n\r\nfor i in range(2501):\r\n \r\n ##경사하강법\r\n with tf.GradientTape() as tape:\r\n \r\n hypothesis=W*x_data+b #가설함수\r\n cost=tf.reduce_mean(tf.square(hypothesis-y_data)) #cost 구하는 공식\r\n \r\n W_grad,b_grad=tape.gradient(cost,[W,b])\r\n \r\n W.assign_sub(learning_rate*W_grad) #W와 b를 업데이트 해줌, P.assign_sub(Q)=>P-=Q를 의미\r\n b.assign_sub(learning_rate*b_grad)\r\n \r\n if(i%100==0): #i가 100의 배수일때마다 출력\r\n print(\"{:5}|{:10.5f}|{:10.4}|{:10.6f}\".format(i,W.numpy(),b.numpy(),cost))\r\n #{:5}-->5만큼 정렬, 문자열이면 왼쪽정렬 숫자면 오른쪽정렬\r\n #{:>5}--> 5 크기만큼 우측정렬, 
남는공간 공백으로 채움\r\n #{:05}-->5만큼 우측정렬, 남는공간 0으로 채움(0 자리에 다른거X)\r\n #{:<5}--> 5 크기만큼 좌측정렬\r\n #{:^5}--> 5 크기만큼 중앙정렬\r\n #{:x>5}, {:x<5}-->남는공간 x로 채움\r\n #{:10.4f}--> 10 크기만큼 우측정렬, 소숫점 아래 4자리까지 출력\r\n #{:10.4}---> 10 크기만큼 우측정렬, 소숫점 아래 3자리까지 출력, 뒤에 3.010인경우 3.01, 3.000인 경우 3.0만 출력\r\n #{:010.4f}-->10 크기만큼 우측정렬, 소숫점 아래 4째자리까지 출력, 남는공간 0으로 채움, '.'포함 10자리\r\n #{1:~}{2:~}{0:~}{3:~}.format(0번째, 1번째, 2번째, 3번째) 인 경우 1번째,2번째,0번째,3번째 순으로 출력됨\r\n #ㄴ>없으면 순서대로\r\n \r\n \r\n'''\r\n출력결과\r\n 0| 2.09800| 7.534|200.009995\r\n 100| 1.58088| 5.746| 1.846184\r\n 200| 1.73936| 4.708| 0.713970\r\n 300| 1.83792| 4.062| 0.276111\r\n 400| 1.89920| 3.66| 0.106780\r\n 500| 1.93732| 3.411| 0.041295\r\n 600| 1.96102| 3.255| 0.015970\r\n 700| 1.97576| 3.159| 0.006176\r\n 800| 1.98493| 3.099| 0.002388\r\n 900| 1.99063| 3.061| 0.000924\r\n 1000| 1.99417| 3.038| 0.000357\r\n 1100| 1.99637| 3.024| 0.000138\r\n 1200| 1.99775| 3.015| 0.000053\r\n 1300| 1.99860| 3.009| 0.000021\r\n 1400| 1.99913| 3.006| 0.000008\r\n 1500| 1.99946| 3.004| 0.000003\r\n 1600| 1.99966| 3.002| 0.000001\r\n 1700| 1.99979| 3.001| 0.000000\r\n 1800| 1.99987| 3.001| 0.000000\r\n 1900| 1.99992| 3.001| 0.000000\r\n 2000| 1.99995| 3.0| 0.000000\r\n 2100| 1.99997| 3.0| 0.000000\r\n 2200| 1.99998| 3.0| 0.000000\r\n 2300| 1.99999| 3.0| 0.000000\r\n 2400| 1.99999| 3.0| 0.000000\r\n 2500| 2.00000| 3.0| 0.000000\r\n'''", "id": "6258304", "language": "Python", "matching_score": 3.0844807624816895, "max_stars_count": 0, "path": "Simple Linear Regression.py" }, { "content": "# -*- coding: utf-8 -*-\r\nimport tensorflow as tf\r\ntf.enable_eager_execution()\r\n\r\ntf.set_random_seed(0) # for reproducibility\r\n\r\nx_data = [1., 2., 3., 4.]\r\ny_data = [1., 3., 5., 7.]\r\n\r\nW = tf.Variable(tf.random_normal([1], -100., 100.))\r\n\r\nfor step in range(300):\r\n hypothesis = W * x_data\r\n cost = tf.reduce_mean(tf.square(hypothesis - y_data))\r\n \r\n alpha = 0.01\r\n gradient = tf.reduce_mean(tf.multiply(tf.multiply(W, x_data) - y_data, x_data))\r\n descent = W - tf.multiply(alpha, gradient)\r\n W.assign(descent)\r\n if step % 10 == 0:\r\n print('{:5} | {:10.4f} | {:10.6f}'.format(step, cost.numpy(), W.numpy()[0]))\r\n\r\n", "id": "10104892", "language": "Python", "matching_score": 1.412329912185669, "max_stars_count": 0, "path": "Gradient Descent.py" }, { "content": "# -*- coding: utf-8 -*-\r\nimport tensorflow as tf\r\ntf.enable_eager_execution()\r\nimport numpy as np\r\n\r\nX=np.array([1,2,3])\r\nY=np.array([1,2,3])\r\n\r\ndef cost_func(W,X,Y):\r\n hypothesis=X*W\r\n return tf.reduce_mean(tf.square(hypothesis-Y))\r\nW_values=np.linspace(-3,5,num=15)\r\ncost_values=[]\r\n\r\nfor feed_W in W_values:\r\n curr_cost = cost_func(feed_W, X, Y)\r\n cost_values.append(curr_cost)\r\n print(\"{:6.3f} | {:10.5f}\".format(feed_W, curr_cost))\r\n\r\n'''\r\n-3.000 | 74.66667\r\n-2.429 | 54.85714\r\n-1.857 | 38.09524\r\n-1.286 | 24.38095\r\n-0.714 | 13.71429\r\n-0.143 | 6.09524\r\n 0.429 | 1.52381\r\n 1.000 | 0.00000\r\n 1.571 | 1.52381\r\n 2.143 | 6.09524\r\n 2.714 | 13.71429\r\n 3.286 | 24.38095\r\n 3.857 | 38.09524\r\n 4.429 | 54.85714\r\n 5.000 | 74.66667\r\n '''", "id": "9965737", "language": "Python", "matching_score": 1.8373069763183594, "max_stars_count": 0, "path": "Cost function.py" } ]
1.799832
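
Note on the collate step documented in dataset/roidb.py in the row above: because each image carries a different number of ground-truth boxes, its docstring describes zero-padding the boxes and class labels to the batch maximum before stacking. The short standalone sketch below is an illustration of that padding idea only; the function name, tensor shapes, and dummy data are assumptions for the example and not the repository's exact code.

import torch

def pad_collate(batch):
    # batch: list of (image, boxes, classes); boxes has shape (N_i, 4), classes shape (N_i,)
    bsize = len(batch)
    ims, boxes, classes = zip(*batch)
    max_n = max(b.size(0) for b in boxes)          # largest object count in this batch
    padded_boxes = torch.zeros(bsize, max_n, 4)    # zero-pad boxes to (B, max_n, 4)
    padded_classes = torch.zeros(bsize, max_n, dtype=torch.long)
    for i, (b, c) in enumerate(zip(boxes, classes)):
        padded_boxes[i, :b.size(0)] = b
        padded_classes[i, :c.size(0)] = c
    return torch.stack(ims, 0), padded_boxes, padded_classes

# usage with dummy data: two images with 2 and 4 objects respectively
batch = [(torch.rand(3, 224, 224), torch.rand(2, 4), torch.tensor([1, 5])),
         (torch.rand(3, 224, 224), torch.rand(4, 4), torch.tensor([0, 2, 2, 7]))]
ims, pb, pc = pad_collate(batch)
print(ims.shape, pb.shape, pc.shape)  # (2, 3, 224, 224), (2, 4, 4), (2, 4)

Running it prints batched shapes in which every sample shares the same padded object dimension, which is the layout the roidb.py docstring describes for its own detection_collate.
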
yaq007
[ { "content": "\"\"\"Extremely simple model where all parameters are from convolutions.\n\"\"\"\n\nimport math\nimport tensorflow as tf\nimport numpy as np\nfrom cleverhans import initializers\nfrom cleverhans.serial import NoRefModel\n\nclass Layer(object):\n def get_output_shape(self):\n return self.output_shape\n\nclass Conv2D(Layer):\n \"\"\"\n A simple model that uses only convolution and downsampling---no batch norm or other techniques\n \"\"\"\n def __init__(self, scope, nb_filters): \n self.nb_filters = nb_filters\n self.scope = scope\n \n def set_input_shape(self, input_shape):\n batch_size, rows, cols, input_channels = input_shape\n input_shape = list(input_shape)\n input_shape[0] = 1\n dummy_batch = tf.zeros(input_shape)\n dummy_output = self.fprop(dummy_batch)\n output_shape = [int(e) for e in dummy_output.get_shape()]\n output_shape[0] = batch_size\n self.output_shape = tuple(output_shape)\n \n def fprop(self, x):\n conv_args = dict(\n activation=tf.nn.leaky_relu,\n kernel_initializer=initializers.HeReLuNormalInitializer,\n kernel_size=3,\n padding='same')\n \n\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n for scale in range(3):\n x = tf.layers.conv2d(x, self.nb_filters << scale, **conv_args)\n x = tf.layers.conv2d(x, self.nb_filters << (scale + 1), **conv_args)\n x = tf.layers.average_pooling2d(x, 2, 2)\n \n # reshape the output of conv to be the input of capsule\n num_capsules = x.get_shape().as_list()[1] * x.get_shape().as_list()[2]\n input_atoms = x.get_shape().as_list()[3]\n x = tf.reshape(x, [-1, num_capsules, input_atoms])\n return x\n\nclass Capsule(Layer):\n \"\"\" Capsule layer\n input dim: batch_size, num_capsules_input, input_atoms\n output dim: batch_size, num_capsules_output, output_atoms\n \"\"\"\n\n def __init__(self, scope, num_capsules_output, output_atoms, num_routing):\n self.scope = scope\n self.num_capsules_output = num_capsules_output\n self.output_atoms = output_atoms\n self.num_routing = num_routing\n\n def set_input_shape(self, input_shape):\n batch_size, num_capsules_input, input_atoms = input_shape\n self.num_capsules_input = num_capsules_input\n self.input_atoms = input_atoms\n self.output_shape = [batch_size, self.num_capsules_output, self.output_atoms]\n self.make_vars()\n \n def make_vars(self):\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n w = tf.get_variable('DW', [self.num_capsules_input, self.input_atoms, self.num_capsules_output * self.output_atoms], initializer=tf.initializers.truncated_normal(stddev=0.03))\n b = tf.get_variable('bias', [self.num_capsules_output, self.output_atoms], initializer=tf.initializers.constant())\n return w, b\n\n def _squash(self, input_tensor):\n \"\"\"Applies norm nonlinearity (squash) to a capsule layer.\n Args:\n input_tensor: Input tensor. 
Shape is [batch, num_channels, num_atoms] for a\n fully connected capsule layer or\n [batch, num_channels, num_atoms, height, width] for a convolutional\n capsule layer.\n Returns:\n A tensor with same shape as input (rank 3) for output of this layer.\n \"\"\"\n \n norm = tf.norm(input_tensor, axis=2, keep_dims=True)\n norm_squared = norm * norm\n return (input_tensor / norm) * (norm_squared / (1 + norm_squared))\n\n\n def _leaky_routing(self, logits, output_dim):\n \n leak = tf.zeros_like(logits, optimize=True)\n leak = tf.reduce_sum(leak, axis=2, keep_dims=True)\n leaky_logits = tf.concat([leak, logits], axis=2)\n leaky_routing = tf.nn.softmax(leaky_logits, dim=2)\n return tf.split(leaky_routing, [1, output_dim], 2)[1]\n\n\n def _update_routing(self, votes, biases, logit_shape, num_dims, input_dim, output_dim,\n num_routing, leaky):\n votes_t_shape = [3, 0, 1, 2]\n for i in range(num_dims - 4):\n votes_t_shape += [i + 4]\n r_t_shape = [1, 2, 3, 0]\n for i in range(num_dims - 4):\n r_t_shape += [i + 4]\n votes_trans = tf.transpose(votes, votes_t_shape)\n\n def _body(i, logits, activations):\n \"\"\"Routing while loop.\"\"\"\n # route: [batch, input_dim, output_dim, ...]\n if leaky:\n route = self._leaky_routing(logits, output_dim)\n else:\n route = tf.nn.softmax(logits, dim=2)\n preactivate_unrolled = route * votes_trans\n #route.shape (16,?, 49, 32)\n #votes_trans.shape (16, ?, 49, 32)\n preact_trans = tf.transpose(preactivate_unrolled, r_t_shape)\n preactivate = tf.reduce_sum(preact_trans, axis=1) + biases\n activation = self._squash(preactivate)\n activations = activations.write(i, activation)\n # distances: [batch, input_dim, output_dim]\n act_3d = tf.expand_dims(activation, 1)\n tile_shape = np.ones(num_dims, dtype=np.int32).tolist()\n tile_shape[1] = input_dim\n act_replicated = tf.tile(act_3d, tile_shape)\n distances = tf.reduce_sum(votes * act_replicated, axis=3)\n logits += distances\n return (i + 1, logits, activations)\n\n activations = tf.TensorArray(\n dtype=tf.float32, size=num_routing, clear_after_read=False)\n logits = tf.fill(logit_shape, 0.0)\n i = tf.constant(0, dtype=tf.int32)\n _, logits, activations = tf.while_loop(\n lambda i, logits, activations: i < num_routing,\n _body,\n loop_vars=[i, logits, activations],\n swap_memory=True)\n\n return activations.read(num_routing - 1)\n\n\n def fprop(self, x):\n for i in range(1):\n \n with tf.name_scope(self.scope):\n weights, biases = self.make_vars()\n input_tiled = tf.tile(\n tf.expand_dims(x, -1),\n [1, 1, 1, self.num_capsules_output * self.output_atoms])\n votes = tf.reduce_sum(input_tiled * weights, axis=2)\n votes_reshaped = tf.reshape(votes,\n [-1, self.num_capsules_input, self.num_capsules_output, self.output_atoms])\n \n input_shape = tf.shape(x)\n logit_shape = tf.stack([input_shape[0], self.num_capsules_input, self.num_capsules_output])\n activations = self._update_routing(\n votes=votes_reshaped,\n biases=biases,\n logit_shape=logit_shape,\n num_dims=4,\n input_dim=self.num_capsules_input,\n output_dim=self.num_capsules_output,\n num_routing=self.num_routing,\n leaky=True)\n return activations\n\nclass Reconstruction(Layer):\n ''' Reconstruction Network:\n return: a concatenation of nb_classes logits and the winning-capsule recontruction\n shape: (batch_size, nb_classes + 1d image shape)\n\n '''\n def __init__(self, scope, nb_classes):\n self.scope = scope\n self.nb_classes = nb_classes\n \n \n def set_input_shape(self, input_shape):\n self.batch_size, _, self.num_atoms = input_shape\n input_shape = 
list(input_shape)\n input_shape[0] = 1\n dummy_batch = tf.zeros(input_shape)\n dummy_output = self.fprop(dummy_batch)\n output_shape = dummy_output.get_shape().as_list()\n output_shape[0] = self.batch_size\n self.output_shape = tuple(output_shape)\n\n def fprop(self, x, **kwargs):\n # the first num_classes capsules are used for classification\n logit = tf.norm(x[:, :self.nb_classes, :], axis=-1)\n \n # use the predicted label to construct the mask\n mask = tf.one_hot(tf.argmax(logit, axis=-1), self.nb_classes)\n bg = tf.ones_like(x[:, self.nb_classes:, 0])\n mask_bg = tf.concat([mask, bg], axis=-1)\n capsule_mask_3d = tf.expand_dims(mask_bg, -1)\n atom_mask = tf.tile(capsule_mask_3d, [1, 1, self.num_atoms])\n filtered_embedding = x * atom_mask\n filtered_embedding_2d = tf.contrib.layers.flatten(filtered_embedding)\n\n # feed the extracted class capsule + background capsules into the reconstruction network\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n net = tf.contrib.layers.fully_connected(filtered_embedding_2d, 1024)\n net = tf.contrib.layers.fully_connected(net, 32//4 * 32//4 * 256)\n net = tf.reshape(net, [-1, 32//4, 32//4, 256])\n net = tf.contrib.layers.conv2d_transpose(net, 64, [4, 4], stride=2)\n net = tf.contrib.layers.conv2d_transpose(net, 32, [4, 4], stride=2)\n net = tf.layers.conv2d(net, 3, kernel_size=4, padding='same')\n net = tf.sigmoid(net) \n reconstruction_2d = tf.layers.flatten(net)\n return tf.concat([logit, reconstruction_2d], axis=-1)\n \n# extract the class logits for classification\nclass IdentityRecons(Layer):\n def __init__(self, nb_classes):\n self.nb_classes = nb_classes\n \n def set_input_shape(self, input_shape):\n batch_size, _ = input_shape\n self.output_shape = [batch_size, self.nb_classes]\n \n def fprop(self, x):\n return x[:, :self.nb_classes]\n \nclass Network(NoRefModel):\n \"\"\"CapsNet model.\"\"\"\n\n def __init__(self, layers, input_shape, nb_classes, scope=None):\n \"\"\"\n :param layers: a list of layers in CleverHans format\n each with set_input_shape() and fprop() methods.\n :param input_shape: 4-tuple describing input shape (e.g None, 32, 32, 3)\n :param scope: string name of scope for Variables\n \"\"\"\n super(Network, self).__init__(scope, nb_classes, {}, scope is not None) \n with tf.variable_scope(self.scope):\n self.build(layers, input_shape, nb_classes)\n\n def get_vars(self):\n if hasattr(self, \"vars\"):\n return self.vars\n return super(Network, self).get_vars()\n\n def build(self, layers, input_shape, nb_classes):\n self.layer_names = []\n self.layers = layers\n self.input_shape = input_shape\n self.nb_classes = nb_classes\n layers[-2].name = 'recons' \n layers[-1].name = 'logits'\n for i, layer in enumerate(self.layers):\n if hasattr(layer, 'name'):\n name = layer.name\n else:\n name = layer.__class__.__name__ + str(i)\n layer.name = name\n self.layer_names.append(name)\n\n layer.set_input_shape(input_shape)\n input_shape = layer.get_output_shape()\n\n def make_input_placeholder(self):\n return tf.placeholder(tf.float32, (None, self.input_shape[1], self.input_shape[2], self.input_shape[3]))\n\n def make_label_placeholder(self):\n return tf.placeholder(tf.float32, (None, self.nb_classes))\n\n def fprop(self, x, set_ref=False, **kwargs):\n if self.scope is not None:\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n return self._fprop(x, set_ref, **kwargs)\n return self._prop(x, set_ref)\n\n def _fprop(self, x, set_ref=False, **kwargs):\n states = []\n for layer in self.layers:\n if set_ref:\n layer.ref = x\n 
else:\n x = layer.fprop(x)\n assert x is not None\n states.append(x)\n states = dict(zip(self.layer_names, states))\n return states\n\n def add_internal_summaries(self):\n pass\n\n\n# Convolutional Layers + CapsLayer + Reconstruction \ndef CapsNetRecons(scope, nb_classes, nb_filters, input_shape, num_capsules_output, output_atoms, num_routing):\n layers=[Conv2D(scope, nb_filters),\n Capsule(\"CapsLayer\", num_capsules_output, output_atoms, num_routing),\n Reconstruction(\"ReconsLayer\", nb_classes),\n IdentityRecons(nb_classes)]\n model = Network(layers, input_shape, nb_classes, scope)\n return model\n\n\n\n", "id": "5040080", "language": "Python", "matching_score": 0, "max_stars_count": 21, "path": "cleverhans/model_zoo/capsnet_deflect.py" }, { "content": "\"\"\"\nThis tutorial shows how to train a deflecting model based on CapsLayer with Tensorflow.\nThe original paper can be found at:\n\n\"\"\"\n# pylint: disable=missing-docstring\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport numpy as np\nimport tensorflow as tf\nimport sys\nsys.path.append(\"/home/yaoqin/cleverhans/\")\nfrom cleverhans.augmentation import random_shift, random_horizontal_flip\nfrom cleverhans.compat import flags\nfrom cleverhans.dataset import SVHN, CIFAR10\nfrom cleverhans.loss import MarginCycLoss\nfrom cleverhans.model_zoo.capsnet_deflect import CapsNetRecons\nfrom cleverhans.train import train\nfrom cleverhans.utils_tf import model_eval\nfrom cleverhans_tutorials import check_installation\n\nFLAGS = flags.FLAGS\nDATASET = 'SVHN'\nif DATASET == 'SVHN':\n BATCH_SIZE = 64\n IMAGE_SIZE = 64000000\n NB_EPOCHS = int(IMAGE_SIZE/50000.)\n NUM_CAPSULES_OUTPUT = 25\n OUTPUT_ATOMS = 4\n NUM_ROUTING = 1\n LEARNING_RATE = 0.0001\n NB_FILTERS = 64\n TRAIN_END = 73257\n TEST_END = 26032\nelif DATASET == 'CIFAR10':\n BATCH_SIZE = 128\n IMAGE_SIZE = 64000000\n NB_EPOCHS = int(IMAGE_SIZE/50000.)\n NUM_CAPSULES_OUTPUT = 25\n OUTPUT_ATOMS = 8\n NUM_ROUTING = 1\n LEARNING_RATE = 0.0002\n NB_FILTERS = 128\n TRAIN_END = 60000\n TEST_END = 10000\nelse:\n print(\"Only SVHN and CIFAR10 are supported!!\")\n\n\ndef train_deflecting(dataset_name=DATASET, train_start=0, train_end=TRAIN_END, test_start=0,\n test_end=TEST_END, nb_epochs=NB_EPOCHS, batch_size=BATCH_SIZE,\n num_capsules_output=NUM_CAPSULES_OUTPUT,\n output_atoms = OUTPUT_ATOMS,\n num_routing = NUM_ROUTING,\n learning_rate=LEARNING_RATE,\n nb_filters=NB_FILTERS, num_threads=None):\n \"\"\"\n SVHN cleverhans tutorial to train a deflecting model based on CapsLayer\n :dataset_name: SVHN or CIFAR10\n :param train_start: index of first training set example\n :param train_end: index of last training set example\n :param test_start: index of first test set example\n :param test_end: index of last test set example\n :param nb_epochs: number of epochs to train model\n :param batch_size: size of training batches\n :param num_capsules_output: number of output capsules\n :param output_atoms: size of each capsule vector\n :param num_routing: number of routings in capsule layer\n :param learning_rate: learning rate for training\n\n \"\"\"\n\n # Set TF random seed to improve reproducibility\n tf.set_random_seed(1234)\n\n # Create TF session\n if num_threads:\n config_args = dict(intra_op_parallelism_threads=1)\n else:\n config_args = {}\n sess = tf.Session(config=tf.ConfigProto(**config_args))\n\n # Get svhn data\n if dataset_name == \"SVHN\": \n data = 
SVHN(train_start=train_start, train_end=train_end,\n test_start=test_start, test_end=test_end)\n elif dataset_name == \"CIFAR10\":\n data = CIFAR10(train_start=train_start, train_end=train_end,\n test_start=test_start, test_end=test_end)\n dataset_size = data.x_train.shape[0]\n dataset_train = data.to_tensorflow()[0]\n \n if dataset_name == \"SVHN\": \n dataset_train = dataset_train.map(lambda x, y: (random_shift((x)), y), 4)\n elif dataset_name == \"CIFAR10\":\n dataset_train = dataset_train.map(\n lambda x, y: (random_shift(random_horizontal_flip(x)), y), 4)\n \n dataset_train = dataset_train.batch(batch_size)\n dataset_train = dataset_train.prefetch(16)\n x_train, y_train = data.get_set('train') \n x_test, y_test = data.get_set('test')\n \n # Use Image Parameters\n img_rows, img_cols, nchannels = x_test.shape[1:4]\n nb_classes = y_test.shape[1]\n\n # Define input TF placeholder\n x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,\n nchannels))\n y = tf.placeholder(tf.float32, shape=(None, nb_classes))\n\n\n train_params = {\n 'nb_epochs': nb_epochs,\n 'batch_size': batch_size,\n 'learning_rate': learning_rate\n }\n eval_params = {'batch_size': batch_size}\n \n rng = np.random.RandomState([2017, 8, 30])\n\n \n model = CapsNetRecons(dataset_name, nb_classes, nb_filters, input_shape=[batch_size, img_rows, img_cols, nchannels], num_capsules_output=num_capsules_output, output_atoms=output_atoms, num_routing=num_routing)\n var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=dataset_name)\n \n preds = model.get_logits(x) \n loss = MarginCycLoss(model)\n\n def evaluate():\n acc = model_eval(sess, x, y, preds, x_test, y_test, args=eval_params) \n print('Test accuracy on %s examples: %0.4f' % (\"clean\", acc))\n return acc\n \n train(sess, loss, None, None,\n dataset_train=dataset_train, dataset_size=dataset_size,\n evaluate=evaluate, args=train_params, rng=rng,\n var_list=var_lists) \n\n\ndef main(argv=None):\n \n check_installation(__file__)\n\n train_deflecting(dataset_name=FLAGS.dataset, \n train_end=FLAGS.train_end, \n test_end=FLAGS.test_end, \n nb_epochs=FLAGS.nb_epochs, \n batch_size=FLAGS.batch_size, \n num_capsules_output=FLAGS.num_capsules_output, \n output_atoms=FLAGS.output_atoms,\n num_routing=FLAGS.num_routing,\n learning_rate=FLAGS.learning_rate,\n nb_filters=FLAGS.nb_filters)\n\n\nif __name__ == '__main__': \n flags.DEFINE_integer('train_end', TRAIN_END,\n 'Number of training data')\n flags.DEFINE_integer('test_end', TEST_END,\n 'Number of test data')\n flags.DEFINE_integer('nb_filters', NB_FILTERS,\n 'Model size multiplier')\n flags.DEFINE_integer('nb_epochs', NB_EPOCHS,\n 'Number of epochs to train model')\n flags.DEFINE_integer('batch_size', BATCH_SIZE,\n 'Size of training batches')\n flags.DEFINE_integer('num_capsules_output', NUM_CAPSULES_OUTPUT,\n 'Number of class capsules and background capsules')\n flags.DEFINE_integer('output_atoms', OUTPUT_ATOMS,\n 'Size of each capsule')\n flags.DEFINE_integer('num_routing', NUM_ROUTING,\n 'Number of routing in capsule layer')\n flags.DEFINE_float('learning_rate', LEARNING_RATE,\n 'Learning rate for training')\n flags.DEFINE_string('dataset', DATASET, 'SVHN or CIFAR10')\n\n tf.app.run()\n", "id": "10453853", "language": "Python", "matching_score": 0, "max_stars_count": 21, "path": "cleverhans_tutorials/train_deflect.py" } ]
0
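The Reconstruction layer in the capsnet_deflect.py record above classifies by taking the length of each class capsule and then zeroes out every class capsule except the winner before decoding. Below is a rough NumPy sketch of that masking step only; the shapes, names and random input are illustrative (the real layer does this in TensorFlow with tf.one_hot, tf.tile and tf.expand_dims).

import numpy as np

def mask_winning_capsule(poses, nb_classes):
    # poses: (batch, num_capsules, num_atoms); the first nb_classes capsules are class capsules
    logits = np.linalg.norm(poses[:, :nb_classes, :], axis=-1)   # class score = capsule length
    winners = np.argmax(logits, axis=-1)
    class_mask = np.zeros((poses.shape[0], nb_classes))
    class_mask[np.arange(poses.shape[0]), winners] = 1.0         # one-hot over class capsules
    background = np.ones((poses.shape[0], poses.shape[1] - nb_classes))
    full_mask = np.concatenate([class_mask, background], axis=-1)[..., None]
    return logits, poses * full_mask                             # losing class capsules zeroed

poses = np.random.rand(2, 25, 4)                                 # e.g. 10 class + 15 background capsules
logits, masked = mask_winning_capsule(poses, nb_classes=10)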
gonglucky
[ { "content": "import json\nres = {}\nwith open('./socfb-Caltech36.mtx') as f:\n texts = f.read()\ntexts = texts.split('\\n')\ntexts.pop(0)\n\nres['#nodesorigin'] = int(texts[0].split(' ')[0])\nres['#edgesorigin'] = int(texts[0].split(' ')[2])\nres['links'] = []\n\nsamplePercent = 0.15\nnodes = range(1, res['#nodesorigin']+1, int(1 / samplePercent))\nres['#nodes'] = len(nodes)\n\nfor t in texts[1:]:\n ts = t.split(' ')\n if len(ts) < 2:\n continue\n if int(ts[0])-1 in nodes and int(ts[1])-1 in nodes:\n res['links'].append({\"source\": int(ts[0])-1, \"target\": int(ts[1])-1})\nres['#links'] = len(res['links'])\n\n# re-mapping\nm = {}\nfor (n, i) in zip(nodes, range(0, res['#nodes'])):\n m[n] = i\nfor link in res['links']:\n link['source'] = m[link['source']]\n link['target'] = m[link['target']]\n\nwith open('./socfb-Caltech36.json', 'w') as f:\n json.dump(res, f)", "id": "11709251", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "test/data/mtx2json.py" }, { "content": "import json\nimport csv\n\nnewlist = []\n\nwith open('./suncg_occur.json') as f:\n suncg_occur = json.load(f)\n\nfor obj in suncg_occur:\n newlist.append(suncg_occur[obj])\n\n# \"modelId\": \"s__999\",\n# \"semantic\": \"unknown\",\n# \"occur\": 2\n\nwith open('./suncg_occur.csv', 'w', newline=\"\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['modelId', 'semantic', 'occur'])\n for obj in newlist:\n writer.writerow([obj['modelId'], obj['semantic'], obj['occur']])", "id": "10175949", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "test/data/suncg_occur_toCSV.py" } ]
0
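mtx2json.py above keeps roughly every (1 / samplePercent)-th node, drops every edge whose endpoints were not both kept, and compacts the surviving ids. A minimal sketch of that sample-and-remap pattern, written with consistent 0-based ids; the toy edge list and function name are made up for illustration.

import json

def sample_graph(edges, n_nodes, percent=0.15):
    kept = list(range(0, n_nodes, int(1 / percent)))        # keep every k-th node, 0-based
    index = {node: i for i, node in enumerate(kept)}        # old id -> new compact id
    links = [{"source": index[u], "target": index[v]}
             for u, v in edges if u in index and v in index]
    return {"#nodes": len(kept), "#links": len(links), "links": links}

edges = [(0, 6), (6, 12), (1, 2)]                            # toy 0-based edge list
print(json.dumps(sample_graph(edges, n_nodes=20), indent=1))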
anushiv25
[ { "content": "from rest_framework import serializers\nfrom .models import User\n\nclass UserSerializer(serializers.ModelSerializer):\n password = serializers.CharField(max_length = 65,min_length = 8,write_only = True) #Confidential thing\n email = serializers.EmailField(max_length = 255)\n\n class Meta:\n model = User\n fields = ['username','email','firstname','lastname',\n 'address','dob','company','password']\n\n \n def validate(self, attrs):\n if User.objects.filter(email=attrs['email']).exists():\n raise serializers.ValidationError(\n {'email':('Email is already in use')})\n return super().validate(attrs)\n\n def create(self, validated_data):\n return User.objects.create_user(**validated_data)\n\nclass LoginSerializer(serializers.ModelSerializer):\n password = serializers.CharField(\n max_length=65, min_length=8, write_only=True)\n email = serializers.EmailField(max_length=255, min_length=5)\n\n class Meta:\n model = User\n fields = ['email', 'password']", "id": "1734383", "language": "Python", "matching_score": 3.9951584339141846, "max_stars_count": 0, "path": "managerapi/authentication/serializers.py" }, { "content": "from rest_framework import serializers\nfrom rest_framework.serializers import ModelSerializer\nfrom .models import Employee\n\nclass EmployeeSerializer(ModelSerializer):\n password = serializers.CharField(max_length = 65,min_length = 8,write_only = True) #Confidential\n\n class Meta:\n model = Employee\n\n fields = ['emp_id','firstname','lastname','password','address','dob','company','mobile','city']", "id": "10848429", "language": "Python", "matching_score": 2.633572578430176, "max_stars_count": 0, "path": "managerapi/EmpApi/serializers.py" }, { "content": "# Generated by Django 2.2.16 on 2020-12-19 06:20\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('emp_id', models.CharField(max_length=15, unique=True)),\n ('email', models.EmailField(max_length=255, unique=True)),\n ('firstname', models.CharField(max_length=100)),\n ('lastname', models.CharField(max_length=100)),\n ('password', models.CharField(max_length=50)),\n ('address', models.CharField(max_length=255)),\n ('dob', models.DateField()),\n ('company', models.CharField(max_length=255)),\n ('mobile', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^\\\\d{1,10}$')])),\n ('city', models.CharField(max_length=100)),\n ],\n options={\n 'ordering': ['firstname', 'lastname'],\n },\n ),\n ]\n", "id": "4498026", "language": "Python", "matching_score": 3.543330430984497, "max_stars_count": 0, "path": "managerapi/EmpApi/migrations/0001_initial.py" }, { "content": "from django.db import models\nfrom django.conf import settings\nfrom authentication.models import User\nfrom django.core.validators import RegexValidator\n\n#Employee Table\nclass Employee(models.Model):\n emp_id = models.CharField(max_length=15, unique=True)\n email = models.EmailField(max_length=255, unique=True)\n firstname = models.CharField(max_length=100)\n lastname = models.CharField(max_length=100)\n password = models.CharField(max_length=50)\n address = models.CharField(max_length=255)\n dob = models.DateField(auto_now=False, auto_now_add=False)\n company = models.CharField(max_length=255)\n mobile = models.CharField(max_length=15, 
validators=[RegexValidator(r'^\\d{1,10}$')])\n city = models.CharField(max_length=100)\n\n class Meta:\n ordering = ['firstname', 'lastname'] #To retreive data first by firstname then by lastname\n\n def __str__(self):\n return emp_id", "id": "627752", "language": "Python", "matching_score": 4.4125261306762695, "max_stars_count": 0, "path": "managerapi/EmpApi/models.py" }, { "content": "#Implementing Custom User Model as email is used in place of Username.\nfrom django.db import models\n\nfrom django.contrib.auth.models import (\n AbstractBaseUser,BaseUserManager, PermissionsMixin)\n\nfrom django.db import models\n\n\nclass UserManager(BaseUserManager):\n\n def create_user(self, username, email, firstname, lastname, address, dob, company, password=None):\n\n if username is None:\n raise TypeError('Users should have a Username')\n if email is None:\n raise TypeError('Users should have a Email')\n\n user = self.model(\n username = username,\n email = self.normalize_email(email),\n firstname = firstname,\n lastname = lastname,\n address = address,\n dob = dob,\n company = company\n )\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, username, email, password=None):\n\n if password is None:\n raise TypeError('Password should not be None')\n\n user = self.create_user(username, email, password)\n user.is_superuser = True\n user.is_staff = True\n user.save()\n return user\n\n\n#Manager data is stored in this table\nclass User(AbstractBaseUser, PermissionsMixin):\n username = models.CharField(max_length=255)\n email = models.EmailField(max_length=255, unique=True, db_index=True)\n firstname = models.CharField(max_length=100)\n lastname = models.CharField(max_length=100)\n address = models.CharField(max_length=255)\n dob = models.DateField(auto_now=False, auto_now_add=False)\n company = models.CharField(max_length=255)\n is_verified = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n is_staff = models.BooleanField(default=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n USERNAME_FIELD = 'email' #To use email and password for authentication\n REQUIRED_FIELDS = ['username']\n\n objects = UserManager()\n\n def __str__(self):\n return email", "id": "11756730", "language": "Python", "matching_score": 1.7991586923599243, "max_stars_count": 0, "path": "managerapi/authentication/models.py" }, { "content": "#Implementing JWT Authentication for the API's\nimport jwt\nfrom rest_framework import authentication, exceptions\nfrom django.conf import settings\nfrom .models import User\n\n\nclass JWTAuthentication(authentication.BaseAuthentication):\n\n def authenticate(self, request):\n auth_data = authentication.get_authorization_header(request)\n\n if not auth_data:\n return None\n \n prefix,token=auth_data.decode('utf-8').split(' ')\n\n try:\n payload = jwt.decode(token,settings.JWT_SECRET_KEY)\n user = User.objects.get(email = payload['email'])\n return (user, token)\n except jwt.DecodeError as identifier:\n raise exceptions.AuthenticationFailed('Your token is invalid, login')\n except jwt.ExpiredSignatureError as identifier:\n raise exceptions.AuthenticationFailed('Your token is expired, login')\n\n\n return super().authenticate(request)", "id": "12174527", "language": "Python", "matching_score": 2.9768521785736084, "max_stars_count": 0, "path": "managerapi/authentication/backends.py" }, { "content": "from django.shortcuts import render\nfrom rest_framework.generics 
import GenericAPIView\nfrom .serializers import UserSerializer, LoginSerializer\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.conf import settings\nfrom django.contrib import auth\nimport jwt\n# Create your views here.\n\n\nclass RegisterView(GenericAPIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serializer = UserSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass LoginView(GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request):\n data = request.data\n email = data.get('email', '')\n password = data.get('password', '')\n user = auth.authenticate(email=email, password=password)\n\n if user:\n auth_token = jwt.encode(\n {'email': user.email}, settings.JWT_SECRET_KEY) #Retreiving SECRET_KEY. When deploying globally add KEY using\n serializer = UserSerializer(user) #Environment variable and then exporting through environment or by entering manually\n data = {'user': serializer.data, 'token': auth_token}\n\n return Response(data, status=status.HTTP_200_OK)\n\n # SEND RESPONSE\n return Response({'detail': 'Invalid credentials'}, status=status.HTTP_401_UNAUTHORIZED)\n", "id": "4009822", "language": "Python", "matching_score": 2.1881790161132812, "max_stars_count": 0, "path": "managerapi/authentication/views.py" }, { "content": "from django.shortcuts import render\nfrom rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView\nfrom .models import Employee\nfrom .serializers import EmployeeSerializer\nfrom rest_framework import permissions\n\n#Listing and Creating all the employees\nclass EmployeeList(ListCreateAPIView):\n\n serializer_class = EmployeeSerializer\n permission_classes = (permissions.IsAuthenticated,)\n\n def perform_create(self, serializer):\n serializer.save()\n\n def get_queryset(self):\n return Employee.objects.all()\n\n\n#Retrieving, Updating, Deleting the employee\nclass EmployeeDetailView(RetrieveUpdateDestroyAPIView):\n\n serializer_class = EmployeeSerializer\n permission_classes = (permissions.IsAuthenticated,)\n lookup_field = \"emp_id\"\n\n def get_queryset(self):\n return Employee.objects.all()", "id": "10430364", "language": "Python", "matching_score": 2.906019449234009, "max_stars_count": 0, "path": "managerapi/EmpApi/views.py" }, { "content": "from django.urls import path\nfrom .views import EmployeeList, EmployeeDetailView\n\n\nurlpatterns = [\n path('', EmployeeList.as_view()),\n path('<slug:emp_id>', EmployeeDetailView.as_view()),\n]", "id": "5329618", "language": "Python", "matching_score": 0.2577943205833435, "max_stars_count": 0, "path": "managerapi/EmpApi/urls.py" }, { "content": "from django.apps import AppConfig\n\n\nclass EmpapiConfig(AppConfig):\n name = 'EmpApi'\n", "id": "6957024", "language": "Python", "matching_score": 0.05845164135098457, "max_stars_count": 0, "path": "managerapi/EmpApi/apps.py" } ]
2.769796
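The LoginView in the record above issues a token with jwt.encode({'email': ...}, SECRET_KEY) and the JWTAuthentication backend decodes it on every request. Here is a standalone sketch of that round trip with PyJWT; the secret, the expiry window and the HS256 choice are placeholders of mine, and recent PyJWT versions require the explicit algorithms argument shown.

import datetime
import jwt   # PyJWT

SECRET = "change-me"          # stands in for settings.JWT_SECRET_KEY

def issue_token(email, minutes=30):
    payload = {"email": email,
               "exp": datetime.datetime.utcnow() + datetime.timedelta(minutes=minutes)}
    return jwt.encode(payload, SECRET, algorithm="HS256")

def read_token(token):
    try:
        return jwt.decode(token, SECRET, algorithms=["HS256"])["email"]
    except jwt.ExpiredSignatureError:
        return None           # expired -> ask the user to log in again
    except jwt.InvalidTokenError:
        return None           # tampered or malformed token

token = issue_token("user@example.com")
print(read_token(token))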
alexmattera
[ { "content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCIS 9650\r\nGroup B\r\nProject\r\n\r\nInitial code to load and analyze flight delay data from\r\nKaggle dataset found at the following address:\r\nhttps://www.kaggle.com/usdot/flight-delays\r\nyou need to download the flights.csv, airlines.csv, airports.csv\r\nfiles to the home directory before running this script\r\n\r\nA data dictionary that looks to explain a lot of these\r\ncodes can be found at:\r\nhttps://www.transtats.bts.gov/Fields.asp?Table_ID=236\r\n\r\n<NAME>\r\n\"\"\"\r\n\r\nimport csv\r\n\r\n###open file\r\nf = open(\"flights.csv\")\r\nrows = csv.reader(f)\r\nnext(rows) #skip first line\r\n\r\n###create class to store flight record values \r\nclass flightRecord:\r\n YEAR = 0\r\n MONTH = 0\r\n DAY = 0\r\n DAY_OF_WEEK = 0\r\n AIRLINE_CODE = \"\" #for consistency, to match to airline name later\r\n FLIGHT_NUMBER = \"\"\r\n TAIL_NUMBER = \"\"\r\n ORIGIN_AIRPORT_CODE = \"\"\r\n DESTINATION_AIRPORT_CODE = \"\"\r\n SCHEDULED_DEPARTURE = 0\r\n DEPARTURE_TIME = 0\r\n DEPARTURE_DELAY = 0\r\n TAXI_OUT = 0\r\n WHEELS_OFF = 0\r\n SCHEDULED_TIME = 0\r\n ELAPSED_TIME = 0\r\n AIR_TIME = 0\r\n DISTANCE = 0\r\n WHEELS_ON = 0\r\n TAXI_IN = 0\r\n SCHEDULED_ARRIVAL = 0\r\n ARRIVAL_TIME = 0\r\n ARRIVAL_DELAY = 0\r\n DIVERTED = 0\r\n CANCELLED = 0\r\n CANCELLATION_REASON = \"\"\r\n AIR_SYSTEM_DELAY = 0\r\n SECURITY_DELAY = 0\r\n AIRLINE_DELAY = 0\r\n LATE_AIRCRAFT_DELAY = 0\r\n WEATHER_DELAY = 0\r\n\r\nfdata = []\r\n\r\n###iterate through file, create list of flightRecord objects\r\nfor row in rows: \r\n fr = flightRecord()\r\n \r\n fr.YEAR = int(row[0])\r\n fr.MONTH = int(row[1])\r\n fr.DAY = int(row[2])\r\n fr.DAY_OF_WEEK = int(row[3])\r\n fr.AIRLINE_CODE = row[4]\r\n fr.FLIGHT_NUMBER = row[5]\r\n fr.TAIL_NUMBER = row[6]\r\n fr.ORIGIN_AIRPORT_CODE = row[7]\r\n fr.DESTINATION_AIRPORT_CODE = row[8]\r\n fr.SCHEDULED_DEPARTURE = int(row[9])\r\n fr.DEPARTURE_TIME = row[10]\r\n if row[11] != \"\":\r\n fr.DEPARTURE_DELAY = int(row[11])\r\n fr.TAXI_OUT = row[12]\r\n fr.WHEELS_OFF = row[13]\r\n fr.SCHEDULED_TIME = row[14]\r\n fr.ELAPSED_TIME = row[15]\r\n fr.AIR_TIME = row[16]\r\n fr.DISTANCE = row[17]\r\n fr.WHEELS_ON = row[18]\r\n fr.TAXI_IN = row[19]\r\n fr.SCHEDULED_ARRIVAL = row[20]\r\n fr.ARRIVAL_TIME = row[21]\r\n if row[22] != \"\":\r\n fr.ARRIVAL_DELAY = int(row[22])\r\n fr.DIVERTED = int(row[23]) \r\n fr.CANCELLED = int(row[24])\r\n fr.CANCELLATION_REASON = row[25]\r\n fr.AIR_SYSTEM_DELAY = row[26]\r\n fr.SECURITY_DELAY = row[27]\r\n fr.AIRLINE_DELAY = row[28]\r\n fr.LATE_AIRCRAFT_DELAY = row[29]\r\n fr.WEATHER_DELAY = row[30]\r\n \r\n fdata.append(fr)\r\n\r\nf.close()\r\n\r\n###open file\r\nf = open(\"airlines.csv\")\r\nrows = csv.reader(f)\r\nnext(rows) #skip first line\r\n\r\n###create class to store airline data\r\nclass Airline:\r\n AIRLINE_CODE = \"\"\r\n AIRLINE = \"\"\r\n\r\naldata = []\r\n\r\nfor row in rows:\r\n ar = Airline()\r\n \r\n ar.AIRLINE_CODE = row[0]\r\n ar.AIRLINE = row[1]\r\n \r\n aldata.append(ar)\r\n\r\nf.close()\r\n\r\n###open file\r\nf = open(\"airports.csv\")\r\nrows = csv.reader(f)\r\nnext(rows) #skip first line\r\n\r\n###create class to store airport data\r\nclass Airport:\r\n AIRPORT_CODE = \"\"\r\n AIRPORT = \"\"\r\n CITY = \"\"\r\n STATE = \"\"\r\n COUNTRY = \"\"\r\n LATITUDE = 0.0\r\n LONGITUDE = 0.0\r\n \r\napdata = []\r\n\r\nfor row in rows:\r\n ap = Airport()\r\n \r\n ap.AIRPORT_CODE = row[0]\r\n ap.AIRPORT = row[1]\r\n ap.CITY = row[2]\r\n ap.STATE = row[3]\r\n ap.COUNTRY = row[4]\r\n if row[5] != \"\" and 
row[6] != \"\":\r\n ap.LATITUDE = float(row[5])\r\n ap.LONGITUDE = float(row[6])\r\n \r\n apdata.append(ap)\r\n \r\nf.close()\r\n\r\n###function to display summary statistics for a list of flight records\r\ndef flightRecordSummary(fdata):\r\n print(\"\\nData set represents \" + str(len(fdata)) + \" flights.\")\r\n print(\"Of those \" + str(len(list(filter(lambda x: x.DIVERTED == 1, fdata)))) + \" flights were diverted\")\r\n print(\"and \" + str(len(list(filter(lambda x: x.CANCELLED == 1, fdata)))) + \" flights were cancelled.\")\r\n depdelays = list(map(lambda x: x.DEPARTURE_DELAY, fdata))\r\n print(\"The average departure delay was \" + str(sum(depdelays)/len(depdelays)) + \" minutes\")\r\n arrdelays = list(map(lambda x: x.ARRIVAL_DELAY, fdata))\r\n print(\"and the average arrival delay was \" + str(sum(arrdelays)/len(arrdelays)) + \" minutes.\")\r\n\r\n###function to recursively subset data for delay prediction, expanding search until\r\n###more than 10 records are found or search is expanded 6 times\r\ndef findSubset(fdata, searchOap, searchDap, searchAir, searchMonth, searchDofW, searchSchedDep, tDiff):\r\n #calculate boundary values\r\n lowDep = searchSchedDep - tDiff\r\n highDep = searchSchedDep + tDiff\r\n \r\n subdata = list(filter(lambda x: x.AIRLINE_CODE == searchAir and x.ORIGIN_AIRPORT_CODE == searchOap and x.DESTINATION_AIRPORT_CODE == searchDap and \r\n x.MONTH == searchMonth and x.DAY_OF_WEEK == searchDofW and x.SCHEDULED_DEPARTURE <= highDep and x.SCHEDULED_DEPARTURE >= lowDep, fdata))\r\n \r\n if len(subdata) >= 10 or tDiff > 600:\r\n #sufficient number of records or search too wide\r\n return subdata\r\n else:\r\n #widen search\r\n tDiff += 100 #increment 1 hour\r\n return findSubset(fdata, searchOap, searchDap, searchAir, searchMonth, searchDofW, searchSchedDep, tDiff)\r\n\r\n###function to find the best and worst airports based on departure delays\r\ndef bestAndWorstPort(fdata, apdata):\r\n #running variable to store best and worst airports data\r\n best = \"\"\r\n besttm = 0\r\n worst = \"\"\r\n worsttm = 1\r\n \r\n \r\n print(\"\\nCalculating...\\n\") #so the user knows its working\r\n \r\n #iterate through airports\r\n for airport in apdata:\r\n flights = len(list(filter(lambda x: x.ORIGIN_AIRPORT_CODE == airport.AIRPORT_CODE, fdata)))\r\n delayed = len(list(filter(lambda x: x.ORIGIN_AIRPORT_CODE == airport.AIRPORT_CODE and x.DEPARTURE_DELAY > 0, fdata)))\r\n \r\n if flights == 0:\r\n #not valid for timeliness calculation if they have no departing flights\r\n continue\r\n else:\r\n #calculate timeliness and adjust running variables as necessary\r\n tm = delayed/flights\r\n if tm < worsttm:\r\n worst = airport.AIRPORT\r\n worsttm = tm\r\n if tm > besttm:\r\n best = airport.AIRPORT\r\n besttm = tm\r\n \r\n print (\"The best airport is \" + best + \"\\nwith an on-time record of \" + str(besttm * 100) + \"%.\")\r\n print (\"The worst airport is \" + worst + \"\\nwith an on-time record of \" + str(worsttm * 100) + \"%.\\n\")\r\n\r\n###function to find the best and worst airlines based on oeverall delays\r\ndef bestAndWorstLine(fdata, aldata):\r\n #running variable to store best and worst airports data\r\n best = \"\"\r\n besttm = 0\r\n worst = \"\"\r\n worsttm = 1\r\n \r\n print(\"\\nCalculating...\\n\") #so the user knows its working\r\n \r\n #iterate through airlines\r\n for airline in aldata:\r\n flights = len(list(filter(lambda x: x.AIRLINE_CODE == airline.AIRLINE_CODE, fdata)))\r\n delayed = len(list(filter(lambda x: x.AIRLINE_CODE == airline.AIRLINE_CODE and 
(x.DEPARTURE_DELAY > 0 or x.ARRIVAL_DELAY > 0), fdata)))\r\n \r\n #calculate timeliness and adjust running variables as necessary\r\n tm = delayed/flights\r\n if tm < worsttm:\r\n worst = airline.AIRLINE\r\n worsttm = tm\r\n if tm > besttm:\r\n best = airline.AIRLINE\r\n besttm = tm\r\n \r\n print (\"The best airline is \" + best + \"\\nwith an on-time record of \" + str(besttm * 100) + \"%.\")\r\n print (\"The worst airline is \" + worst + \"\\nwith an on-time record of \" + str(worsttm * 100) + \"%.\\n\")\r\n \r\n###function to predict delays based on known flight data\r\ndef predictDelay(fdata):\r\n print(\"\\nWelcome to the Flight Delay Predictive Model\\n============================================\\n\")\r\n #read in search values from user\r\n searchOap = input(\"What airport will you be leaving from? (example: JFK): \")\r\n searchDap = input(\"What airport will you be flying to? (example: LAX): \")\r\n searchAir = input(\"Which airline are you flying? (example: B6): \")\r\n searchMonth = int(input(\"Which month will you be flying? (example: 1 = January, etc): \"))\r\n searchDofW = int(input(\"Which day of the week will you be flying? (example: 1 = Monday, etc): \"))\r\n searchSchedDep = int(input(\"What time is your flight scheduled to depart? (example: 800 = 8:00am, 2000 = 8:00pm): \"))\r\n print(\"\\nSearching...\\n\") \r\n \r\n #calculate initial time difference\r\n tDiff = 100 # 1 hour\r\n \r\n #call function to subset data\r\n subdata = findSubset(fdata, searchOap, searchDap, searchAir, searchMonth, searchDofW, searchSchedDep, tDiff)\r\n \r\n if len(subdata) == 0:\r\n #no records returned\r\n print(\"Sorry, but no records were found for those search parameters.\\nPlease try again\\n\")\r\n else: \r\n #calculate and report average delays\r\n depdelays = list(map(lambda x: x.DEPARTURE_DELAY, subdata))\r\n arrdelays = list(map(lambda x: x.ARRIVAL_DELAY, subdata)) \r\n print(\"Based on the predictive model,\\nyou can expect an average departure delay of \" + str(sum(depdelays)/len(depdelays)) + \" minutes\")\r\n print(\"and an average arrival delay of \" + str(sum(arrdelays)/len(arrdelays)) + \" minutes. Have a great flight!\\n\")\r\n\r\n###main program\r\nwhile(1):\r\n \r\n print(\"\\nMain Menu\\nChoose One:\\n1 - Summary Statistics All Data\\n2 - Summary Statistics by Airline\\n3 - Summary Statistics by Origin Airport\\n4 - Best and Worst\\n5 - Predict Delay\\n0 - Exit\")\r\n i = input(\"What would you like to do? (0 - 5): \")\r\n if i == \"1\":\r\n ###Summary stats for all data\r\n print(\"\\nSummary Statistics for all Airlines\\n===================================\")\r\n flightRecordSummary(fdata)\r\n elif i == \"2\":\r\n ###Airline Summary stats\r\n print(\"\\nSummary Statistics by Airline\\n=============================\")\r\n print(\"1 - Show All Airlines\\n2 - Show Specific Airline\\n0 - Return to Main Menu\\n\")\r\n j = input(\"What is your choice? (0 - 2): \")\r\n if j == \"1\":\r\n #all airlines\r\n for airline in aldata:\r\n print(\"\\n\" + airline.AIRLINE + \":\\n\")\r\n #call function for subset of data\r\n flightRecordSummary(list(filter(lambda x: x.AIRLINE_CODE == airline.AIRLINE_CODE, fdata)))\r\n elif j == \"2\":\r\n #a particular airline\r\n searchLine = input(\"What airline would you like to see? 
(example: B6): \")\r\n print(\"\\n\" + list(filter(lambda x: x.AIRLINE_CODE == searchLine, aldata))[0].AIRLINE)\r\n flightRecordSummary(list(filter(lambda x: x.AIRLINE_CODE == searchLine, fdata)))\r\n else:\r\n continue \r\n elif i == \"3\":\r\n ###Summary stats by origin airport\r\n print(\"\\nSummary Statistics by Origin Airport\\n=======================================\\n\")\r\n print(\"1 - Show All Airports\\n2 - Show Specific Airport\\n0 - Return to Main Menu\\n\")\r\n j = input(\"What is your choice? (0 - 2): \")\r\n if j == \"1\":\r\n #all airports\r\n for airport in apdata:\r\n print(\"\\n\" + airport.AIRPORT + \":\\n\")\r\n #call function for subset of data\r\n subdata = list(filter(lambda x: x.ORIGIN_AIRPORT_CODE == airport.AIRPORT_CODE, fdata))\r\n if len(subdata) > 0:\r\n flightRecordSummary(subdata)\r\n elif j == \"2\":\r\n #a particular airport\r\n searchPort = input(\"What airport would you like to see? (example: JFK): \")\r\n print(\"\\n\" + list(filter(lambda x: x.AIRPORT_CODE == searchPort, apdata))[0].AIRPORT)\r\n flightRecordSummary(list(filter(lambda x: x.ORIGIN_AIRPORT_CODE == searchPort, fdata)))\r\n else:\r\n continue\r\n elif i == \"4\":\r\n print(\"\\nBest and Worst\\n==============\\n\")\r\n print(\"1 - See Best and Worst Airports by Departure Delays\\n2 - See Best and Worst Airlines by Overall Delays\\n0 - Return to Main Menu\\n\")\r\n j = input(\"What is your choice? (0 - 2): \")\r\n if j == \"1\":\r\n bestAndWorstPort(fdata, apdata)\r\n elif j == \"2\":\r\n bestAndWorstLine(fdata, aldata)\r\n else:\r\n continue\r\n elif i == \"5\":\r\n ###predict delay based on known flight data\r\n predictDelay(fdata)\r\n else:\r\n break\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "id": "1584797", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "CIS9650_GrpB_FlightDelays_am_20170410.py" } ]
0
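findSubset in the flight-delay script above starts with a ±1-hour window around the scheduled departure and keeps widening it until at least 10 comparable flights are found or the window exceeds 600 (times are HHMM integers, so ±100 is only roughly an hour). A self-contained sketch of that widening search over a toy flight list; names and data are illustrative.

def widening_window(flights, sched_dep, start=100, step=100, limit=600, min_rows=10):
    # flights: (scheduled_departure, departure_delay) pairs with HHMM-encoded times
    window = start
    while True:
        subset = [delay for t, delay in flights
                  if sched_dep - window <= t <= sched_dep + window]
        if len(subset) >= min_rows or window > limit:
            return subset, window
        window += step                                  # widen by ~1 hour and retry

flights = [(730, 5), (800, 0), (805, 12), (830, 3), (845, 20), (900, 0),
           (915, 7), (930, 2), (1000, 15), (1030, 0), (1100, 4), (1200, 25)]
subset, window = widening_window(flights, sched_dep=900)
print(window, sum(subset) / len(subset))                # window used and average delay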
Captain-Zach
[ { "content": "#! /usr/bin/env python3\n\nimport cortx_community\n\nimport argparse\n\ndef Debug(msg):\n print(msg)\n\ndef main():\n parser = argparse.ArgumentParser(description='Set a value for CORTX Community.', add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n required = parser.add_argument_group('required arguments')\n optional = parser.add_argument_group('optional_requirements')\n optional.add_argument(\n '-h',\n '--help',\n action='help',\n help='show this help message and exit'\n )\n required.add_argument('--key', '-k', type=str, help=\"Which key to set / query\", required=True)\n optional.add_argument('--value', '-v', type=str, help=\"Which value to set\", required=False)\n optional.add_argument('--date', '-d', type=str, help='Which date to set', required=False)\n optional.add_argument('--org', '-o', help='Which org', default='Seagate')\n optional.add_argument('--repo', '-r', help='Which repo', default='GLOBAL')\n args = parser.parse_args()\n\n repo = args.repo\n org = args.org\n key = args.key\n val = args.value\n date = args.date\n\n ps = cortx_community.PersistentStats(org_name=args.org)\n dates=ps.get_dates(args.repo)\n\n if date is None:\n date = dates[-1]\n print(\"Defaulting to use last valid date %s\" % date)\n\n if val is not None:\n ps.add_stat(date=date,repo=repo,stat=key,value=int(val))\n print(\"Changing %s on %s to be %s\" % (repo,date,val))\n\n for d in dates:\n print( d, args.key, ps.get_values_as_numbers(args.repo,args.key,[d]))\n\nif __name__ == \"__main__\":\n main()\n\n", "id": "12378752", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "metrics/set_metric.py" } ]
0
RaresTeodor
[ { "content": "# Modified ELO rating system for UFC\n# Extra feature: title bout\n\nimport csv\n\nmatch = {}\nrating = {}\nheader = None\nhistory = None\nplayers = None\n\nwinner = None\ntitle = None\nredWins = None\nblueWins = None\n\ndef loadCSV(filename):\n global header, history\n\n history = []\n with open(filename) as file:\n reader = csv.reader(file)\n for row in reader:\n if (len(row[0]) > 0):\n history.append(row)\n header = history[0]\n history.pop(0)\n\ndef printLeaderboard():\n global players, rating\n\n players = []\n for key in rating:\n players.append((key, rating[key]))\n\n players.sort(key = lambda x : x[1])\n players.reverse()\n for fighter in players:\n print (fighter[0], fighter[1])\n\ndef computeELO():\n global bonus, rating, players, header\n global winner, title, redWins, blueWins\n\n winner = \"Winner\"\n title = \"title_bout\"\n redWins = \"R_wins\"\n blueWins = \"B_wins\"\n\n for i in range(0, len(header)):\n if (header[i] == winner): winner = i\n if (header[i] == title): title = i\n if (header[i] == redWins): redWins = i\n if (header[i] == blueWins): blueWins = i\n\n for row in reversed(history):\n red = row[0]\n blue = row[1]\n\n # Create a new entry\n if red not in rating: rating[red] = 1200\n if blue not in rating: rating[blue] = 1200\n\n oldRed = rating[red]\n oldBlue = rating[blue]\n\n expectedRed = 1 / (1 + 10 ** ((oldBlue - oldRed) / 300))\n expectedBlue = 1 / (1 + 10 ** ((oldRed - oldBlue) / 300))\n\n # Title match\n if (row[title] == \"TRUE\"): K = 256\n else: K = 64\n\n # Who is the winner\n if (row[winner] == \"Red\"):\n newRed = oldRed + K * (1 - expectedRed)\n newBlue = oldBlue + K * (0 - expectedBlue)\n elif (row[winner] == \"Blue\"):\n newBlue = oldBlue + K * (1 - expectedBlue)\n newRed = oldRed + K * (0 - expectedRed)\n else:\n newBlue = oldBlue + K * (0.5 - expectedBlue)\n newRed = oldRed + K * (0.5 - expectedRed) \n\n rating[red] = newRed\n rating[blue] = newBlue\n\n superkey = red + blue + str(int(float(row[redWins]))) + str(int(float(row[blueWins])))\n match[superkey] = (newRed, newBlue)\n\ndef buildCSV():\n header.append(\"R_rating\")\n header.append(\"B_rating\")\n with open('../ufc_final.csv', mode='w', newline='') as ufc_final:\n ufc_writer = csv.writer(ufc_final)\n ufc_writer.writerow(header)\n for row in history:\n red = row[0]\n blue = row[1]\n superkey = red + blue + str(int(float(row[redWins]))) + str(int(float(row[blueWins])))\n\n curr_row = row\n curr_row.append(match[superkey][0])\n curr_row.append(match[superkey][1])\n ufc_writer.writerow(curr_row)\n\nloadCSV(\"ufc_old.csv\")\ncomputeELO()\n\nloadCSV(\"ufc_new.csv\")\ncomputeELO()\n\nloadCSV(\"ufc-master.csv\")\nbuildCSV()\n\n\n\n", "id": "3025953", "language": "Python", "matching_score": 1.4789255857467651, "max_stars_count": 0, "path": "Backend/UFC ELO Rating System/ufc_elo.py" }, { "content": "from typing import Optional\n\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nclass Fighter(BaseModel):\n first_name: str\n last_name: str\n\n\[email protected](\"/\")\ndef read_root():\n return {\"Hello\": \"World\"}\n\n\[email protected](\"/show\")\ndef get_winner(fighter_a: str, fighter_b: str):\n return {\"winner\": fighter_a}\n\[email protected](\"/create\")\ndef create_fighter(fighter: Fighter):\n return fighter\n\[email protected](\"/names\")\ndef show_names():\n file1 = open('names.txt', 'r', newline='\\n')\n names = file1.readlines()\n return names\n\n\n", "id": "12713526", "language": "Python", "matching_score": 1.4871903657913208, 
"max_stars_count": 0, "path": "Frontend/main.py" }, { "content": "\nimport pandas as pd\ndata=pd.read_csv(r\"C:\\Users\\gabri\\ProjectsCode\\Hacklytics2021\\Backend\\ufc-master.csv\")\nname = data.R_fighter.unique()\nname=name.join(data.B_fighter)\nname=name.unique()\nprint(name)", "id": "7607472", "language": "Python", "matching_score": 0.466438889503479, "max_stars_count": 0, "path": "Backend/names.py" }, { "content": "import os\nfrom os import walk\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef getExistingFile(mypath):\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n return onlyfiles[0]\n\nwith open(\"../names.txt\") as f:\n content = f.readlines()\ncontent = [x.strip() for x in content]\n\nf = []\ndirectories = []\nfor (dirpath, dirname, filename) in walk(\"./\"):\n directories.append(dirname)\n break\n\nfor superstar in content:\n directory = \"./\" + superstar + \" png/\"\n name = superstar.replace(\" \", \"\")\n name = name + \".png\"\n os.rename(directory + getExistingFile(directory), directory + name)\n\nfor superstar in content:\n directory = \"./\" + superstar + \" png\"\n newdirectory = superstar.replace(\" \", \"\")\n os.rename(directory, newdirectory)\n", "id": "4483535", "language": "Python", "matching_score": 1.7671703100204468, "max_stars_count": 0, "path": "Backend/Tools for Pictures/rename.py" }, { "content": "import shutil\n\nwith open(\"../names.txt\") as f:\n content = f.readlines()\ncontent = [x.strip() for x in content]\n\nfor superstar in content:\n newname = superstar.replace(\" \", \"\")\n directory = \"./\" + newname + \"/\"\n shutil.copy(directory + newname + \".png\", './')\n", "id": "3806574", "language": "Python", "matching_score": 1.6439200639724731, "max_stars_count": 0, "path": "Backend/Tools for Pictures/copyfiles.py" }, { "content": "from google_images_download import google_images_download\nfrom bing_image_downloader import downloader\n\ndef downloadimages(query):\n response = google_images_download.googleimagesdownload() \n arguments = {\"keywords\": query, \"format\": \"png\", \"limit\": 1, \"print_urls\": True}\n response.download(arguments)\n\nwith open(\"names.txt\") as f:\n content = f.readlines()\ncontent = [x.strip() for x in content] \n\nfor superstar in content:\n downloadimages(superstar + \" png\")\n", "id": "10285467", "language": "Python", "matching_score": 0.9640908241271973, "max_stars_count": 0, "path": "Backend/Tools for Pictures/scrape.py" } ]
1.483058
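computeELO in ufc_elo.py above rates fighters with an Elo update on a 300-point scale and a larger K-factor (256 vs 64) for title bouts. The same update pulled out as a standalone function, with parameter names of my own choosing.

def elo_update(r_red, r_blue, outcome, title_bout=False, scale=300.0):
    # outcome: 1.0 red wins, 0.0 blue wins, 0.5 draw
    k = 256 if title_bout else 64
    expected_red = 1.0 / (1.0 + 10 ** ((r_blue - r_red) / scale))
    expected_blue = 1.0 - expected_red
    return (r_red + k * (outcome - expected_red),
            r_blue + k * ((1.0 - outcome) - expected_blue))

print(elo_update(1200, 1200, outcome=1.0))                    # regular bout: +/-32 points
print(elo_update(1200, 1200, outcome=1.0, title_bout=True))   # title bout: +/-128 points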
IkerZamora
[ { "content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom datetime import datetime\nfrom events import Event, Events\nfrom telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove,\n InlineKeyboardButton, InlineKeyboardMarkup)\nfrom telegram.ext import (Filters, CommandHandler, MessageHandler,\n RegexHandler, Updater)\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nBOT_NAME = None\nBOT_ADMINS = []\nCOMMANDS = {\n # command description used in the 'ayuda' command, keep these up to date\n 'ayuda': 'Obtener información acerca de los comandos',\n 'hype': 'Tiempo restante para la próxima EE, AE ó GE.' \\\n + ' Uso: /hype [EE | AE | GE]',\n 'lalala': 'Canto para ti \\u2665'\n}\n\nEVENTS = Events()\n\n\ndef greetings(bot, update):\n chat_id = update.message.chat.id\n new_members = update.message.new_chat_members\n left_member = update.message.left_chat_member\n if new_members:\n for new_member in new_members:\n # Bot was added to a group chat\n if new_member.username == BOT_NAME:\n bot.send_message(\n chat_id=chat_id,\n text='Hola a todos soy {} y os daré muchos mimitos!'\n .format(BOT_NAME)\n )\n # Another user joined the chat\n else:\n bot.send_message(\n chat_id=chat_id,\n text='Bienvenido al grupo {}. ¿Eres tu mi peluchito?'\n .format(new_member.name)\n )\n elif left_member:\n if left_member.username != BOT_NAME:\n bot.send_sticker(\n chat_id=chat_id,\n sticker=open('./assets/apastar.webp', 'rb')\n )\n if not left_member.is_bot:\n bot.send_sticker(\n chat_id=left_member.id,\n sticker=open('./assets/apastar.webp', 'rb')\n )\n\n\ndef apastar_resumen(bot, update):\n update.message.reply_sticker(sticker=open('./assets/apastar.webp', 'rb'))\n\n\ndef lalala_command(bot, update):\n bot.send_audio(\n chat_id=update.message.chat_id,\n audio=open('./assets/snufi_schnuffel_mi_peluchito.mp3', 'rb')\n )\n\n# Help command. Returns all the commands with their help text\n\n\ndef help_command(bot, update):\n help_text = 'Lista de comandos disponibles: \\n'\n for key in sorted(COMMANDS):\n # generate help text out of the commands dictionary defined at the top\n help_text += '/' + key + ': '\n help_text += COMMANDS[key] + '\\n'\n # send the generated help page\n bot.send_message(chat_id=update.message.chat_id, text=help_text)\n\n# Hype command. 
Returns the remaining time for the event requested\n\n\ndef hype_command(bot, update):\n chat_id = update.message.chat_id\n args = update.message.text.split() # Array of string '/hype' + [parameter]\n using_args = len(args) > 1\n if using_args:\n event = EVENTS.get_event(args[1].lower())\n else:\n event = EVENTS.next_event()\n if event:\n now = datetime.now()\n if event.date > now:\n days, hours, minutes, seconds = event.time_left()\n text = 'Tiempo restante para la %s%d ' % (\n event.acronym, event.edition)\n text += '({:d}-{:d}-{:d}):\\n'.format(\n event.date.year, event.date.month, event.date.day\n )\n\n date_list = [days, hours, minutes, seconds]\n\n text_date = ''\n last_number = next(\n i for i in reversed(range(len(date_list))) if date_list[i] != 0\n )\n for index, (number, unit) in enumerate(\n zip(date_list, ['día', 'hora', 'minuto', 'segundo'])\n ):\n if text_date and number:\n if index == last_number:\n text_date += ' y '\n else:\n text_date += ', '\n if number >= 1:\n text_date += '{:d} {}'.format(number, unit)\n if number > 1:\n text_date += 's'\n text += text_date\n bot.send_message(chat_id=chat_id, text=text)\n else:\n EVENTS.set_event(event.acronym, event.update_event())\n text = ('Aún no se ha anunciado la fecha para la {}{}. '\n .format(event.acronym, event.edition + 1) +\n 'Relaja esos pezones.'\n )\n bot.send_message(chat_id=chat_id, text=text)\n else:\n if using_args:\n bot.send_message(\n chat_id, 'Ese evento no existe. Uso: /hype [EE | GE | AE]'\n )\n else:\n bot.send_message(\n chat_id,\n 'Aún no se ha anunciado la fecha de ningún evento. ' +\n 'Relaja esos pezones.'\n )\n\n\ndef main(argv):\n parser = argparse.ArgumentParser('bot.py')\n parser.add_argument('--webhooks', action='store_true',\n help='enables webhooks instead of pooling')\n args = parser.parse_args(argv)\n\n try:\n token = os.environ['TELEGRAM_TOKEN']\n except KeyError:\n logger.exception('Please set the TELEGRAM_TOKEN environment variable')\n sys.exit(2)\n try:\n global BOT_NAME\n BOT_NAME = os.environ['BOT_NAME']\n except KeyError:\n logger.exception('Please set the BOT_NAME environment variable')\n sys.exit(2)\n try:\n global BOT_ADMINS\n BOT_ADMINS.append(os.environ['ADMIN'])\n except KeyError:\n logger.exception('Please set the ADMIN environment variable')\n sys.exit(2)\n\n updater = Updater(token)\n job_queue = updater.job_queue\n if args.webhooks:\n updater.start_webhook(listen='0.0.0.0', url_path=token,\n port=int(os.environ.get('PORT', '8443')))\n try:\n updater.bot.set_webhook(os.path.join(os.environ['URL'], token))\n except KeyError:\n logger.exception('Please set the environment variable URL')\n sys.exit(2)\n\n dispatcher = updater.dispatcher\n\n dispatcher.add_handler(MessageHandler(Filters.status_update, greetings))\n dispatcher.add_handler(CommandHandler('ayuda', help_command))\n dispatcher.add_handler(CommandHandler('hype', hype_command))\n dispatcher.add_handler(CommandHandler('lalala', lalala_command))\n dispatcher.add_handler(CommandHandler('resumen', apastar_resumen))\n dispatcher.add_handler(RegexHandler(r'^(resumen\\?)$', apastar_resumen))\n\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n", "id": "343914", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "bot.py" }, { "content": "from bs4 import BeautifulSoup\nimport calendar\nfrom datetime import datetime, timedelta\nimport json\nimport locale\nimport logging\nimport requests\nimport requests_cache\n\nlocale.setlocale(locale.LC_ALL, 
'es_ES.UTF-8')\n\n\nclass Event:\n\n def __init__(self, acronym, date, first_edition_year, name, url):\n self.logger = logging.getLogger(__name__)\n self.acronym = acronym\n self.date = date\n self.first_edition_year = first_edition_year\n self.name = name\n self.edition = self.date.year - self.first_edition_year + 1\n self.url = url\n\n def time_left(self):\n '''\n Return: days, hours, minutes and seconds\n remaining for the next LAN party\n '''\n now = datetime.now()\n td = self.date - now\n days = td.days\n hours = td.seconds // 3600\n minutes = (td.seconds // 60) % 60\n seconds = (td.seconds % 3600) % 60\n return days, hours, minutes, seconds\n\n def update_event(self):\n response = requests.get(self.url)\n if response.from_cache:\n self.logger.info(\n 'Loading request for %s event from cache.' % self.name\n )\n else:\n self.logger.info(\n 'Caching request for %s event to cache.' % self.name\n )\n if response.history:\n url_edition = int(response.url.split('.')[0]\n .replace('https://', '').replace(self.acronym.lower(), '')\n )\n if url_edition == self.edition + 1:\n soup = BeautifulSoup(response.text, 'html.parser')\n try:\n # str e.g: Del 26 al 29 de julio de 2018\n str_date = (soup.find('a', {'name': 'cuando'})\n .parent.findNext('p').text.lower().split()\n )\n # pre-event day is the previous day to the official start.\n start_day = int(str_date[1]) - 1\n end_day = int(str_date[3])\n month = None\n for value, key in enumerate(calendar.month_name):\n if key == str_date[5]:\n month = value\n year = int(str_date[7])\n date = datetime(\n year,\n month,\n start_day,\n self.date.hour,\n self.date.minute\n )\n self.date = date\n self.edition += 1\n except:\n self.logger.exception()\n else:\n self.logger.info(\n 'URL for edition {} of {} event has not been updated yet.'\n .format(self.edition + 1, self.name)\n )\n else:\n self.logger.error('URL redirection of %s event did not happen.'\n % self.name)\n return self\n\n\nclass Events:\n\n CACHE_EXPIRE = 3600\n\n def __init__(self):\n self.logger = logging.getLogger(__name__)\n requests_cache.install_cache(\n cache_name='date_cache',\n backend='memory',\n expire_after=Events.CACHE_EXPIRE\n )\n self.logger.info(\n 'Initialized in-memory cache for event date updater ' +\n 'with expiration set for %d minutes.' % (Events.CACHE_EXPIRE / 60)\n )\n self.events = self._load_events()\n\n def _load_events(self, filename='events.json'):\n file = open(filename)\n data = json.load(file)\n file.close()\n json_events = data['events']\n now = datetime.now()\n events = []\n for event in json_events:\n current = event['event']\n url = current['url']\n year = current['date']['year']\n month = current['date']['month']\n day = current['date']['day']\n hour = current['date']['hour']\n minute = current['date']['minute']\n date = datetime(year, month, day, hour, minute)\n current_event = Event(\n current['acronym'],\n date,\n current['first_edition_year'],\n current['name'],\n url\n )\n # if date < now:\n # current_event = current_event.update_event()\n events.append(current_event)\n return events\n\n def get_event(self, acronym):\n for event in self.events:\n if event.acronym.lower() == acronym.lower():\n return event\n return None\n\n def set_event(self, acronym, set_event):\n for index, event in enumerate(self.events):\n if event.acronym.lower() == acronym.lower():\n self.events[index] = set_event\n\n def next_event(self):\n now = datetime.now()\n # Since events occur annually max. 
time-lapse is 1 year.\n # Using Leap year just in case.\n soonest_date = now + timedelta(days=366)\n next_event = None\n for event in self.events:\n if event.date > now and event.date < soonest_date:\n next_event = event\n soonest_date = event.date\n return next_event\n", "id": "1758363", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "events.py" } ]
0
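Event.time_left in events.py above turns the timedelta until the next LAN party into the days/hours/minutes/seconds that the /hype command prints. The decomposition on its own, with an arbitrary example date.

from datetime import datetime

def time_left(target, now=None):
    td = target - (now or datetime.now())
    days = td.days
    hours = td.seconds // 3600
    minutes = (td.seconds // 60) % 60
    seconds = td.seconds % 60          # equivalent to (td.seconds % 3600) % 60
    return days, hours, minutes, seconds

print(time_left(datetime(2025, 7, 26, 9, 0), now=datetime(2025, 7, 24, 6, 30, 15)))
# -> (2, 2, 29, 45)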
PauloSanches12
[ { "content": "from django.shortcuts import render\r\nfrom .models import *\r\n\r\n# Create your views here.\r\ndef orcamentos_lista(request):\r\n # logica\r\n orcamentos = Orcamento.objects.all()\r\n return render(request, 'mp_orcamento/orcamentos.html', {'orcamentos': orcamentos})\r\n\r\n\r\ndef orcamentos_estatisticas(request):\r\n maior_custo = 0\r\n menor_custo = 999999999999\r\n orcamento_maior_custo = None\r\n orcamento_menor_custo = None\r\n orcamentos = Orcamento.objects.all()\r\n somatorio_custo_total = 0\r\n for orcamento in orcamentos:\r\n somatorio = 0\r\n for peca in Peca.objects.filter(orcamento=orcamento):\r\n somatorio += peca.custo_de_producao_ajustado()\r\n orcamento.custo_total = somatorio * 1.25\r\n somatorio_custo_total += orcamento.custo_total\r\n if orcamento.custo_total >= maior_custo:\r\n orcamento_maior_custo = orcamento\r\n maior_custo = orcamento.custo_total\r\n if orcamento.custo_total <= menor_custo:\r\n orcamento_menor_custo = orcamento\r\n menor_custo = orcamento.custo_total\r\n quantidade = Orcamento.objects.count() \r\n media_custo_total = somatorio_custo_total / quantidade\r\n return render(request, 'mp_orcamento/estatisticas.html', \r\n {'quantidade': quantidade, \r\n 'orcamento_maior_custo': orcamento_maior_custo,\r\n 'orcamento_menor_custo': orcamento_menor_custo,\r\n 'media_custo_total': media_custo_total\r\n })\r\n\r\ndef info_cliente(request,id_cliente):\r\n clientes=Cliente.objects.get(pk=id_cliente)\r\n orcamento = Orcamento.objects.filter(cliente=clientes)\r\n return render(request, 'mp_orcamento/id_cliente.html',{'cliente': clientes, 'orcamento':orcamento})\r\n\r\ndef estatistica(request):\r\n maior_custo = 0\r\n menor_custo = 999999999999\r\n orcamento_maior_custo = None\r\n orcamento_menor_custo = None\r\n orcamentos = Cliente.objects.all()\r\n\r\n somatorio_custo_total = 0\r\n\r\n for orcamento in orcamentos:\r\n somatorio = 0\r\n\r\n for orcamento in Orcamento.objects.filter(cliente=orcamento):\r\n somatorio += orcamento.custo_total()\r\n\r\n orcamento.custo_total = somatorio\r\n \r\n if orcamento.custo_total >= maior_custo:\r\n orcamento_maior_custo = orcamento\r\n maior_custo = orcamento.custo_total\r\n\r\n if orcamento.custo_total <= menor_custo:\r\n orcamento_menor_custo = orcamento\r\n menor_custo = orcamento.custo_total\r\n\r\n quantidade = Cliente.objects.count()\r\n\r\n return render(request, 'mp_orcamento/estatistica.html', \r\n {'quantidade': quantidade, \r\n 'orcamento_maior_custo': orcamento_maior_custo,\r\n 'orcamento_menor_custo': orcamento_menor_custo,\r\n 'maior_custo': maior_custo,\r\n 'menor_custo': menor_custo}) \r\n", "id": "6998454", "language": "Python", "matching_score": 2.259368419647217, "max_stars_count": 0, "path": "mp_orcamento/mp_orcamento/views.py" }, { "content": "from django.urls import path\r\nfrom .views import *\r\n\r\nurlpatterns = [\r\n path('orcamentos/', orcamentos_lista, name='orcamentos-lista'),\r\n path('orcamentos/estatisticas/', orcamentos_estatisticas,name='orcamentos-estatisticas'),\r\n path('orcamentos/cliente/<int:id_cliente>/', info_cliente,name='Informacao-cliente'),\r\n path('orcamentos/cliente/estatistica/', estatistica,name='Estatistica'),\r\n]", "id": "5839263", "language": "Python", "matching_score": 0.8375502824783325, "max_stars_count": 0, "path": "mp_orcamento/mp_orcamento/urls.py" }, { "content": "from django.apps import AppConfig\r\n\r\n\r\nclass MpOrcamentoConfig(AppConfig):\r\n name = 'mp_orcamento'\r\n", "id": "10394229", "language": "Python", "matching_score": 
0.10970420390367508, "max_stars_count": 0, "path": "mp_orcamento/mp_orcamento/apps.py" } ]
0.83755
vaugusto92
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport json\nimport logging\nimport traceback\n\nimport imageio\n\nfrom core import Kanjar\n\nspec_errors = {\n 'assert isinstance(dataset.get(\\'title\\'), str)': 'Title is not a string.',\n 'assert bool(dataset.get(\\'title\\').strip())': 'Title is an empty string.',\n 'assert isinstance(dataset.get(\\'images\\'), list)': 'List of images is not a list.',\n 'assert not dataset.get(\\'images\\')': 'List of images has invalid initialization.',\n 'assert isinstance(img, str)': 'An image name is not a string.',\n 'assert bool(img.strip())': 'There is an empty image name.',\n 'assert isinstance(dataset.get(\\'input_folder\\'), str)': 'The input folder must be a string.',\n 'assert bool(dataset.get(\\'input_folder\\').strip())': 'The input folder string cannot be empty.'\n}\n\n\nclass JsonDataset(Kanjar):\n\n def __init__(self, **kwargs):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s')\n self.dataset = None\n self.input_file_name = kwargs.get('input_file_name', None)\n\n def compose(self, current_value, *args):\n for function in args:\n current_value = function(current_value)\n return current_value\n\n def _parse_assert_error(self, error):\n traceback_msg = self.compose(error.__traceback__,\n traceback.format_tb,\n ''.join)\n\n split_traceback = traceback_msg.split('\\n')\n return split_traceback[1].strip()\n\n def _assert_message(self, assert_error):\n return spec_errors[assert_error]\n\n def validate_dataset(self, dataset):\n valid = True\n message = None\n\n try:\n assert isinstance(dataset.get('title'), str)\n assert bool(dataset.get('title').strip())\n\n assert isinstance(dataset.get('images'), list)\n assert not dataset.get('images')\n\n assert isinstance(dataset.get('image_names'), list)\n assert dataset.get('image_names')\n\n for img in dataset.get('image_names'):\n assert isinstance(img, str)\n assert bool(img.strip())\n\n assert isinstance(dataset.get('input_folder'), str)\n assert bool(dataset.get('input_folder').strip())\n\n except AssertionError as e:\n assert_error = self._parse_assert_error(e)\n message = self._assert_message(assert_error)\n valid = False\n\n if not valid:\n raise Exception(message)\n return valid\n\n def load_images(self, folder, file_names):\n images = []\n\n for name in file_names:\n img = imageio.imread(folder + name)\n images.append(img)\n\n return images\n\n def load_dataset(self):\n dataset = None\n images = None\n\n try:\n if self.input_file_name:\n file = open(self.input_file_name)\n dataset = json.load(file)\n file.close()\n\n is_dataset_valid = self.validate_dataset(dataset)\n\n if is_dataset_valid:\n images = self.load_images(dataset.get('input_folder'),\n dataset.get('image_names'))\n\n dataset['images'] = images\n return dataset\n\n except Exception as e:\n logging.error(e.args)\n", "id": "1101674", "language": "Python", "matching_score": 3.282766103744507, "max_stars_count": 1, "path": "impl/json_dataset.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright (c) 2020 <NAME>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, 
subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"Computes a Fourier Transform based NR-IQA index.\n\nThis class implements the no-reference image quality assessment index proposed\nby <NAME> and <NAME> in the paper\n\n\"Image Sharpness Measure for Blurred Images in Frequency Domain\"\nhttps://www.sciencedirect.com/science/article/pii/S1877705813016007\n\n\"\"\"\n\n\nimport logging\nimport os\nfrom abc import ABC, abstractmethod\n\nimport imageio\nimport numpy as np\n\nlogging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s')\n\n__version__ = '2.0'\n__author__ = '<NAME>'\n__copyright__ = \"Copyright (c) 2019 - <NAME>\"\n\n\nclass Kanjar(ABC):\n\n def compute_iqa(self, dataset):\n\n try:\n if not dataset:\n raise Exception('The dataset was not initialized.')\n\n results = []\n for name in dataset.get('image_names'):\n logging.info('Computing image ' + str(name))\n\n image = imageio.imread(dataset.get('input_folder') + name)\n\n fourier_coefficients = np.fft.fftshift(np.fft.fft2(image))\n\n # compute the absolute value of all Fourier coefficients\n abs_values = np.abs(fourier_coefficients)\n\n # compute the maximum value among all coefficients\n maximum_value = np.max(abs_values)\n\n # compute the total number of coefficients that are higher than\n # the maximum value / 1000\n total = abs_values[abs_values > maximum_value / 1000].size\n\n results.append(total / image.size)\n\n output = 'output/' + dataset.get('title') + '-kanjar.txt'\n np.savetxt(output, results, fmt='%.10f')\n\n except Exception as e:\n if 'False' == os.getenv('TESTING'):\n logging.error(e.args)\n\n @abstractmethod\n def load_dataset(self, **kwargs):\n pass\n", "id": "9099365", "language": "Python", "matching_score": 0.9509969353675842, "max_stars_count": 1, "path": "core/core.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nfrom core import Kanjar\n\nabs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nsys.path.insert(0, abs_path)\n", "id": "8560290", "language": "Python", "matching_score": 0.01579960621893406, "max_stars_count": 1, "path": "impl/context.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport copy\nimport os\nimport unittest\n\nfrom impl import JsonDataset\n\n\nclass TestJsonDatasetImplementation(unittest.TestCase):\n\n def setUp(self):\n os.environ['TESTING'] = 'True'\n\n def tearDown(self):\n os.environ['TESTING'] = 'False'\n\n def _wrap_validation_test(self, base_params, args):\n message = args.get('msg')\n validate = base_params.get('validate')\n\n with self.assertRaisesRegex(Exception, message):\n _dataset = copy.deepcopy(base_params.get('dataset'))\n _dataset[args.get('key')] = args.get('value')\n validate(_dataset)\n\n # Redefs for the load images function\n def mock_load_images(self, folder, file_names):\n return []\n\n def test_validate_dataset(self):\n json_impl = 
JsonDataset(input_file_name='./input/json/airplane.json')\n\n json_impl.load_images = self.mock_load_images\n dataset = json_impl.load_dataset()\n\n validate = json_impl.validate_dataset\n\n self.assertTrue(validate(dataset))\n\n base_params = {\n 'validate': json_impl.validate_dataset,\n 'dataset': dataset\n }\n\n assert_cases = [\n {\n 'msg': 'Title is not a string.',\n 'key': 'title',\n 'value': 123\n },\n {\n 'msg': 'Title is an empty string.',\n 'key': 'title',\n 'value': ''\n },\n {\n 'msg': 'Title is an empty string.',\n 'key': 'title',\n 'value': ''\n },\n {\n 'msg': 'List of images is not a list.',\n 'key': 'images',\n 'value': 123\n },\n {\n 'msg': 'List of images has invalid initialization.',\n 'key': 'images',\n 'value': ['image.png']\n },\n {\n 'msg': 'An image name is not a string.',\n 'key': 'image_names',\n 'value': [1]\n },\n {\n 'msg': 'There is an empty image name.',\n 'key': 'image_names',\n 'value': ['']\n },\n {\n 'msg': 'The input folder must be a string.',\n 'key': 'input_folder',\n 'value': 123\n },\n {\n 'msg': 'The input folder string cannot be empty.',\n 'key': 'input_folder',\n 'value': ''\n },\n ]\n\n for case in assert_cases:\n self._wrap_validation_test(base_params, case)\n\n def test_load_dataset(self):\n json_impl = JsonDataset(input_file_name='./input/json/airplane.json')\n\n # Original function\n load_images = json_impl.load_images\n self.assertIsNotNone(json_impl)\n self.assertIsNone(json_impl.dataset)\n\n json_impl.load_images = self.mock_load_images\n dataset = json_impl.load_dataset()\n self.assertIsNotNone(dataset)\n\n json_impl.load_images = load_images\n dataset = json_impl.load_dataset()\n self.assertIsNotNone(dataset)\n\n def test_compute_iqa(self):\n json_impl = JsonDataset(input_file_name='./input/json/airplane.json')\n dataset = json_impl.load_dataset()\n\n dataset['output_folder'] = ''\n\n json_impl.compute_iqa(dataset)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "4211753", "language": "Python", "matching_score": 2.773970127105713, "max_stars_count": 1, "path": "tests/test_json_dataset.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom impl import JsonDataset\n\n\ndef main():\n json_impl = JsonDataset(input_file_name='./input/json/airplane.json')\n dataset = json_impl.load_dataset()\n\n dataset['output_folder'] = ''\n\n json_impl.compute_iqa(dataset)\n\n\nif __name__ == '__main__':\n main()\n", "id": "9875958", "language": "Python", "matching_score": 2.2771103382110596, "max_stars_count": 1, "path": "main.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -\n\nfrom .json_dataset import JsonDataset\n", "id": "8856730", "language": "Python", "matching_score": 0.5651562809944153, "max_stars_count": 1, "path": "impl/__init__.py" } ]
1.614054
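The core/core.py file in the row above documents the Kanjar-style frequency-domain sharpness index: count the Fourier coefficients whose magnitude exceeds one thousandth of the maximum magnitude, then divide by the number of pixels. Below is a minimal standalone sketch of just that per-image computation, assuming a single-channel image already loaded as a NumPy array; the function name kanjar_sharpness is an illustrative label, not taken from the repository.

# A minimal sketch of the frequency-domain sharpness measure from core/core.py,
# assuming a grayscale image as a NumPy array. kanjar_sharpness is illustrative.
import numpy as np

def kanjar_sharpness(image: np.ndarray) -> float:
    # Shift the 2-D FFT so the zero-frequency component sits at the centre.
    coefficients = np.fft.fftshift(np.fft.fft2(image))
    magnitudes = np.abs(coefficients)
    maximum = magnitudes.max()
    # Count coefficients whose magnitude exceeds 1/1000 of the maximum
    # (equivalent to the abs_values[abs_values > maximum / 1000].size idiom above).
    above_threshold = np.count_nonzero(magnitudes > maximum / 1000)
    # Normalise by the total number of pixels, as in the paper's index.
    return above_threshold / image.size

# Example: random noise scores higher (appears "sharper") than a flat image.
# rng = np.random.default_rng(0)
# print(kanjar_sharpness(rng.random((128, 128))))

Compared with the class in core/core.py, this drops the dataset bookkeeping, logging, and file output and keeps only the per-image index.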
AlexBulankou
[ { "content": "# Copyright 2016 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Creates autoscaled, network LB IGM running specified docker image.\"\"\"\n\n\ndef GenerateConfig(context):\n \"\"\"Generate YAML resource configuration.\"\"\"\n\n # NOTE: Once we can specify the port/service during creation of IGM,\n # we will wire it up here.\n name = context.env['name']\n resources = [{\n 'name': name + '-igm',\n 'type': 'compute.v1.instanceGroupManager',\n 'properties': {\n 'zone': context.properties['zone'],\n 'targetSize': context.properties['size'],\n 'baseInstanceName': name + '-instance',\n 'instanceTemplate': context.properties['instanceTemplate']\n }\n }, {\n 'name': name + '-as',\n 'type': 'compute.v1.autoscaler',\n 'properties': {\n 'zone': context.properties['zone'],\n 'target': '$(ref.' + name + '-igm.selfLink)',\n 'autoscalingPolicy': {\n 'maxNumReplicas': context.properties['maxSize']\n\n }\n }\n }]\n return {'resources': resources}", "id": "4148672", "language": "Python", "matching_score": 2.7621397972106934, "max_stars_count": 0, "path": "templates/autoscaled_group.py" }, { "content": "# Copyright 2016 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Creates primary/secondary zone autoscaled IGM running specified container.\"\"\"\n\n\ndef GenerateConfig(context):\n \"\"\"Generate YAML resource configuration.\"\"\"\n\n name = context.env['name']\n\n resources = [{\n 'name': name,\n 'type': 'container_instance_template.py',\n 'properties': {\n 'port': context.properties['port'],\n 'dockerEnv': context.properties['dockerEnv'],\n 'dockerImage': context.properties['dockerImage'],\n 'containerImage': context.properties['containerImage']\n }\n }, {\n 'name': name + '-pri',\n 'type': 'autoscaled_group.py',\n 'properties': {\n 'zone': context.properties['primaryZone'],\n 'size': context.properties['primarySize'],\n 'maxSize': context.properties['maxSize'],\n 'port': context.properties['port'],\n 'service': context.properties['service'],\n 'baseInstanceName': name + '-instance',\n 'instanceTemplate': '$(ref.' 
+ name + '-it.selfLink)'\n }\n }, {\n 'name': name + '-sec',\n 'type': 'autoscaled_group.py',\n 'properties': {\n 'zone': context.properties['secondaryZone'],\n 'size': context.properties['secondarySize'],\n 'maxSize': context.properties['maxSize'],\n 'port': context.properties['port'],\n 'service': context.properties['service'],\n 'baseInstanceName': name + '-instance',\n 'instanceTemplate': '$(ref.' + name + '-it.selfLink)'\n }\n }, {\n 'name': name + '-hc',\n 'type': 'compute.v1.httpHealthCheck',\n 'properties': {\n 'port': context.properties['port'],\n 'requestPath': '/_ah/health'\n }\n }, {\n 'name': name + '-bes',\n 'type': 'compute.v1.backendService',\n 'properties': {\n 'port': context.properties['port'],\n 'portName': context.properties['service'],\n 'backends': [{\n 'name': name + '-primary',\n 'group': '$(ref.' + name + '-pri-igm.instanceGroup)'\n }, {\n 'name': name + '-secondary',\n 'group': '$(ref.' + name + '-sec-igm.instanceGroup)'\n }],\n 'healthChecks': ['$(ref.' + name + '-hc.selfLink)']\n }\n }]\n return {'resources': resources}", "id": "2147859", "language": "Python", "matching_score": 2.9776463508605957, "max_stars_count": 0, "path": "templates/service.py" }, { "content": "# Copyright 2016 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Create appplication template with back-end and front-end templates.\"\"\"\n\n\ndef GenerateConfig(context):\n \"\"\"Generate configuration.\"\"\"\n\n backend = context.env['deployment'] + '-backend'\n frontend = context.env['deployment'] + '-frontend'\n static_service = context.env['deployment'] + '-static-service'\n application = context.env['deployment'] + '-application'\n\n container_image = 'family/cos-stable'\n\n application_port = 8080\n lb_port = 8080\n mysql_port = 8080\n\n resources = [{\n 'name': backend,\n 'type': 'container_vm.py',\n 'properties': {\n 'zone': context.properties['primaryZone'],\n 'dockerImage': context.properties['backendImage'],\n 'containerImage': container_image,\n 'port': mysql_port\n }\n }, {\n 'name': frontend,\n 'type': 'service.py',\n 'properties': {\n 'primaryZone': context.properties['primaryZone'],\n 'primarySize': 2,\n 'secondaryZone': context.properties['secondaryZone'],\n 'secondarySize': 0,\n 'dockerImage': context.properties['frontendImage'],\n 'containerImage': container_image,\n 'port': application_port,\n 'service': 'http',\n # If left out will default to 1\n 'maxSize': 20,\n # Define the variables that are exposed to container as env variables.\n 'dockerEnv': {\n 'SEVEN_SERVICE_MYSQL_PORT': mysql_port,\n 'SEVEN_SERVICE_PROXY_HOST': '$(ref.' 
+ backend\n + '.networkInterfaces[0].networkIP)'\n }\n }\n }, {\n 'name': static_service,\n 'type': 'service.py',\n 'properties': {\n 'primaryZone': context.properties['primaryZone'],\n 'primarySize': 2,\n 'secondaryZone': context.properties['secondaryZone'],\n 'secondarySize': 0,\n 'dockerImage': context.properties['staticImage'],\n 'containerImage': container_image,\n 'port': application_port,\n 'service': 'httpstatic',\n # If left out will default to 1\n 'maxSize': 20\n }\n }, {\n 'name': application + '-urlmap',\n 'type': 'compute.v1.urlMap',\n 'properties': {\n 'defaultService': '$(ref.' + frontend + '-bes.selfLink)',\n 'hostRules': [{\n 'hosts': ['*'],\n 'pathMatcher': 'pathmap'\n }],\n 'pathMatchers': [{\n 'name': 'pathmap',\n 'defaultService': '$(ref.' + frontend + '-bes.selfLink)',\n 'pathRules': [{\n 'paths': ['/static', '/static/*'],\n 'service': '$(ref.' + static_service + '-bes.selfLink)'\n }]\n }]\n }\n }, {\n 'name': application + '-targetproxy',\n 'type': 'compute.v1.targetHttpProxy',\n 'properties': {\n 'urlMap': '$(ref.' + application + '-urlmap.selfLink)'\n }\n }, {\n 'name': application + '-l7lb',\n 'type': 'compute.v1.globalForwardingRule',\n 'properties': {\n 'IPProtocol': 'TCP',\n 'portRange': lb_port,\n 'target': '$(ref.' + application + '-targetproxy.selfLink)'\n }\n }, {\n 'name': application + '-fw',\n 'type': 'compute.v1.firewall',\n 'properties': {\n 'allowed': [{\n 'IPProtocol': 'TCP',\n 'ports': [lb_port]\n }],\n 'sourceRanges': ['0.0.0.0/0']\n }\n }]\n return {'resources': resources}", "id": "10976972", "language": "Python", "matching_score": 2.4213738441467285, "max_stars_count": 0, "path": "templates/application.py" } ]
2.76214
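The templates in the row above are Google Cloud Deployment Manager Python templates: each exposes GenerateConfig(context) and returns a {'resources': [...]} dictionary. The sketch below shows how such a template function can be exercised locally with a stand-in context; FakeContext and the property values are assumptions for illustration only, since Deployment Manager normally supplies the real context object, and the template body is condensed from templates/autoscaled_group.py.

# Rough sketch of calling a Deployment Manager template function locally.
# FakeContext and its property values are invented for illustration.
import json

class FakeContext:
    def __init__(self, name, properties):
        self.env = {'name': name, 'deployment': 'demo-deployment'}
        self.properties = properties

def generate_autoscaled_group(context):
    # Condensed from templates/autoscaled_group.py in the row above.
    name = context.env['name']
    return {'resources': [{
        'name': name + '-igm',
        'type': 'compute.v1.instanceGroupManager',
        'properties': {
            'zone': context.properties['zone'],
            'targetSize': context.properties['size'],
            'baseInstanceName': name + '-instance',
            'instanceTemplate': context.properties['instanceTemplate'],
        },
    }]}

ctx = FakeContext('frontend', {'zone': 'us-central1-f', 'size': 2,
                               'instanceTemplate': '$(ref.frontend-it.selfLink)'})
print(json.dumps(generate_autoscaled_group(ctx), indent=2))

Printing the returned dictionary is a convenient way to eyeball the resource graph before handing the template to the deployment service.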
yoongeemin
[ { "content": "#!/usr/bin/env python\nimport json\nimport sys\nimport os\n\nargs = sys.argv\npath = os.path.join(os.path.dirname(__file__), args[1])\ntry:\n config = json.load(open(path, 'r'))\n config['PLATFORM'] = 'IOS'\n mapper = lambda (x,y): '@\"{}\": @\"{}\"'.format(x, y)\n dictionary = ','.join(map(mapper, config.iteritems()))\n result = '#define ENV @{{{}}}'.format(dictionary)\n\n output = open('./Environment.m', 'w')\n output.write(result)\n output.close()\n\nexcept ValueError:\n print 'Invalid configuration file'\n", "id": "8941706", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ios/Fabric/AppConfigBuilder.py" } ]
0
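ios/Fabric/AppConfigBuilder.py in the row above is Python 2 (print statement, tuple-unpacking lambda, dict.iteritems()). The following is a hedged Python 3 sketch of the same idea, reading a JSON config, injecting PLATFORM, and emitting an Objective-C #define ENV dictionary literal; the function name and default output path are placeholders, not taken from the repository.

# A Python 3 sketch of the same conversion as AppConfigBuilder.py above:
# JSON config -> Objective-C dictionary literal written as a #define.
# build_environment and the default file name are illustrative placeholders.
import json
import sys

def build_environment(config_path, output_path='Environment.m'):
    with open(config_path) as fh:
        config = json.load(fh)
    config['PLATFORM'] = 'IOS'
    pairs = ','.join('@"{}": @"{}"'.format(key, value)
                     for key, value in config.items())
    with open(output_path, 'w') as out:
        out.write('#define ENV @{{{}}}\n'.format(pairs))

if __name__ == '__main__':
    build_environment(sys.argv[1])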
akudnaver
[ { "content": "# importing the libraries required to access the HTML page and pull a table from a Web Page.\n# This code expects users to have fair amount of knowledge on how to read through the HTML body,tags,classes. You can refer\n# the below links to develope an understanding of HTML page and for some basic foundation on Python Beautiful Soup functionality. \n\nimport os\nimport bs4\nimport pandas as pd\nfrom selenium import webdriver\n\n# Choose a working directory \n\nPATH = os.path.join('/','C:/', 'Users','akudnaver', 'Desktop')\n\n# Download the chromedriver.exe file and specif the path under PATH_CHROME\nPATH_CHROME = 'specify the path to chromedriver.exe' \n\n# Here is a quick definition/function to iterate over each rows ='tr' under a table body of your choice. We are basically\n# reading through the table rows from the webpage.\n\ndef table_to_df(table):\n return pd.DataFrame([[td.text for td in row.find_all('td')] for row in soup.find_all('tr')])\n\nres = pd.DataFrame()\n\n# Here we are only interested in the website under variable 'URL' , i will try to make it generic in my next project for\n# you to download all the tables from any webpage using Beautiful soup function.\n\nurl = \"https://www.meteoschweiz.admin.ch/home/messwerte.html\"\ncounter = 0\n\n# We will be using the selenium webdriver function to start the webpage we are interested to work with here. \nchrome_options = webdriver.ChromeOptions()\n\n# A sandbox is security mechanism used to run an application in a restricted environment. \n# If an attacker is able to exploit the browser in a way that lets him run arbitrary code on the machine, \n# the sandbox would help prevent this code from causing damage to the system. But in our case we would like to disbale \n# the sandbox just to serve our purpose.\nchrome_options.add_argument('--no-sandbox')\n\n# Selenium python binding provides a simple API to write the functional tests using Selenium webdriver, with the help\n# of this tool, you can extract any element out of the webpage in a convenient way and as well you can perform tests on the\n# webpage, of your choice.\n\n# In the below code we are creating an instance of Chrome webdriver and setting the URL our choice we wish to work with.\n\ndriver = webdriver.Chrome(PATH, chrome_options=chrome_options)\ndriver.get(url)\n\n\n# In the below segment of code we are going to pull the table of our interest from the webpage.\nwhile True:\n print(\"=========================================================\\n\")\n print(\"The task is completed , you can access the CSV file now\")\n print(\"=========================================================\\n\")\n page = driver.get(url)\n soup = bs4.BeautifulSoup(driver.page_source, 'lxml')\n table = driver.find_element_by_xpath('//*[@id=\"measurementv3-table\"]')\n if table is None:\n print(\"no table 'tableID' found for url {}\".format(url))\n print(\"html content:\\n{}\\n\".format(page.content))\n continue\n \n # In the below code we will convert the table to a dataframe and save it in a CSV file\n res = res.append(table_to_df(table))\n res.dropna(axis=0,inplace=True)\n res.to_csv(os.path.join(PATH,\"table.csv\"), index=False, sep=',', header=None)\n break\n counter += 1\n", "id": "11993181", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "my_codes/beautify_web_pager/beautify_html_proj.py" } ]
0
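The scraping script above drives Chrome through Selenium and converts an HTML table into a pandas DataFrame with BeautifulSoup; note that its table_to_df helper iterates soup.find_all('tr') from the whole page rather than the rows of the table it is given. Below is a small sketch of just the table-to-DataFrame step, run against an inline HTML snippet so no browser or live site is needed; the snippet and column names are invented for illustration, while the real target is the MeteoSwiss measurements table.

# Sketch of the table-to-DataFrame step alone, using an inline HTML snippet.
# The snippet and column names are invented; the id matches the script above.
import bs4
import pandas as pd

html = """
<table id="measurementv3-table">
  <tr><td>Zurich</td><td>21.3</td></tr>
  <tr><td>Geneva</td><td>23.1</td></tr>
</table>
"""

soup = bs4.BeautifulSoup(html, 'html.parser')
table = soup.find(id='measurementv3-table')

# Scope find_all('tr') to the located table so unrelated page rows are ignored.
rows = [[td.text for td in tr.find_all('td')] for tr in table.find_all('tr')]
df = pd.DataFrame(rows, columns=['station', 'temperature_c'])
print(df)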
DanielBrookRoberge
[ { "content": "from collections import deque\n\nimport rpn\n\nPRECEDENCE = {\n '^': 3,\n '*': 2,\n '/': 2,\n '-': 1,\n '+': 1,\n '(': 0\n}\n\nclass InfixException(Exception):\n pass\n\ndef convert(tokens):\n stack = deque()\n\n for token in tokens:\n if token.isdigit():\n yield token\n elif token == '(':\n stack.append(token)\n elif token == ')':\n while True:\n if len(stack) == 0:\n raise InfixException('Stack underflow')\n operator = stack.pop()\n if operator == '(':\n break\n yield operator\n else:\n token_precedence = PRECEDENCE.get(token)\n while True:\n if len(stack) > 0:\n stack_precedence = PRECEDENCE.get(stack[-1])\n else:\n stack_precedence = 0\n if token_precedence > stack_precedence:\n stack.append(token)\n break\n elif token_precedence < stack_precedence:\n yield stack.pop()\n else:\n yield stack.pop()\n stack.append(token)\n break\n\n while len(stack) > 0:\n yield stack.pop()\n\ndef evaluate(tokens):\n return rpn.evaluate(convert(tokens))\n", "id": "12057714", "language": "Python", "matching_score": 2.288742780685425, "max_stars_count": 0, "path": "python3-bottle/infix.py" }, { "content": "from collections import deque\n\nBINARY_OPERATORS = {\n '+': lambda op1, op2: op2 + op1,\n '-': lambda op1, op2: op2 - op1,\n '*': lambda op1, op2: op2 * op1,\n '/': lambda op1, op2: op2 / op1,\n '^': lambda op1, op2: op2 ** op1\n}\n\nclass RpnException(Exception):\n pass\n\ndef evaluate(tokens):\n stack = deque()\n for token in tokens:\n if token.isdigit():\n stack.append(int(token))\n elif token in BINARY_OPERATORS:\n if len(stack) < 2:\n raise RpnException('Stack underflow')\n stack.append(\n BINARY_OPERATORS[token](\n stack.pop(),\n stack.pop()\n )\n )\n else:\n raise RpnException('Unknown token')\n\n if len(stack) != 1:\n raise RpnException('Multiple stack elements remain')\n\n return stack[0]\n", "id": "11576703", "language": "Python", "matching_score": 0.2001558542251587, "max_stars_count": 0, "path": "python3-bottle/rpn.py" }, { "content": "from bottle import route, run\n\nfrom tokenize import tokenize\nfrom infix import evaluate\n\n@route('/<expr>')\ndef calculate(expr):\n return str(evaluate(tokenize(expr)))\n\nrun(host='localhost', port=3000)\n", "id": "5771097", "language": "Python", "matching_score": 0.8891838192939758, "max_stars_count": 0, "path": "python3-bottle/app.py" }, { "content": "def tokenize(expression):\n inprogress = ''\n\n for c in expression:\n if c.isspace():\n continue\n if c.isdigit():\n inprogress += c\n elif c.isalpha():\n raise ValueError\n else:\n if inprogress:\n yield inprogress\n inprogress = ''\n yield c\n if inprogress:\n yield inprogress\n", "id": "4877492", "language": "Python", "matching_score": 0.8412105441093445, "max_stars_count": 0, "path": "python3-bottle/tokenize.py" } ]
0.865197
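The row above pairs a shunting-yard conversion (infix.py) with a stack-based RPN evaluator (rpn.py) behind a Bottle route. The usage sketch below traces one expression through both stages; it assumes the repo's python3-bottle directory is on the import path, where the local tokenize.py shadows the standard-library module just as app.py relies on.

# Usage sketch for the calculator modules in the row above, assuming the
# python3-bottle directory from that repo is on the import path.
from tokenize import tokenize   # the repo's tokenize.py, not the stdlib module
from infix import convert, evaluate

expression = '3 + 4 * 2'
tokens = list(tokenize(expression))        # ['3', '+', '4', '*', '2']
rpn_tokens = list(convert(iter(tokens)))   # ['3', '4', '2', '*', '+'] via shunting-yard
print(rpn_tokens)
print(evaluate(tokens))                    # 11, since '*' binds tighter than '+'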
jamiegowing
[ { "content": "alienDictionary = {\n \"we\": \"vorag\",\n \"come\": \"thang\",\n \"in\": \"zon\",\n \"peace\": \"argh\",\n \"hello\": \"kodar\",\n \"can\": \"znak\",\n \"i\": \"az\",\n \"borrow\": \"liftit\",\n \"some\": \"zum\",\n \"rocket\": \"upgoman\",\n \"fuel\": \"kakboom\",\n \"please\": \"selpin\",\n \"don't\": \"baaaaaaaaarn\",\n \"shoot\": \"flabil\",\n \"welcome\": \"unkip\",\n \"our\": \"mandig\",\n \"new\": \"brang\",\n \"alien\": \"marangin\",\n \"overlords\": \"bap\",\n}\n\nenglishPhrase = input(\"please enter an english word or phrase to translate: \")\nenglishWords = englishPhrase.lower().split()\n\nalienWords = []\nfor word in englishWords:\n if word in alienDictionary:\n alienWords.append(alienDictionary[word])\n else:\n alienWords.append(word)\n\nprint(\"in alien, say: \", \" \".join(alienWords))\n", "id": "11202056", "language": "Python", "matching_score": 1.6907992362976074, "max_stars_count": 0, "path": "AlienDictionary.py" }, { "content": "percentages = {\"100%\": \"1/1\", \"50%\": \"1/2\", \"25%\": \"1/4\"}\npercentage = input(\"please enter a percentage\")\nfraction = []\nfor word in percentage:\n if word in percentages:\n fraction.append(percentages[word])\n else:\n fraction.append(word)\nprint(\"the fraction is\", \" \".join(fraction))", "id": "10902941", "language": "Python", "matching_score": 0.5130050182342529, "max_stars_count": 0, "path": "alphabet.py" }, { "content": "alphabet = \"\".join([chr(65 + r) for r in range(26)] * 2)\n\nstringToEncrypt = input(\"please enter a message to encrypt\")\nstringToEncrypt = stringToEncrypt.upper()\nshiftAmount = int(input(\"please enter a whole number from -25-25 to be your key\"))\nencryptedString = \"\"\nfor currentCharacter in stringToEncrypt:\n position = alphabet.find(currentCharacter)\n newPosition = position + shiftAmount\n if currentCharacter in alphabet:\n encryptedString = encryptedString + alphabet[newPosition]\n else:\n encryptedString = encryptedString + currentCharacter\nprint(\"your encrypted message is\", encryptedString)", "id": "12558329", "language": "Python", "matching_score": 0.9792754650115967, "max_stars_count": 0, "path": "SpyMessages.py" }, { "content": "print(\"Create your character\")\nname = input(\"what is your character's name\")\nage = int(input(\"how old is your character\"))\nstrengths = input(\"what are your character's strengths\")\nweaknesses = input(\"what are your character's weaknesses\")\nprint(f\"\"\"You'r charicters name is {name}\nYour charicter is {age} years old\nstrengths:{strengths}\nweaknesses:{weaknesses}\n{name}says,'thanks for creating me.'\n\"\"\")", "id": "3351323", "language": "Python", "matching_score": 0.6574469804763794, "max_stars_count": 0, "path": "character.py" }, { "content": "import random\ndias = [\"lunes\", \"martes\", \"miercoles\", \"jueves\", \"veirnes\", \"sabado\", \"domingo\"]\nmes = [\"enero\", \"febuero\", \"marcho\", \"april\", \"mayo\", \"juno\", \"julyo\", \"augusto\", \"septimbre\", \"octobre\", \"novembre\", \"decembre\"]\ndias2 = random.randint(1, 31)\nyear = random.randint(2000, 2050)\nprint(\"it is\", random.choice[dias], dias2, random.choice[mes], year)", "id": "4579094", "language": "Python", "matching_score": 0.9410727024078369, "max_stars_count": 0, "path": "espanol.py" }, { "content": "from random import choice\n\nspacelist = [\"rocket\", \"planet\", \"alien\", \"asteroid\"]\nprint(choice(spacelist), choice(spacelist))", "id": "1465506", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "SpaceList.py" }, { 
"content": "import random\nnumber = random.randint(12345,67890)\nprint(number)", "id": "6960202", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "guessing_game.py" }, { "content": "import random\n\n\nnumber = random.randint(1, 90)\nbingo = random.randint(1, 90)\nif number == bingo:\n print(\"true\")\nelse:\n print(\"false\")", "id": "5013853", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "bingo.py" }, { "content": "import random\nnumber1 = random.randint(1, 9999)\nnumber2 = random.randint(1, 9999)\nif number1 != number2:\n if number1 < number2:\n print(number1, \"<\", number2)\n else:\n print(number1, \">\", number2)\nelse:\n print(number1, \"=\", number2)\nprint(number1 + number2)\nprint(number1 - number2)\nprint(number1*number2)\nprint(number1/number2)", "id": "7903188", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "snowflakes.py" }, { "content": "import random\nimport time\nfrom turtle import *\ncolor(\"cyan\")\nshape(\"classic\")\nshapesize(2.5)\npensize(5)\nspeed(10)\nshapes = random.randint(1, 100)\nsides = random.randint(1, 20)\nprint(shapes, sides)\nprint(shapes*sides)\nprint(shapes/sides)\nfor x in range(shapes):\n for y in range(sides):\n forward(50)\n right(360/sides)\n color(random.random(), random.random(), random.random())\n right(360/shapes)\ntime.sleep(10)", "id": "10412098", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "shapes_and_sides.py" }, { "content": "import tkinter\nwindow = tkinter.Tk()\ncanvas = tkinter.Canvas(window, width=1000, height=500, bg=\"white\")\ncanvas.pack()\nlastX, lastY = 0,0\ncolour = \"red\"\ndef store_position(event):\n global lastX, lastY\n lastX = event.x\n lastY = event.y\ndef on_click(event):\n store_position(event)\ndef on_drag(event):\n global colour\n canvas.create_line(lastX, lastY, event.x, event.y, fill=colour, width=3)\n store_position(event)\ncanvas.bind(\"<Button-1>\", on_click)\ncanvas.bind(\"<B1-Motion>\", on_drag)\nred_id = canvas.create_rectangle(10, 10, 30, 30, fill=\"red\")\nblue_id = canvas.create_rectangle(10, 35, 30, 55, fill=\"blue\")\nblack_id = canvas.create_rectangle(10, 60, 30, 80, fill=\"black\")\nwhite_id = canvas.create_rectangle(10, 85, 30, 105, fill=\"white\")\n\ndef colour_event(c):\n def set_colour(event):\n global colour\n colour = c\n return set_colour\n\ncanvas.tag_bind(red_id, \"<Button-1>\", colour_event(\"red\"))\ncanvas.tag_bind(blue_id, \"<Button-1>\", colour_event(\"blue\"))\ncanvas.tag_bind(black_id, \"<Button-1>\", colour_event(\"black\"))\ncanvas.tag_bind(white_id, \"<Button-1>\", colour_event(\"white\"))\n\nwindow.mainloop()\n", "id": "11758112", "language": "Python", "matching_score": 2.372152090072632, "max_stars_count": 0, "path": "masterpeice.py" }, { "content": "import tkinter\nwindow = tkinter.Tk()\nbutton = tkinter.Button(window, text=\"Do not press this button\", width=40)\nbutton.pack(padx=10, pady=10)\nclickCount = 0\ndef onClick(event):\n global clickCount\n clickCount = clickCount + 1\n if clickCount == 1:\n button.configure(text=f\"this is the {clickCount}st time you pressed this button\")\n elif clickCount == 2:\n button.configure(text=f\"NO BUTTON!\")\n elif clickCount == 3:\n button.configure(text=\"tut tut tut\")\n else:\n button.configure(text=f\"You clicked this button {clickCount} times!\")\nbutton.bind(\"<ButtonRelease-1>\", onClick)\nwindow.mainloop()", "id": "5574946", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "don'tpressthebutton.py" }, { 
"content": "print(8**3)", "id": "5184865", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "print.py" }, { "content": "expense_report = [\n1864,\n1192,\n1802,\n1850,\n1986,\n1514,\n1620,\n1910,\n1557,\n1529,\n1081,\n1227,\n1869,\n1545,\n1064,\n1509,\n1060,\n1590,\n1146,\n1855,\n667,\n1441,\n1241,\n1473,\n1321,\n1429,\n1534,\n1959,\n1188,\n1597,\n1256,\n1673,\n1879,\n1821,\n1423,\n1838,\n1392,\n1941,\n1124,\n1629,\n1780,\n1271,\n1190,\n1680,\n1379,\n1601,\n1670,\n1916,\n1787,\n1844,\n2000,\n1672,\n1276,\n1896,\n1746,\n1369,\n1687,\n1263,\n1948,\n1159,\n1710,\n1304,\n1806,\n1709,\n1286,\n1635,\n1075,\n1125,\n1607,\n1408,\n1903,\n1143,\n1736,\n1266,\n1645,\n1571,\n1488,\n1200,\n211,\n1148,\n1585,\n2005,\n1724,\n1071,\n1690,\n1189,\n1101,\n1315,\n1452,\n1622,\n1074,\n1486,\n1209,\n1253,\n1422,\n1235,\n1354,\n1399,\n1675,\n241,\n1229,\n1136,\n1901,\n1453,\n1344,\n1685,\n1985,\n1455,\n1764,\n1634,\n1935,\n1386,\n1772,\n1174,\n1743,\n1818,\n1156,\n1221,\n167,\n1398,\n1552,\n1816,\n1197,\n1829,\n1930,\n1812,\n1983,\n1185,\n1579,\n1928,\n1892,\n1978,\n1720,\n1584,\n1506,\n1245,\n1539,\n1653,\n1876,\n1883,\n1982,\n1114,\n1406,\n2002,\n1765,\n1175,\n1947,\n1519,\n1943,\n1566,\n1361,\n1830,\n1679,\n999,\n1366,\n1575,\n1556,\n1555,\n1065,\n1606,\n1508,\n1548,\n1162,\n1664,\n1525,\n1925,\n1975,\n1384,\n1076,\n1790,\n1656,\n1578,\n1671,\n1424,\n757,\n1485,\n1677,\n1583,\n1395,\n1793,\n1111,\n1522,\n1195,\n1128,\n1123,\n1151,\n1568,\n1559,\n1331,\n1191,\n1753,\n1630,\n1979,\n953,\n1480,\n1655,\n1100,\n1419,\n1560,\n1667,\n]\n\ndef part1():\n for x in range(len(expense_report) - 1):\n for y in range(x + 1, len(expense_report)):\n if expense_report[x] + expense_report[y] == 2020:\n print(expense_report[x]*expense_report[y])\n\ndef part2():\n for x in range(len(expense_report) - 2):\n for y in range(x + 1, len(expense_report) - 1):\n for z in range(y + 1, len(expense_report)):\n if expense_report[x] + expense_report[y] + expense_report[z] == 2020:\n print(expense_report[x]*expense_report[y]*expense_report[z])\n\nif __name__ == '__main__':\n part1()\n part2()\n", "id": "7489967", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "aoc_2020_01.py" } ]
0.79926
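SpyMessages.py in the row above implements a Caesar shift by doubling the alphabet string so positive shifts wrap around, with negative keys handled implicitly by Python's negative indexing. The sketch below is a compact variant of the same cipher using modulo arithmetic instead of the doubled string; caesar_encrypt is an illustrative name, not from the repository.

# Compact variant of the Caesar shift in SpyMessages.py, using modulo arithmetic
# instead of a doubled alphabet. caesar_encrypt is an illustrative name.
ALPHABET = ''.join(chr(65 + i) for i in range(26))

def caesar_encrypt(message: str, shift: int) -> str:
    result = []
    for char in message.upper():
        position = ALPHABET.find(char)
        if position == -1:
            result.append(char)  # leave spaces and punctuation untouched
        else:
            result.append(ALPHABET[(position + shift) % 26])
    return ''.join(result)

print(caesar_encrypt('Meet at dawn', 3))   # PHHW DW GDZQ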
BryanMachin
[ { "content": "__all__ = [\"activity\", \"category\", \"element\", \"environment\", \"rule\", \"student\"]\r\n", "id": "8176710", "language": "Python", "matching_score": 1.0605072975158691, "max_stars_count": 0, "path": "backend/Entities/__init__.py" }, { "content": "from backend.Entities.category import *\r\n\r\n\r\nclass Student:\r\n def __init__(self, name):\r\n self.name = name\r\n self.categories = dict()\r\n self.elements = dict()\r\n self.goals = []\r\n\r\n def set_category(self, element, category):\r\n if not self.categories.keys().__contains__(element):\r\n self.categories.__setitem__(element, Category.__getattribute__(Category, category))\r\n self.elements.__setitem__(element, 0)\r\n return True\r\n return False\r\n\r\n def edit_element_points(self, element, points):\r\n if self.elements.keys().__contains__(element):\r\n self.elements[element] += points\r\n if self.elements[element] > 10:\r\n self.elements[element] = 10\r\n return True\r\n return False\r\n\r\n def add_goal(self, element):\r\n self.goals.append(element)\r\n", "id": "10627814", "language": "Python", "matching_score": 0.45623162388801575, "max_stars_count": 0, "path": "backend/Entities/student.py" }, { "content": "from backend.Simulation.simulation import *\r\nfrom backend.tools import *\r\n\r\n\r\ndef search_good_strategy(main_env):\r\n strategies = build_strategies(main_env)\r\n strats_eval = []\r\n for strat in strategies:\r\n strats_eval.append(objective_function(strat, main_env))\r\n for j in range(100):\r\n for i in range(len(strategies)):\r\n v = vns(strat, main_env)\r\n if strats_eval[i][0] < v[1][0]:\r\n strats_eval[i] = v[1]\r\n strategies[i] = v[0]\r\n best_strat = -1\r\n bs_index = -1\r\n for i in range(len(strats_eval)):\r\n if strats_eval[i][0] > best_strat:\r\n bs_index = i\r\n best_strat = strats_eval[i][0]\r\n content_order = []\r\n for i in strats_eval[bs_index][2]:\r\n if i.name not in content_order:\r\n content_order.append(i.name)\r\n print(\"Orden de aprendizaje de contenidos:\")\r\n for i in range(len(content_order)):\r\n print(i+1,\"--\",content_order[i])\r\n print(\"Porcentaje de objetivos aprendidos:\",strats_eval[bs_index][0],\"%\")\r\n print(\"Tiempo transcurrido:\",round(-1*strats_eval[bs_index][1],1),\"h\")\r\n return [strategies[bs_index], best_strat, content_order, strats_eval[bs_index][0], round(-1*strats_eval[bs_index][1],1)]\r\n\r\n\r\ndef build_strategies(main_env):\r\n strategies = []\r\n add_attributes(main_env.student.goals, \"dep_goals\", [])\r\n goals = topological_sort(main_env.elements, main_env.student.goals)\r\n strategies.append(goals)\r\n for i in range(3):\r\n strategies.append(other_topological_sort(goals))\r\n return strategies\r\n\r\n\r\ndef vns(strategy, main_env):\r\n a = randint(0, len(strategy) - 1)\r\n b = randint(0, len(strategy) - 1)\r\n while b == a:\r\n b = randint(0, len(strategy) - 1)\r\n temp = strategy[a]\r\n temp2 = strategy[b]\r\n new_strategy = []\r\n for i in strategy:\r\n if i == temp:\r\n new_strategy.append(temp2)\r\n continue\r\n if i == temp2:\r\n new_strategy.append(temp)\r\n continue\r\n new_strategy.append(i)\r\n return [new_strategy, objective_function(new_strategy, main_env)]\r\n\r\n\r\ndef objective_function(strategy, main_env):\r\n strat_env = main_env.clone_environment()\r\n strat_env.student.goals = []\r\n ret_strategy = []\r\n for goal in strategy:\r\n strat_env.student.goals.append(goal)\r\n if strat_env.student.categories[goal] == \"Not_learned\":\r\n ret_strategy += fill_sub_strategy(goal, strat_env)\r\n else:\r\n 
ret_strategy.append(goal)\r\n return simulate(ret_strategy, main_env, 1)\r\n\r\n\r\ndef fill_sub_strategy(goal, strat_env):\r\n add_attributes(strat_env.elements, \"available_points\", 0)\r\n add_attributes(strat_env.elements, \"learned_missing\", 0)\r\n ldfss(goal, strat_env)\r\n strats = []\r\n strats.append(search_strat(goal, strat_env, \"avp\"))\r\n strats.append(search_strat(goal, strat_env, \"lm\"))\r\n strats.append(search_strat(goal, strat_env, \"rnd\"))\r\n strats_eval = []\r\n eval_avp = simulate(strats[0], strat_env, 3)\r\n eval_avp.append(2)\r\n eval_lm = simulate(strats[1], strat_env, 3)\r\n eval_lm.append(1)\r\n eval_rnd = simulate(strats[2], strat_env, 3)\r\n eval_rnd.append(0)\r\n strats_eval.append(eval_avp)\r\n strats_eval.append(eval_lm)\r\n strats_eval.append(eval_rnd)\r\n strats_eval.sort()\r\n delete_attributes(strat_env.elements, \"available_points\")\r\n delete_attributes(strat_env.elements, \"learned_missing\")\r\n return strats[0]\r\n\r\n\r\ndef search_strat(goal, env, strat_name):\r\n strat_env = env.clone_environment()\r\n strat = []\r\n if strat_name == \"avp\":\r\n avps_visit(goal, strat, strat_env, 0)\r\n elif strat_name == \"lm\":\r\n lm_visit(goal, strat, strat_env, 0)\r\n else:\r\n rnd_visit(goal, strat, strat_env, 0)\r\n return strat\r\n\r\n\r\ndef avps_visit(v, stack, env, behind_count):\r\n if env.student.categories[v] == \"Learnable\":\r\n return stack.insert(0, v)\r\n deps_needed = math.ceil(len(v.dependencies) * env.rules_params[0]) - learned_deps(v, env)\r\n maxs = []\r\n for dep in v.dependencies:\r\n if len(maxs) < deps_needed:\r\n maxs.append(dep)\r\n continue\r\n maxs.sort(key=lambda x: x.available_points)\r\n if dep.available_points > maxs[0].available_points:\r\n maxs.pop(0)\r\n maxs.append(dep)\r\n for dep in maxs:\r\n avps_visit(dep, stack, env, len(stack))\r\n return stack.insert(len(stack) - behind_count, v)\r\n\r\n\r\ndef lm_visit(v, stack, env, behind_count):\r\n if env.student.categories[v] == \"Learnable\":\r\n return stack.insert(0, v)\r\n deps_needed = math.ceil(len(v.dependencies) * env.rules_params[0]) - learned_deps(v, env)\r\n mins = []\r\n for dep in v.dependencies:\r\n if len(mins) < deps_needed:\r\n mins.append(dep)\r\n continue\r\n mins.sort(key=lambda x: x.learned_missing, reverse=True)\r\n if dep.learned_missing < mins[0].learned_missing:\r\n mins.pop(0)\r\n mins.append(dep)\r\n for dep in mins:\r\n lm_visit(dep, stack, env, behind_count)\r\n return stack.insert(len(stack) - behind_count, v)\r\n\r\n\r\ndef rnd_visit(v, stack, env, behind_count):\r\n if env.student.categories[v] == \"Learnable\":\r\n return stack.insert(0, v)\r\n deps_needed = math.ceil(len(v.dependencies) * env.rules_params[0]) - learned_deps(v, env)\r\n valid_indexes = []\r\n for j in range(len(v.dependencies)):\r\n if env.student.categories[v.dependencies[j]] != \"Learned\":\r\n valid_indexes.append(j)\r\n for i in range(deps_needed):\r\n r = randint(0, len(valid_indexes) - 1)\r\n rnd_visit(v.dependencies[valid_indexes.pop(r)], stack, env, behind_count)\r\n return stack.insert(len(stack) - behind_count, v)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "id": "10449314", "language": "Python", "matching_score": 3.597332239151001, "max_stars_count": 0, "path": "backend/Simulation/learningStrategy.py" }, { "content": "from backend.Entities.category import *\r\nfrom random import *\r\nfrom copy import copy\r\nimport math\r\n\r\n\r\ndef add_attributes(elements, attribute, default_value):\r\n for element in elements:\r\n element.__setattr__(attribute, 
default_value)\r\n\r\n\r\ndef delete_attributes(elements, attribute):\r\n for element in elements:\r\n try:\r\n a = element.__delattr__(attribute)\r\n except:\r\n continue\r\n\r\n\r\ndef check_cycles(elements):\r\n add_attributes(elements, \"color\", None)\r\n for element in elements:\r\n if element.color is None:\r\n back_edge = check_cycles_visit(element)\r\n if back_edge is not None:\r\n delete_attributes(elements, \"color\")\r\n return back_edge\r\n delete_attributes(elements, \"color\")\r\n return None\r\n\r\n\r\ndef check_cycles_visit(v):\r\n v.color = \"gray\"\r\n for dependency in v.dependencies:\r\n if dependency.color is None:\r\n dependency.color = \"gray\"\r\n back_edge = check_cycles_visit(dependency)\r\n if back_edge is not None:\r\n return back_edge\r\n elif dependency.color == \"gray\":\r\n return [v, dependency]\r\n v.color = \"black\"\r\n return None\r\n\r\n\r\n\r\ndef rank(elements, goals):\r\n add_attributes(elements, \"rank\", 0)\r\n add_attributes(elements, \"parent_goals\", [])\r\n for i in goals:\r\n add_attributes(elements, \"visited\", None)\r\n rank_visit(i, 1)\r\n delete_attributes(elements, \"visited\")\r\n\r\n\r\ndef rank_visit(v, value):\r\n v.visited = 1\r\n v.rank += value\r\n for dependency in v.dependencies:\r\n if dependency.visited is None:\r\n rank_visit(dependency, value)\r\n\r\n\r\ndef estimate_time(student, activity, times):\r\n accum = 0\r\n for i in range(times):\r\n k = 0\r\n for element in activity.elements:\r\n k += student.elements[element]\r\n k /= len(activity.elements)\r\n k = k.__round__()\r\n\r\n if k < 5:\r\n k *= 0.1\r\n k = 0.5 - k\r\n k = round(activity.estimated_time * k, 1)\r\n accum += round(uniform(activity.estimated_time, activity.estimated_time + k), 1)\r\n continue\r\n if k > 5:\r\n if k < 10:\r\n k %= 5\r\n k *= 0.1\r\n k = round(activity.estimated_time * k, 1)\r\n else:\r\n k = round(activity.estimated_time * 0.5, 1)\r\n accum += round(uniform(activity.estimated_time - k, activity.estimated_time), 1)\r\n continue\r\n\r\n accum += activity.estimated_time\r\n return round(accum/times, 1)\r\n\r\n\r\ndef topological_sort(elements, goals):\r\n stack = []\r\n add_attributes(elements, \"rank\", 0)\r\n for goal in goals:\r\n add_attributes(elements, \"visited\", None)\r\n topological_sort_visit(goal, stack, goal, goals)\r\n delete_attributes(elements, \"visited\")\r\n delete_attributes(elements, \"rank\")\r\n return stack\r\n\r\n\r\ndef topological_sort_visit(v, stack, root, goals):\r\n for dependency in v.dependencies:\r\n if dependency.visited is None:\r\n topological_sort_visit(dependency, stack, root, goals)\r\n if v in goals:\r\n if v is not root:\r\n root.dep_goals.append(v)\r\n if v not in stack:\r\n stack.append(v)\r\n\r\n\r\ndef ldfss(goal, env):\r\n \"\"\"Antes y despues de usarse deben agregarse y eliminarse respectivamente los atributos\r\n learned_missing y available points\"\"\"\r\n add_attributes(env.elements, \"rank\", 0)\r\n add_attributes(env.elements, \"visited\", None)\r\n visit(goal, env)\r\n delete_attributes(env.elements, \"visited\")\r\n delete_attributes(env.elements, \"rank\")\r\n\r\n\r\ndef visit(v, env):\r\n v.visited = 1\r\n for i in v.dependencies:\r\n if i.visited is None:\r\n visit(i, env)\r\n if env.student.categories[v] == \"Learned\":\r\n v.learned_missing = 0\r\n elif env.student.categories[v] == \"Learnable\":\r\n v.learned_missing = 1\r\n v.available_points = available_points(v, env.activities)\r\n else:\r\n v.learned_missing = lm_for_not_learned(v, env)\r\n v.available_points = 
av_for_not_learned(v, env)\r\n\r\n\r\ndef lm_for_not_learned(element, env):\r\n deps_needed = math.ceil(len(element.dependencies) * env.rules_params[0]) - learned_deps(element, env)\r\n mins = []\r\n for dep in element.dependencies:\r\n if len(mins) < deps_needed:\r\n mins.append(dep.learned_missing)\r\n continue\r\n mins.sort(reverse=True)\r\n if dep.learned_missing < mins[0]:\r\n mins[0] = dep.learned_missing\r\n return sum(mins)\r\n\r\n\r\ndef av_for_not_learned(element, env):\r\n deps_needed = math.ceil(len(element.dependencies) * env.rules_params[0]) - learned_deps(element, env)\r\n maxs = []\r\n for dep in element.dependencies:\r\n if len(maxs) < deps_needed:\r\n maxs.append(dep.available_points)\r\n continue\r\n maxs.sort()\r\n if dep.available_points > maxs[0]:\r\n maxs[0] = dep.available_points\r\n ret = 1\r\n for x in maxs:\r\n ret *= x\r\n return ret\r\n\r\n\r\ndef available_points(element, activities):\r\n a = 0\r\n for activity in activities:\r\n for e in activity.elements:\r\n if e is element:\r\n a += activity.elements[e]\r\n return a\r\n\r\n\r\ndef learned_deps(element, env):\r\n l = 0\r\n for dep in element.dependencies:\r\n if env.student.categories[dep] == \"Learned\":\r\n l += 1\r\n return l\r\n\r\n\r\ndef other_topological_sort(top_sort):\r\n no_indegree = []\r\n for i in top_sort:\r\n i.indegree = len(i.dep_goals)\r\n if not i.indegree:\r\n no_indegree.append(i)\r\n sort = []\r\n\r\n while no_indegree:\r\n r = randint(0, len(no_indegree) - 1)\r\n next = no_indegree.pop(r)\r\n sort.append(next)\r\n for i in top_sort:\r\n for j in i.dep_goals:\r\n if next.name == j:\r\n i.indegree -= 1\r\n if not i.indegree:\r\n no_indegree.append(i)\r\n return sort\r\n", "id": "7996910", "language": "Python", "matching_score": 1.9498618841171265, "max_stars_count": 0, "path": "backend/tools.py" }, { "content": "from backend.tools import estimate_time\r\nfrom backend.Agents.adviser import *\r\n\r\n\r\ndef simulate(sub_strategy, main_env, times):\r\n tracks = []\r\n for i in range(times):\r\n env = main_env.clone_environment()\r\n tracks.append(reps(sub_strategy, env, Adviser()))\r\n learning_percent = 0\r\n time = 0\r\n for i in range(times):\r\n learning_percent += tracks[i][0]\r\n time += tracks[i][1]\r\n\r\n return [learning_percent/times, -(time/times), sub_strategy]\r\n\r\n\r\ndef reps(strategy, env, adviser):\r\n time = 0\r\n rep_checker = -1\r\n index = 0\r\n while index < len(strategy):\r\n rep_checker += 1\r\n next_content = strategy[index]\r\n if adviser.stop(rep_checker):\r\n break\r\n\r\n if env.student.categories[next_content] == \"Learned\":\r\n index += 1\r\n rep_checker = 0\r\n continue\r\n if env.student.categories[next_content] == \"Not_learned\":\r\n break\r\n activities = []\r\n for activity in env.activities:\r\n if next_content in activity.elements:\r\n activities.append(activity)\r\n if not activities:\r\n break\r\n front = make_front(activities, next_content, env.student)\r\n r = randint(0, len(front) - 1)\r\n time += env.perform_activity(front[r])\r\n p = 0\r\n for goal in env.student.goals:\r\n if env.student.categories[goal] == \"Learned\":\r\n p += 1\r\n t = len(env.student.goals)\r\n return [100 * p / t, time, env]\r\n\r\n\r\ndef make_front(activities, element, student):\r\n front = []\r\n comparable = False\r\n for activity in activities:\r\n for front_activity in front:\r\n comp = compare_activities(activity, front_activity, element, student)\r\n if comp == \"better\":\r\n comparable = True\r\n front.remove(front_activity)\r\n front.append(activity)\r\n 
break\r\n elif not comp == \"not_comparable\":\r\n comparable = True\r\n break\r\n if not comparable:\r\n front.append(activity)\r\n return front\r\n\r\n\r\ndef compare_activities(a, b, element, student):\r\n ret = \"\"\r\n a_estimated_time = estimate_time(student, a, 3)\r\n b_estimated_time = estimate_time(student, b, 3)\r\n if a_estimated_time > b_estimated_time and a.elements[element] > b.elements[element]:\r\n ret = \"not_comparable\"\r\n if a_estimated_time < b_estimated_time:\r\n if a.elements[element] >= b.elements[element]:\r\n ret = \"better\"\r\n ret = \"not_comparable\"\r\n if a.elements[element] > b.elements[element]:\r\n ret = \"better\"\r\n return ret\r\n", "id": "6389129", "language": "Python", "matching_score": 1.5462095737457275, "max_stars_count": 0, "path": "backend/Simulation/simulation.py" }, { "content": "from random import *\r\n\r\n\r\nclass Adviser:\r\n\r\n def stop(self, count):\r\n if count > 15:\r\n r = random()\r\n if r < 0.5:\r\n return True\r\n if count > 10:\r\n r = random()\r\n if r < 0.35:\r\n return True\r\n return False\r\n", "id": "6327316", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "backend/Agents/adviser.py" }, { "content": "from compilation.Parser.ShiftReduce import ShiftReduce\r\n\r\n\r\ndef construction_ast(parser_lr1, operations, tokens):\r\n if not parser_lr1 or not operations or not tokens:\r\n return # Nada que eval!!!!\r\n right_parse = iter(parser_lr1)\r\n tokens = iter(tokens)\r\n stack = []\r\n for operation in operations:\r\n if operation == ShiftReduce.SHIFT:\r\n token = next(tokens)\r\n stack.append(token.value)\r\n elif operation == ShiftReduce.REDUCE:\r\n production = next(right_parse)\r\n head, body = production\r\n attributes = production.attributes\r\n assert all(rule is None for rule in attributes[1:]), 'There must be only synteticed attributes.'\r\n rule = attributes[0]\r\n if len(body):\r\n synteticed = [None] + stack[-len(body):]\r\n value = rule(None, synteticed)\r\n stack[-len(body):] = [value]\r\n else:\r\n stack.append(rule(None, None))\r\n else:\r\n raise Exception('error')\r\n # queda la raiz del AST, el node Program, y 'eof', el token final.\r\n return stack[0]\r\n", "id": "2870910", "language": "Python", "matching_score": 1.837960124015808, "max_stars_count": 0, "path": "compilation/AST/compilation/AST/Construction.py" }, { "content": "class ShiftReduce:\r\n SHIFT = 'SHIFT'\r\n REDUCE = 'REDUCE'\r\n OK = 'OK'\r\n\r\n def __init__(self, g):\r\n self.g = g\r\n self.action = {}\r\n self.goto = {}\r\n self.build_parsing_table()\r\n\r\n def build_parsing_table(self):\r\n pass\r\n\r\n def __call__(self, tokens, ope=False):\r\n stack = [0]\r\n cursor = 0\r\n output = []\r\n operations = []\r\n errors = []\r\n\r\n while True:\r\n state = stack[-1]\r\n lookahead = tokens[cursor]\r\n try:\r\n action, tag = self.action[state, lookahead]\r\n # Shift case\r\n if action == self.SHIFT:\r\n operations.append(self.SHIFT)\r\n stack.append(tag)\r\n cursor += 1\r\n\r\n # Reduce case\r\n elif action == self.REDUCE:\r\n operations.append(self.REDUCE)\r\n output.append(tag)\r\n for _ in tag.Right:\r\n stack.pop()\r\n a = self.goto[stack[-1], tag.Left.name]\r\n stack.append(a)\r\n\r\n # OK case\r\n elif action == self.OK:\r\n return errors, output, operations if ope else output\r\n # Invalid case\r\n else:\r\n raise NameError\r\n except KeyError:\r\n errors.append(tokens[cursor])\r\n return errors, output, operations if ope else output\r\n", "id": "1846306", "language": "Python", "matching_score": 
1.2553833723068237, "max_stars_count": 0, "path": "compilation/AST/compilation/Parser/ShiftReduce.py" }, { "content": "from compilation.Parser.ShiftReduce import ShiftReduce\r\nfrom compilation.Parser.Tools import *\r\nfrom compilation.Parser.Grammar import *\r\nfrom compilation.Parser.State import *\r\n\r\n\r\nclass LR1Parser(ShiftReduce):\r\n def build_parsing_table(self):\r\n g = self.g.augmented_grammar(True)\r\n automata = self.build_automata(g)\r\n for i, node in enumerate(automata):\r\n node.idx = i\r\n for node in automata:\r\n idx = node.idx\r\n for item in node.state:\r\n p = item.Production\r\n if item.is_reduce_item:\r\n if p.Left == g.Start_symbol:\r\n self._register(self.action, (idx, self.g.Eof.name), (ShiftReduce.OK, None))\r\n else:\r\n for c in item.Lookaheads:\r\n self._register(self.action, (idx, c.name), (ShiftReduce.REDUCE, p))\r\n else:\r\n if item.next_symbol.is_terminal:\r\n self._register(self.action, (idx, item.next_symbol.name),\r\n (ShiftReduce.SHIFT, node[item.next_symbol.name][0].idx))\r\n else:\r\n self._register(self.goto, (idx, item.next_symbol.name), node[item.next_symbol.name][0].idx)\r\n pass\r\n\r\n def build_automata(self, g):\r\n assert len(g.Start_symbol.Productions) == 1, 'Grammar must be augmented'\r\n firsts = compute_firsts(g)\r\n firsts[g.Eof] = ContainerSet(g.Eof)\r\n start_production = g.Start_symbol.Productions[0]\r\n start_item = Item(start_production, 0, lookaheads=(g.Eof,))\r\n start = frozenset([start_item])\r\n closure = self.closure_lr1(start, firsts)\r\n automata = State(frozenset(closure), True)\r\n pending = [start]\r\n visited = {start: automata}\r\n while pending:\r\n current = pending.pop()\r\n current_state = visited[current]\r\n for symbol in g.Terminals + g.Non_terminals:\r\n # (Get/Build `next_state`)\r\n a = self.goto_lr1(current_state.state, symbol, firsts, True)\r\n if not a:\r\n continue\r\n try:\r\n next_state = visited[a]\r\n except KeyError:\r\n next_state = State(frozenset(self.goto_lr1(current_state.state, symbol, firsts)), True)\r\n visited[a] = next_state\r\n pending.append(a)\r\n current_state.add_transition(symbol.name, next_state)\r\n automata.set_formatter(multiline_formatter)\r\n return automata\r\n\r\n def goto_lr1(self, items, symbol, firsts=None, just_kernel=False):\r\n assert just_kernel or firsts is not None, '`firsts` must be provided if `just_kernel=False`'\r\n items = frozenset(item.next_item() for item in items if item.next_symbol == symbol)\r\n return items if just_kernel else self.closure_lr1(items, firsts)\r\n\r\n def closure_lr1(self, items, firsts):\r\n closure = ContainerSet(*items)\r\n changed = True\r\n while changed:\r\n new_items = ContainerSet()\r\n # por cada item hacer expand y añadirlo a new_items\r\n for item in closure:\r\n e = self.expand(item, firsts)\r\n new_items.extend(e)\r\n changed = closure.update(new_items)\r\n return self.compress(closure)\r\n\r\n @staticmethod\r\n def compress(items):\r\n centers = {}\r\n for item in items:\r\n center = item.center()\r\n try:\r\n lookaheads = centers[center]\r\n except KeyError:\r\n centers[center] = lookaheads = set()\r\n lookaheads.update(item.Lookaheads)\r\n return {Item(x.Production, x.Pos, set(lookahead)) for x, lookahead in centers.items()}\r\n\r\n @staticmethod\r\n def expand(item, firsts):\r\n next_symbol = item.next_symbol\r\n if next_symbol is None or not next_symbol.is_non_terminal:\r\n return []\r\n lookaheads = ContainerSet()\r\n # (Compute lookahead for child items)\r\n # calcular el first a todos los preview posibles\r\n for p 
in item.preview():\r\n for first in compute_local_first(firsts, p):\r\n lookaheads.add(first)\r\n _list = []\r\n for production in next_symbol.Productions:\r\n _list.append(Item(production, 0, lookaheads))\r\n return _list\r\n\r\n @staticmethod\r\n def _register(table, key, value):\r\n table[key] = value\r\n", "id": "4592094", "language": "Python", "matching_score": 3.5961546897888184, "max_stars_count": 0, "path": "compilation/AST/compilation/Parser/Parser_LR1.py" }, { "content": "\r\n\r\nclass Symbol(object):\r\n def __init__(self, name, grammar):\r\n self.name = name\r\n self.grammar = grammar\r\n\r\n def __repr__(self):\r\n return repr(self.name)\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n def __add__(self, other):\r\n if isinstance(other, Symbol):\r\n return Sentence(self, other)\r\n raise TypeError(other)\r\n\r\n def __or__(self, other):\r\n if isinstance(other, Sentence):\r\n return SentenceList(Sentence(self), other)\r\n raise TypeError(other)\r\n\r\n def __len__(self):\r\n return 1\r\n\r\n @property\r\n def is_epsilon(self):\r\n return False\r\n\r\n\r\nclass Sentence(object):\r\n def __init__(self, *args):\r\n self._symbols = tuple(x for x in args if not x.is_epsilon)\r\n self.hash = hash(self._symbols)\r\n\r\n def __len__(self):\r\n return len(self._symbols)\r\n\r\n def __add__(self, other):\r\n if isinstance(other, Symbol):\r\n return Sentence(*(self._symbols + (other,)))\r\n if isinstance(other, Sentence):\r\n return Sentence(*(self._symbols + other._symbols))\r\n\r\n def __or__(self, other):\r\n if isinstance(other, Sentence):\r\n return SentenceList(self, other)\r\n if isinstance(other, Symbol):\r\n return SentenceList(self, Sentence(other))\r\n\r\n def __str__(self):\r\n return (\"%s \" * len(self._symbols) % tuple(self._symbols)).strip()\r\n\r\n def __iter__(self):\r\n return iter(self._symbols)\r\n\r\n def __getitem__(self, index):\r\n return self._symbols[index]\r\n\r\n def __eq__(self, other):\r\n return self._symbols == other._symbols\r\n\r\n def __hash__(self):\r\n return self.hash\r\n\r\n @property\r\n def is_epsilon(self):\r\n return False\r\n\r\n\r\nclass SentenceList(object):\r\n def __init__(self, *args):\r\n self._sentences = list(args)\r\n\r\n def add(self, symbol):\r\n if not symbol and (symbol is None or not symbol.is_epsilon):\r\n raise ValueError(symbol)\r\n self._sentences.append(symbol)\r\n\r\n def __or__(self, other):\r\n if isinstance(other, Sentence):\r\n self.add(other)\r\n return self\r\n\r\n if isinstance(other, Symbol):\r\n return self | Sentence(other)\r\n\r\n def __iter__(self):\r\n return iter(self._sentences)\r\n\r\n\r\nclass Production(object):\r\n def __init__(self, non_terminal, sentence):\r\n self.Left = non_terminal\r\n self.Right = sentence\r\n\r\n def __str__(self):\r\n return '%s := %s' % (self.Left, self.Right)\r\n\r\n def __repr__(self):\r\n return '%s -> %s' % (self.Left, self.Right)\r\n\r\n def __iter__(self):\r\n yield self.Left\r\n yield self.Right\r\n\r\n def __eq__(self, other):\r\n return isinstance(other, Production) and self.Left == other.Left and self.Right == other.Right\r\n\r\n def __hash__(self):\r\n return hash((self.Left, self.Right))\r\n\r\n @property\r\n def is_epsilon(self):\r\n return self.Right.IsEpsilon\r\n\r\n\r\nclass AttributeProduction(Production):\r\n def __init__(self, non_terminal, sentence, attributes):\r\n if not isinstance(sentence, Sentence) and isinstance(sentence, Symbol):\r\n sentence = Sentence(sentence)\r\n super(AttributeProduction, self).__init__(non_terminal, sentence)\r\n 
self.attributes = attributes\r\n\r\n def __str__(self):\r\n return '%s := %s' % (self.Left, self.Right)\r\n\r\n def __repr__(self):\r\n return '%s -> %s' % (self.Left, self.Right)\r\n\r\n def __iter__(self):\r\n yield self.Left\r\n yield self.Right\r\n\r\n @property\r\n def is_epsilon(self):\r\n return self.Right.IsEpsilon\r\n\r\n\r\nclass NonTerminal(Symbol):\r\n def __init__(self, name, grammar):\r\n super().__init__(name, grammar)\r\n self.Productions = []\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n def __mod__(self, other):\r\n if isinstance(other, Sentence):\r\n p = Production(self, other)\r\n self.grammar.add_production(p)\r\n return self\r\n if isinstance(other, tuple):\r\n if len(other) == 2:\r\n other += (None,) * len(other[0])\r\n # Debe definirse una regla por cada símbolo de la producción\r\n if isinstance(other[0], Symbol) or isinstance(other[0], Sentence):\r\n p = AttributeProduction(self, other[0], other[1:])\r\n else:\r\n raise Exception(\"\")\r\n self.grammar.add_production(p)\r\n return self\r\n if isinstance(other, Symbol):\r\n p = Production(self, Sentence(other))\r\n self.grammar.add_production(p)\r\n return self\r\n if isinstance(other, SentenceList):\r\n for s in other:\r\n p = Production(self, s)\r\n self.grammar.add_production(p)\r\n return self\r\n raise TypeError(other)\r\n\r\n @property\r\n def is_terminal(self):\r\n return False\r\n\r\n @property\r\n def is_non_terminal(self):\r\n return True\r\n\r\n @property\r\n def is_epsilon(self):\r\n return False\r\n\r\n\r\nclass Terminal(Symbol):\r\n def __int__(self, name, grammar):\r\n super().__init__(name, grammar)\r\n self.Productions = []\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\n @property\r\n def is_terminal(self):\r\n return True\r\n\r\n @property\r\n def is_non_terminal(self):\r\n return False\r\n\r\n @property\r\n def is_epsilon(self):\r\n return False\r\n\r\n\r\nclass EOF(Terminal):\r\n def __init__(self, grammar):\r\n super().__init__('eof', grammar)\r\n\r\n def __str__(self):\r\n return 'eof'\r\n\r\n\r\nclass Epsilon(Terminal, Sentence):\r\n def __init__(self, grammar):\r\n super().__init__('epsilon', grammar)\r\n\r\n def __hash__(self):\r\n return hash(\"\")\r\n\r\n def __len__(self):\r\n return 0\r\n\r\n def __str__(self):\r\n return \"e\"\r\n\r\n def __repr__(self):\r\n return 'epsilon'\r\n\r\n def __iter__(self):\r\n yield from ()\r\n\r\n def __add__(self, other):\r\n return other\r\n\r\n def __eq__(self, other):\r\n return isinstance(other, (Epsilon,))\r\n\r\n @property\r\n def is_epsilon(self):\r\n return True\r\n\r\n\r\nclass Grammar:\r\n def __init__(self):\r\n self.Productions = []\r\n self.pType = None\r\n self.Non_terminals = []\r\n self.Terminals = []\r\n self.Start_symbol = None\r\n self.Epsilon = Epsilon(self)\r\n self.Eof = EOF(self)\r\n self.SymbolDict = {'eof': self.Eof}\r\n\r\n def non_terminal(self, name, start_symbol=False):\r\n if not name:\r\n raise Exception(\"Empty\")\r\n term = NonTerminal(name, self)\r\n if start_symbol:\r\n if self.Start_symbol is None:\r\n self.Start_symbol = term\r\n else:\r\n raise Exception('Cannot define more than one start symbol')\r\n self.Non_terminals.append(term)\r\n self.SymbolDict[name] = term\r\n return term\r\n\r\n def non_terminals(self, names):\r\n aux = tuple(self.non_terminal(i) for i in names.strip().split())\r\n return aux\r\n\r\n def add_production(self, production):\r\n if len(self.Productions) == 0:\r\n self.pType = type(production)\r\n production.Left.Productions.append(production)\r\n 
self.Productions.append(production)\r\n\r\n def terminal(self, name):\r\n if not name:\r\n raise Exception('Empty')\r\n term = Terminal(name, self)\r\n self.Terminals.append(term)\r\n self.SymbolDict[name] = term\r\n return term\r\n\r\n def terminals(self, names):\r\n aux = tuple(self.terminal(i) for i in names.strip().split())\r\n return aux\r\n\r\n def __getitem__(self, item):\r\n try:\r\n return self.SymbolDict[item]\r\n except KeyError:\r\n return None\r\n\r\n def copy(self):\r\n g = Grammar()\r\n g.Productions = self.Productions.copy()\r\n g.Non_terminals = self.Non_terminals.copy()\r\n g.Terminals = self.Terminals.copy()\r\n g.pType = self.pType\r\n g.Start_symbol = self.Start_symbol\r\n g.Epsilon = self.Epsilon\r\n g.Eof = self.Eof\r\n g.SymbolDict = self.SymbolDict.copy()\r\n return g\r\n\r\n @property\r\n def is_augmented_grammar(self):\r\n augmented = 0\r\n for left, right in self.Productions:\r\n if self.Start_symbol == left:\r\n augmented += 1\r\n if augmented <= 1:\r\n return True\r\n else:\r\n return False\r\n\r\n def augmented_grammar(self, force=False):\r\n if not self.is_augmented_grammar or force:\r\n g = self.copy()\r\n s = g.Start_symbol\r\n g.Start_symbol = None\r\n ss = g.non_terminal('S\\'', True)\r\n if g.pType is AttributeProduction:\r\n ss %= s + g.Epsilon, lambda x: x\r\n else:\r\n ss %= s + g.Epsilon\r\n return g\r\n else:\r\n return self.copy()\r\n\r\n\r\nclass Item:\r\n def __init__(self, production, pos, lookaheads=frozenset()):\r\n self.Production = production\r\n self.Pos = pos\r\n self.Lookaheads = frozenset(look for look in lookaheads)\r\n\r\n def __str__(self):\r\n s = str(self.Production.Left) + \" -> \"\r\n if len(self.Production.Right) > 0:\r\n for i, c in enumerate(self.Production.Right):\r\n if i == self.Pos:\r\n s += \".\"\r\n s += str(self.Production.Right[i])\r\n if self.Pos == len(self.Production.Right):\r\n s += \".\"\r\n else:\r\n s += \".\"\r\n s += \", \" + str(self.Lookaheads)[10:-1]\r\n return s\r\n\r\n def __repr__(self):\r\n return str(self)\r\n\r\n def __eq__(self, other):\r\n return (\r\n (self.Pos == other.Pos) and\r\n (self.Production == other.Production) and\r\n (set(self.Lookaheads) == set(other.Lookaheads))\r\n )\r\n\r\n def __hash__(self):\r\n return hash((self.Production, self.Pos, self.Lookaheads))\r\n\r\n @property\r\n def is_reduce_item(self):\r\n return len(self.Production.Right) == self.Pos\r\n\r\n @property\r\n def next_symbol(self):\r\n if self.Pos < len(self.Production.Right):\r\n return self.Production.Right[self.Pos]\r\n else:\r\n return None\r\n\r\n def next_item(self):\r\n if self.Pos < len(self.Production.Right):\r\n return Item(self.Production, self.Pos + 1, self.Lookaheads)\r\n else:\r\n return None\r\n\r\n def preview(self, skip=1):\r\n return [ self.Production.Right[self.Pos + skip:] + (lookahead,) for lookahead in self.Lookaheads]\r\n\r\n def center(self):\r\n return Item(self.Production, self.Pos)\r\n", "id": "3818341", "language": "Python", "matching_score": 1.2808171510696411, "max_stars_count": 0, "path": "compilation/AST/compilation/Parser/Grammar.py" }, { "content": "\r\nclass Token:\r\n def __init__(self, _type, value, location):\r\n self._type = _type\r\n self.value = value\r\n self.location = location\r\n \r\n def __str__(self):\r\n return self._type + \"[\" + self.value + \"]\"\r\n\r\n def type(self):\r\n return self._type\r\n\r\n def value(self):\r\n return self.value\r\n\r\n def location(self):\r\n return self.location\r\n\r\n\r\nclass TokenType:\r\n Unknown = \"Unknown\"\r\n Number = \"Number\"\r\n 
Text = \"Text\"\r\n Keyword = \"Keyword\"\r\n Identifier = \"Identifier\"\r\n Symbol = \"Symbol\"\r\n\r\n\r\nclass TokenValue:\r\n Add = \"Add\"\r\n Sub = \"Sub\"\r\n Mul = \"Mul\"\r\n Div = \"Div\"\r\n Mod = \"Mod\"\r\n Less = \"Less\"\r\n LessOrEquals = \"LEqual\"\r\n\r\n If = \"IfClausule\"\r\n Then = \"ThenClausule\"\r\n Else = \"ElseClausule\"\r\n\r\n", "id": "9907390", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "compilation/AST/compilation/Old/Token.py" }, { "content": "class Variable:\r\n def __init__(self, name, value):\r\n self.name = name\r\n self.value = value\r\n\r\n\r\nclass Function:\r\n def __init__(self, func):\r\n self.name = func.idx\r\n self.params = func.params\r\n self.stat_list = func.stat_list\r\n\r\n\r\nclass Context:\r\n def __init__(self, parent=None):\r\n self.local_var = []\r\n self.local_func = []\r\n self.children_context = []\r\n self.parent = parent\r\n\r\n def create_child_context(self):\r\n children_context = Context(self)\r\n self.children_context.append(children_context)\r\n return children_context\r\n\r\n def def_var(self, name, value):\r\n var = Variable(name, value)\r\n self.local_var.append(var)\r\n\r\n def redef_var(self, name, value):\r\n for var in self.local_var:\r\n if var.name == name:\r\n var.value = value\r\n\r\n def def_function(self, func):\r\n function = Function(func)\r\n self.local_func.append(function)\r\n\r\n def check_var_defined(self, name):\r\n for var in self.local_var:\r\n if var.name == name:\r\n return 1\r\n return self.parent is not None and self.parent.is_var_defined(name)\r\n\r\n def check_func_defined(self, name, num_params):\r\n for func in self.local_func:\r\n if func.name == name and len(func.params) == num_params:\r\n return 1\r\n return self.parent is not None and self.parent.is_func_defined(name, num_params)\r\n\r\n def is_local_var(self, name):\r\n return self.get_local_variable_info(name) is not None\r\n\r\n def is_local_func(self, name, params):\r\n return self.get_local_function_info(name, params) is not None\r\n\r\n def get_local_variable_info(self, name):\r\n for var in self.local_var:\r\n if var.name == name:\r\n return var.value\r\n if self.parent is None:\r\n return None\r\n else:\r\n return self.parent.get_local_variable_info(name)\r\n\r\n def get_local_function_info(self, name, num_params):\r\n for func in self.local_func:\r\n if func.name == name and len(func.params) == num_params:\r\n return func\r\n if self.parent is None:\r\n return None\r\n else:\r\n return self.parent.get_local_function_info(name, num_params)\r\n\r\n", "id": "5940742", "language": "Python", "matching_score": 1.3935861587524414, "max_stars_count": 0, "path": "compilation/AST/compilation/AST/Context.py" }, { "content": "from compilation.AST.Context import *\r\n\r\n\r\nclass Node:\r\n def check_semantic(self, context, errors):\r\n pass\r\n\r\n\r\nclass ProgramNode(Node):\r\n def __init__(self, declarations):\r\n self.declarations = declarations\r\n self.context = Context()\r\n\r\n def run(self):\r\n errors = self.check()\r\n ans = []\r\n if len(errors) == 0:\r\n for node in self.declarations:\r\n if isinstance(node, DeclarationNode):\r\n node.execute(self.context, errors, ans)\r\n return errors, ans\r\n\r\n def check(self):\r\n errors = []\r\n for node in self.declarations:\r\n node.check_semantic(self.context, errors)\r\n return errors\r\n\r\n\r\nclass DeclarationNode(Node):\r\n def execute(self, context, errors, ans):\r\n pass\r\n\r\n\r\nclass ExpressionNode(Node):\r\n def evaluate(self, context, errors, 
ans):\r\n pass\r\n\r\n\r\nclass ClassDeclarationNode(DeclarationNode):\r\n def __init__(self, idx, features, parent=None):\r\n self.id = idx\r\n self.parent = parent\r\n self.features = features\r\n\r\n\r\nclass FuncDeclarationNode(DeclarationNode):\r\n def __init__(self, idx, params, stat_list):\r\n self.idx = idx\r\n self.params = params\r\n self.stat_list = stat_list\r\n\r\n def check_semantic(self, context, errors):\r\n if context.check_func_defined(self.idx, len(self.params)):\r\n errors.append(f'Función {self.idx} ya declarada :(')\r\n else:\r\n context.def_function(self)\r\n\r\n def execute(self, context, errors, ans):\r\n pass\r\n\r\n\r\nclass ReturnNode(DeclarationNode):\r\n def __init__(self, expr):\r\n self.expr = expr\r\n\r\n def check_semantic(self, context, errors):\r\n self.expr.check_semantic(context, errors)\r\n\r\n def execute(self, context, errors, ans):\r\n pass\r\n\r\n\r\nclass AttrDeclarationNode(DeclarationNode):\r\n def __init__(self, idx, typex):\r\n self.id = idx\r\n self.type = typex\r\n\r\n\r\nclass ForNode(DeclarationNode):\r\n def __init__(self, idx, idx_value, expr, idx_counter, counter_one, counter_two, body):\r\n self.idx = idx\r\n self.idx_value = idx_value\r\n self.expr = expr\r\n self. idx_counter = idx_counter\r\n self.counter_one = counter_one\r\n self.counter_two = counter_two\r\n self.counter = \"\" + counter_one + counter_two\r\n self.body = body\r\n\r\n def check_semantic(self, context, errors):\r\n if self.idx != self.idx_counter:\r\n errors.append(f'El id {self.idx} debe ser igual a {self.idx_counter}')\r\n if self.counter_one != self.counter_two:\r\n errors.append(f'El {self.counter_one} debe ser igual a {self.counter_two}')\r\n child = context.create_child_context()\r\n child.def_var(self.idx, self.idx_value)\r\n for i in self.body:\r\n i.check_semantic(child, errors)\r\n\r\n def execute(self, context, errors, ans):\r\n child = context.create_child_context()\r\n child.def_var(self.idx, self.idx_value)\r\n while self.expr.evaluate(child, errors, ans):\r\n if self.counter == \"++\":\r\n value = child.get_local_variable_info(self.idx) + 1\r\n else:\r\n value = child.get_local_variable_info(self.idx) - 1\r\n child.redef_var(self.idx, value)\r\n for i in self.body:\r\n i.execute(child, errors, ans)\r\n\r\n\r\nclass VarDeclarationNode(DeclarationNode):\r\n def __init__(self, idx, expr):\r\n self.idx = idx\r\n self.expr = expr\r\n\r\n def check_semantic(self, context, errors):\r\n if context.check_var_defined(self.idx):\r\n errors.append(f'Variable {self.idx} no definida :(')\r\n\r\n def execute(self, context, errors, ans):\r\n if isinstance(self.expr, ExpressionNode):\r\n expr = self.expr.evaluate(context, errors, ans)\r\n context.def_var(self.idx, expr)\r\n else:\r\n context.def_var(self.idx, self.expr)\r\n\r\n\r\nclass IfExprNode(DeclarationNode):\r\n def __init__(self, eva_expr, body):\r\n self.eva_expr = eva_expr\r\n self.body = body\r\n\r\n def check_semantic(self, context, errors):\r\n self.eva_expr.check_semantic(context, errors)\r\n\r\n def execute(self, context, errors, ans):\r\n result = self.eva_expr.evaluate(context, errors, ans)\r\n if result:\r\n for i in self.body:\r\n i.execute(context, errors, ans)\r\n\r\n\r\nclass IfElseExprNode(DeclarationNode):\r\n def __init__(self, eva_expr, one_body, two_body):\r\n self.eva_expr = eva_expr\r\n self.one_body = one_body\r\n self.two_body = two_body\r\n\r\n def check_semantic(self, context, errors):\r\n self.eva_expr.check_semantic(context, errors)\r\n\r\n def execute(self, context, errors, 
ans):\r\n result = self.eva_expr.evaluate(context, errors, ans)\r\n if result:\r\n for i in self.one_body:\r\n i.execute(context, errors, ans)\r\n else:\r\n for i in self.two_body:\r\n i.execute(context, errors, ans)\r\n\r\n\r\nclass CallNode(ExpressionNode):\r\n def __init__(self, idx, args):\r\n self.idx = idx\r\n self.args = args\r\n\r\n def check_semantic(self, context, errors):\r\n if not context.check_func_defined(self.idx, len(self.args)):\r\n errors.append(f'Función {self.idx} no definida :(')\r\n\r\n def evaluate(self, context, errors, ans):\r\n func = context.get_local_function_info(self.idx, len(self.args))\r\n child = context.create_child_context()\r\n for i, p in enumerate(func.params):\r\n child.def_var(p, self.args[i].evaluate(context, errors, ans))\r\n for stat in func.stat_list:\r\n if isinstance(stat, ReturnNode):\r\n return stat.expr.evaluate(child, errors, ans)\r\n stat.execute(child, errors, ans)\r\n return None\r\n\r\n\r\nclass AtomicNode(ExpressionNode):\r\n def evaluate(self, context, errors, ans):\r\n pass\r\n\r\n\r\nclass BinaryNode(ExpressionNode):\r\n def evaluate(self, context, errors, ans):\r\n pass\r\n\r\n\r\nclass ConstantNumNode(AtomicNode):\r\n def __init__(self, value):\r\n self.value = value\r\n\r\n def check_semantic(self, context, errors):\r\n pass\r\n\r\n def evaluate(self, context, errors, ans):\r\n return self.value\r\n\r\n\r\nclass ConstantStrNode(AtomicNode):\r\n def __init__(self, value):\r\n self.value = value\r\n\r\n def check_semantic(self, context, errors):\r\n pass\r\n\r\n def evaluate(self, context, errors, ans):\r\n return self.value\r\n\r\n\r\nclass ConstantBoolNode(AtomicNode):\r\n def __init__(self, value):\r\n self.value = value\r\n\r\n def check_semantic(self, context, errors):\r\n pass\r\n\r\n def evaluate(self, context, errors, ans):\r\n return self.value\r\n\r\n\r\nclass VariableNode(AtomicNode):\r\n def __init__(self, idx):\r\n self.idx = idx\r\n\r\n def check_semantic(self, context, errors):\r\n if not context.check_var_defined(self.idx):\r\n errors.append(f'Variable {self.idx} no definida :(')\r\n\r\n def evaluate(self, context, errors, ans):\r\n var = context.get_local_variable_info(self.idx)\r\n if var is None:\r\n errors.append(f'Variable {self.idx} no definida :(')\r\n return\r\n return var\r\n\r\n\r\nclass AndNode(BinaryNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, bool]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"and\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue and rvalue\r\n\r\n\r\nclass OrNode(BinaryNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, bool]\r\n if not _type.__contains__(type(lvalue)) 
or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"or\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue or rvalue\r\n\r\n\r\nclass PlusNode(BinaryNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"+\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue + rvalue\r\n\r\n\r\nclass MinusNode(BinaryNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"-\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue - rvalue\r\n\r\n\r\nclass StarNode(BinaryNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"*\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue * rvalue\r\n\r\n\r\nclass DivNode(BinaryNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"/\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue / rvalue\r\n\r\n\r\nclass LeqNode(ExpressionNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n 
_type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"<=\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue <= rvalue\r\n\r\n\r\nclass GeqNode(ExpressionNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \">=\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue >= rvalue\r\n\r\n\r\nclass EqualNode(ExpressionNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"==\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue == rvalue\r\n\r\n\r\nclass NotNode(ExpressionNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"!=\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue != rvalue\r\n\r\n\r\nclass LessNode(ExpressionNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \"<\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue < rvalue\r\n\r\n\r\nclass GreaterNode(ExpressionNode):\r\n def __init__(self, left, right):\r\n self.left = left\r\n self.right = right\r\n\r\n def evaluate(self, context, errors, ans):\r\n lvalue = self.left.evaluate(context, errors, ans)\r\n rvalue = self.right.evaluate(context, errors, ans)\r\n if isinstance(lvalue, ExpressionNode):\r\n lvalue = lvalue.evaluate(context, errors, ans)\r\n if 
isinstance(rvalue, ExpressionNode):\r\n rvalue = rvalue.evaluate(context, errors, ans)\r\n _type = [int, float]\r\n if not _type.__contains__(type(lvalue)) or not _type.__contains__(type(rvalue)):\r\n errors.append(f'Operación \">\" entre un tipo {type(lvalue)} y un tipo {type(rvalue)} no definida :( ')\r\n return\r\n return lvalue > rvalue\r\n\r\n\r\nclass PrintNode(DeclarationNode):\r\n def __init__(self, expr):\r\n self.expr = expr\r\n\r\n def check_semantic(self, context, errors):\r\n self.expr.check_semantic(context, errors)\r\n\r\n def execute(self, context, errors, ans):\r\n result = self.expr.evaluate(context, errors, ans)\r\n if not len(errors):\r\n ans.append(result)\r\n", "id": "3807135", "language": "Python", "matching_score": 3.6083524227142334, "max_stars_count": 0, "path": "compilation/AST/compilation/AST/Nodes.py" }, { "content": "from compilation.Parser.Grammar import *\r\nfrom compilation.AST.Construction import *\r\nfrom compilation.Parser.Parser_LR1 import LR1Parser\r\nfrom compilation.Tokenizer.Tokenizer import tokenize\r\nfrom compilation.AST.Nodes import *\r\nfrom compilation.AST.Context import *\r\n\r\ng = Grammar()\r\n\r\nprogram = g.non_terminal('<program>', start_symbol=True)\r\nstat_list, stat = g.non_terminals('<stat_list> <stat>')\r\nlet_var, def_func, print_stat, arg_list, def_return = \\\r\n g.non_terminals('<let-var> <def-func> <print-stat> <arg-list> <def-return>')\r\nexpr, term, factor, atom = g.non_terminals('<expr> <term> <factor> <atom>')\r\nfunc_call, expr_list = g.non_terminals('<func-call> <expr-list>')\r\nif_decl, if_else_decl, for_decl = g.non_terminals('<if-decl> <if-else-decl> <for_decl>')\r\n\r\nifx, elsex, let, defx, printx, returnx = g.terminals('if else let def print return')\r\nsemi, comma, opar, cpar, colon, okey, ckey = g.terminals('semi comma o_bracket c_bracket colon o_key c_key')\r\nequal, plus, minus, star, div, andx, orx, true, false = g.terminals('assign plus minus mul div and or true false')\r\nleq, geq, equalx, notx, less, greater = g.terminals('leq geq equal not less greater')\r\nidx, num, stringx, forx = g.terminals('id number string for')\r\n\r\nprogram %= stat_list, lambda h, s: ProgramNode(s[1])\r\n\r\nstat_list %= stat, lambda h, s: [s[1]]\r\nstat_list %= stat + stat_list, lambda h, s: [s[1]] + s[2]\r\n\r\nstat %= let_var + semi, lambda h, s: s[1]\r\nstat %= def_func, lambda h, s: s[1]\r\nstat %= print_stat + semi, lambda h, s: s[1]\r\nstat %= if_decl, lambda h, s: s[1]\r\nstat %= if_else_decl, lambda h, s: s[1]\r\nstat %= def_return + semi, lambda h, s: s[1]\r\nstat %= for_decl, lambda h, s: s[1]\r\n\r\nif_decl %= ifx + opar + expr + cpar + okey + stat_list + ckey, lambda h, s: IfExprNode(s[3], s[6])\r\nif_else_decl %= ifx + opar + expr + cpar + okey + stat_list + ckey + elsex + okey + stat_list + ckey, \\\r\n lambda h, s: IfElseExprNode(s[3], s[6], s[10])\r\n\r\nfor_decl %= forx + opar + idx + equal + num + semi + expr + semi + idx + plus + plus + cpar + okey + stat_list + ckey, \\\r\n lambda h, s: ForNode(s[3], s[5], s[7], s[9], s[10], s[11], s[14])\r\n\r\ndef_return %= returnx + expr, lambda h, s: ReturnNode(s[2])\r\n\r\nprint_stat %= printx + expr, lambda h, s: PrintNode(s[2])\r\n\r\nlet_var %= let + idx + equal + expr, lambda h, s: VarDeclarationNode(s[2], s[4])\r\n\r\ndef_func %= defx + idx + opar + arg_list + cpar + okey + stat_list + ckey, \\\r\n lambda h, s: FuncDeclarationNode(s[2], s[4], s[7])\r\n\r\narg_list %= idx, lambda h, s: [s[1]]\r\narg_list %= idx + comma + arg_list, lambda h, s: [s[1]] + 
s[3]\r\n\r\n\r\nexpr %= expr + plus + term, lambda h, s: PlusNode(s[1], s[3])\r\nexpr %= expr + minus + term, lambda h, s: MinusNode(s[1], s[3])\r\nterm %= term + star + factor, lambda h, s: StarNode(s[1], s[3])\r\nterm %= term + div + factor, lambda h, s: DivNode(s[1], s[3])\r\n\r\nexpr %= expr + leq + term, lambda h, s: LeqNode(s[1], s[3])\r\nexpr %= expr + geq + term, lambda h, s: GeqNode(s[1], s[3])\r\nexpr %= expr + equalx + term, lambda h, s: EqualNode(s[1], s[3])\r\nexpr %= expr + notx + term, lambda h, s: NotNode(s[1], s[3])\r\nexpr %= expr + less + term, lambda h, s: LessNode(s[1], s[3])\r\nexpr %= expr + greater + term, lambda h, s: GreaterNode(s[1], s[3])\r\n\r\nexpr %= expr + andx + term, lambda h, s: AndNode(s[1], s[3])\r\nexpr %= expr + orx + term, lambda h, s: OrNode(s[1], s[3])\r\n\r\nexpr %= term, lambda h, s: s[1], None\r\n\r\n\r\nterm %= factor, lambda h, s: s[1]\r\nfactor %= atom, lambda h, s: s[1]\r\nfactor %= opar + expr + cpar, lambda h, s: s[2]\r\n\r\n\r\natom %= stringx, lambda h, s: ConstantStrNode(s[1])\r\natom %= num, lambda h, s: ConstantNumNode(s[1])\r\natom %= true, lambda h, s: ConstantBoolNode(s[1])\r\natom %= false, lambda h, s: ConstantBoolNode(s[1])\r\natom %= idx, lambda h, s: VariableNode(s[1])\r\natom %= func_call, lambda h, s: s[1]\r\n\r\nfunc_call %= idx + opar + expr_list + cpar, lambda h, s: CallNode(s[1], s[3])\r\n\r\nexpr_list %= expr, lambda h, s: [s[1]]\r\nexpr_list %= expr + comma + expr_list, lambda h, s: [s[1]] + s[3]\r\n\r\n\r\nparser = LR1Parser(g)\r\n\r\n\r\np = '''\r\nlet x = 20; \r\nlet y = x * 50; \r\nprint y + 15 / 3 - 1;\r\n'''\r\n\r\nh = ''' \r\ndef COMP(x,y) {\r\n return x < y; \r\n } \r\n print COMP(4,5);\r\n '''\r\n\r\nfac = '''\r\ndef fac(n){\r\n if(n <= 1){\r\n let x = 1;\r\n }\r\n else{\r\n let x = n*fac(n-1);\r\n } \r\n return x;\r\n}\r\nprint fac(5);\r\n'''\r\n\r\nfib = ''' \r\ndef fib(n){\r\n if(n < 2){\r\n let x = n;\r\n }\r\n else{\r\n let x = fib(n-1) + fib(n-2);\r\n }\r\n return x;\r\n} \r\nprint fib(12);\r\n'''\r\n\r\nf = '''\r\ndef SUM(x,y){\r\nlet z = 50;\r\n return x+z;\r\n }\r\n\r\nif (3 < 2 or False and True){\r\nprint \"Hello\";\r\n}\r\nelse{\r\nprint SUM(4,5);\r\n}\r\n '''\r\n\r\nd = ''' \r\nlet x = 58;\r\ndef f ( a, b ){\r\n return 5 + 6;\r\n }\r\nprint f( 5 + x, 7 );\r\n '''\r\n\r\ns = '''\r\nprint \" # Hello Bryan\";\r\n'''\r\n\r\nk = '''\r\nprint 2 + 7 * 5 / 7;\r\n'''\r\n\r\nb = ''' \r\nhfh #False True#\r\n'''\r\ng = '''\r\nprint 0 and True;\r\n'''\r\n\r\nforx = '''\r\nlet i = 15;\r\nfor(i = 0; i < 10;i++){\r\nprint i;\r\n}\r\n'''\r\n\r\nerrors_tokenizer, tokens = tokenize(forx)\r\n\r\nprint(errors_tokenizer)\r\n\r\nerrors, parse, operations = parser([i.type for i in tokens], ope=True)\r\n\r\n\r\ndef errors_parser(errors, tokens):\r\n err = []\r\n for i in errors:\r\n value = ''\r\n for t in tokens:\r\n if t.type == i:\r\n value = t.value\r\n break\r\n err.append(f'{i} -> {value} no ha sido bien definido :(')\r\n return err\r\n\r\n\r\nprint(errors_parser(errors, tokens))\r\n\r\nast = construction_ast(parse, operations, tokens)\r\n\r\nerrors_ast, answer = ast.run()\r\n\r\nprint(errors_ast)\r\n\r\nprint(answer)\r\n\r\n", "id": "5742551", "language": "Python", "matching_score": 3.367891788482666, "max_stars_count": 0, "path": "compilation/AST/compilation/Parser/Lenguaje.py" }, { "content": "from typing import NamedTuple\r\nimport re\r\n\r\n\r\nclass Token(NamedTuple):\r\n type: str\r\n value: str\r\n\r\n\r\ndef tokenize(code):\r\n ret = []\r\n errors = []\r\n keywords = {'print', 'if', 'else', 'for', 'Next', 
'Student', 'Element', 'Activity', 'Simulate', 'Rule',\r\n 'return', 'SetRule', '<=', '>=', '==', '!=', '<', '>', 'and', 'or'}\r\n token_specification = [\r\n\r\n ('comment', r'[\\#](\\w+)#|[\\#](\\w+)[ \\t]+(\\w+)#|'), # comment\r\n ('true', r'True'), # true\r\n ('false', r'False'), # false\r\n ('string', r'[\\\"](\\w+)[\\\"]|[\\\"](\\w+)[ \\t]+(\\w+)[\\\"]'), # string\r\n ('return', r'return'), # return\r\n ('eof', r'eof'), # eof\r\n ('let', r'let'), # let\r\n ('for', r'for'), # for\r\n ('if', r'if'), # if\r\n ('else', r'else'), # else\r\n ('def', r'def'), # def\r\n ('Student', r'Student'), # Student\r\n ('Element', r'Element'), # Element\r\n\r\n # Comparison operators\r\n ('leq', r'<='), # less than or equal\r\n ('geq', r'>='), # greater than or equal\r\n ('equal', r'=='), # equal\r\n ('not', r'!='), # not equal\r\n ('less', r'[<]'), # less than\r\n ('greater', r'[>]'), # greater than\r\n\r\n ('o_bracket', r'\\('), # (\r\n ('c_bracket', r'\\)'), # )\r\n\r\n ('o_key', r'\\{'), # {\r\n ('c_key', r'\\}'), # }\r\n\r\n ('comma', r','), # comma\r\n ('colon', r':'), # colon\r\n\r\n # Logic operators\r\n ('and', r'and'), # and\r\n ('or', r'or'), # or\r\n\r\n ('print', r'print'), # print\r\n\r\n ('number', r'\\d+(\\.\\d*)?'), # Integer or decimal number\r\n ('assign', r'='), # Assignment operator\r\n ('semi', r';'), # ;\r\n ('id', r'[A-Za-z]+'), # Identifiers\r\n\r\n # Arithmetic operators\r\n ('plus', r'[+]'), # plus\r\n ('minus', r'[\\-]'), # minus\r\n ('mul', r'[*]'), # mul\r\n ('div', r'[/]'), # div\r\n\r\n ('newline', r'\\n'), # Line endings\r\n ('skip', r'[ \\t]+'),\r\n ('salt', r'\\r'), # Skip over spaces and tabs\r\n ('mismatch', r'.'), # Any other character\r\n ]\r\n tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_specification)\r\n code += ' eof'\r\n for mo in re.finditer(tok_regex, code):\r\n kind = mo.lastgroup\r\n value = mo.group()\r\n if kind == 'number':\r\n value = float(value) if '.' 
in value else int(value)\r\n elif kind == 'true' or kind == 'false':\r\n value = eval(value)\r\n elif kind == 'id' and value in keywords:\r\n kind = value\r\n elif kind == 'newline' or kind == 'comment' or kind == 'skip' or kind == 'salt':\r\n continue\r\n elif kind == 'mismatch':\r\n errors.append(f'{value} expresión no reconocida en el lenguaje')\r\n continue\r\n ret.append(Token(kind, value))\r\n return errors, ret\r\n\r\n\r\n\r\n", "id": "2776882", "language": "Python", "matching_score": 1.7191380262374878, "max_stars_count": 0, "path": "compilation/AST/compilation/Tokenizer/Tokenizer.py" }, { "content": "import sys\r\n\r\nfrom backend.Entities.element import *\r\nfrom backend.Entities.student import *\r\nfrom backend.Entities.activity import *\r\nfrom backend.tools import *\r\nfrom backend.Simulation.simulation import *\r\nfrom backend.Entities.category import *\r\nfrom backend.Entities.environment import *\r\nfrom backend.Simulation.learningStrategy import *\r\n\r\ntipos_basicos = Element(\"tipos_basicos\")\r\noperadores = Element(\"operadores\")\r\ncolecciones = Element(\"colecciones\")\r\nmetodos_de_lista = Element(\"metodos_de_lista\")\r\nmetodos_de_diccionario = Element(\"metodos_de_diccionario\")\r\ncomprension_de_lista = Element(\"comprension_de_lista\")\r\ncondicionales = Element(\"condicionales\")\r\nciclos = Element(\"ciclos\")\r\nmetodos_de_cadena = Element(\"metodos_de_cadena\")\r\nfunciones = Element(\"funciones\")\r\ndecoradores = Element(\"decoradores\")\r\nfunciones_lambda = Element(\"funciones_lambda\")\r\ngeneradores = Element(\"generadores\")\r\nobjetos = Element(\"objetos\")\r\nherencia = Element(\"herencia\")\r\nherencia_multiple = Element(\"herencia_multiple\")\r\nclases_decoradoras = Element(\"clases_decoradoras\")\r\n\r\n\r\nelements = [tipos_basicos, operadores, colecciones, metodos_de_lista, metodos_de_cadena, metodos_de_diccionario,\r\n comprension_de_lista, condicionales, ciclos, funciones, funciones_lambda, generadores, decoradores,\r\n objetos, herencia, herencia_multiple, clases_decoradoras]\r\n\r\n\r\nfunciones.dependencies = [tipos_basicos]\r\noperadores.dependencies = [tipos_basicos]\r\ncondicionales.dependencies = [tipos_basicos]\r\ncolecciones.dependencies = [tipos_basicos]\r\nmetodos_de_lista.dependencies = [colecciones]\r\nmetodos_de_diccionario.dependencies = [colecciones]\r\ncomprension_de_lista.dependencies = [colecciones]\r\nmetodos_de_cadena.dependencies = [operadores]\r\ndecoradores.dependencies = [funciones]\r\nfunciones_lambda.dependencies = [funciones]\r\ngeneradores.dependencies = [funciones]\r\nobjetos.dependencies = [funciones]\r\nherencia.dependencies = [objetos]\r\nherencia_multiple.dependencies = [herencia]\r\nciclos.dependencies = [condicionales]\r\nclases_decoradoras.dependencies = [objetos, decoradores]\r\n\r\ntargets = [ciclos, clases_decoradoras, herencia_multiple, comprension_de_lista]\r\n\r\nstudent = Student(\"Edalberto\")\r\nstudent.goals = targets\r\n\r\nstudent.set_category(elements[0], Category.Learnable)\r\nfor i in range(1, 17):\r\n student.set_category(elements[i], Category.Not_learned)\r\n\r\n#tipos basicos 1\r\nvideo_tutorial_tipos_basicos = Activity(\"video_tutorial_tipos_basicos\", 0.5)\r\nvideo_tutorial_tipos_basicos.add_element(tipos_basicos, 2)\r\n\r\ndocumento_conferencia_tipos_basicos = Activity(\"documento_conferencia_tipos_basicos\", 1)\r\ndocumento_conferencia_tipos_basicos.add_element(tipos_basicos, 3)\r\n\r\nejercicios_practicos_tipos_basicos = Activity(\"ejercicios_practicos_tipos_basicos\", 
2)\r\nejercicios_practicos_tipos_basicos.add_element(tipos_basicos, 3)\r\n\r\n#operadores 2\r\nvideo_tutorial_operadores = Activity(\"video_tutorial_operadores\", 0.5)\r\nvideo_tutorial_operadores.add_element(operadores, 2)\r\n\r\ndocumento_conferencia_operadores = Activity(\"documento_conferencia_operadores\", 1)\r\ndocumento_conferencia_operadores.add_element(operadores, 3)\r\n\r\nejercicios_practicos_operadores = Activity(\"ejercicios_practicos_operadores\", 2)\r\nejercicios_practicos_operadores.add_element(operadores, 3)\r\n\r\n#colecciones 3\r\nvideo_tutorial_colecciones = Activity(\"video_tutorial_colecciones\", 0.5)\r\nvideo_tutorial_colecciones.add_element(colecciones, 2)\r\n\r\ndocumento_conferencia_colecciones = Activity(\"documento_conferencia_colecciones\", 1)\r\ndocumento_conferencia_colecciones.add_element(colecciones, 3)\r\n\r\nejercicios_practicos_colecciones = Activity(\"ejercicios_practicos_colecciones\", 2)\r\nejercicios_practicos_colecciones.add_element(colecciones, 3)\r\n\r\n#metodos de lista 4\r\nvideo_tutorial_metodos_de_lista = Activity(\"video_tutorial_metodos_de_lista\", 0.5)\r\nvideo_tutorial_metodos_de_lista.add_element(metodos_de_lista, 2)\r\n\r\ndocumento_conferencia_metodos_de_lista = Activity(\"documento_conferencia_metodos_de_lista\", 1)\r\ndocumento_conferencia_metodos_de_lista.add_element(metodos_de_lista, 3)\r\n\r\nejercicios_practicos_metodos_de_lista = Activity(\"ejercicios_practicos_metodos_de_lista\", 2)\r\nejercicios_practicos_metodos_de_lista.add_element(metodos_de_lista, 3)\r\n\r\n#metodos de diccionario 5\r\nvideo_tutorial_metodos_de_diccionario = Activity(\"video_tutorial_metodos_de_diccionario\", 0.5)\r\nvideo_tutorial_metodos_de_diccionario.add_element(metodos_de_diccionario, 2)\r\n\r\ndocumento_conferencia_metodos_de_diccionario = Activity(\"documento_conferencia_metodos_de_diccionario\", 1)\r\ndocumento_conferencia_metodos_de_diccionario.add_element(metodos_de_diccionario, 3)\r\n\r\nejercicios_practicos_metodos_de_diccionario = Activity(\"ejercicios_practicos_metodos_de_diccionario\", 2)\r\nejercicios_practicos_metodos_de_diccionario.add_element(metodos_de_diccionario, 3)\r\n\r\n#comprension de lista 6\r\nvideo_tutorial_comprension_de_lista = Activity(\"video_tutorial_comprension_de_lista\", 0.5)\r\nvideo_tutorial_comprension_de_lista.add_element(comprension_de_lista, 2)\r\n\r\ndocumento_conferencia_comprension_de_lista = Activity(\"documento_conferencia_comprension_de_lista\", 1)\r\ndocumento_conferencia_comprension_de_lista.add_element(comprension_de_lista, 3)\r\n\r\nejercicios_practicos_comprension_de_lista = Activity(\"ejercicios_practicos_comprension_de_lista\", 2)\r\nejercicios_practicos_comprension_de_lista.add_element(comprension_de_lista, 3)\r\n\r\n#condicionales 7\r\nvideo_tutorial_condicionales = Activity(\"video_tutorial_condicionales\", 0.5)\r\nvideo_tutorial_condicionales.add_element(condicionales, 2)\r\n\r\ndocumento_conferencia_condicionales = Activity(\"documento_conferencia_condicionales\", 1)\r\ndocumento_conferencia_condicionales.add_element(condicionales, 3)\r\n\r\nejercicios_practicos_condicionales = Activity(\"ejercicios_practicos_condicionales\", 2)\r\nejercicios_practicos_condicionales.add_element(condicionales, 3)\r\n\r\n#ciclos 8\r\nvideo_tutorial_ciclos = Activity(\"video_tutorial_ciclos\", 0.5)\r\nvideo_tutorial_ciclos.add_element(ciclos, 2)\r\n\r\ndocumento_conferencia_ciclos = Activity(\"documento_conferencia_ciclos\", 1)\r\ndocumento_conferencia_ciclos.add_element(ciclos, 3)\r\n\r\nejercicios_practicos_ciclos 
= Activity(\"ejercicios_practicos_ciclos\", 2)\r\nejercicios_practicos_ciclos.add_element(ciclos, 3)\r\n\r\n#metodos de cadena 9\r\nvideo_tutorial_metodos_de_cadena = Activity(\"video_tutorial_metodos_de_cadena\", 0.5)\r\nvideo_tutorial_metodos_de_cadena.add_element(metodos_de_cadena, 2)\r\n\r\ndocumento_conferencia_metodos_de_cadena = Activity(\"documento_conferencia_metodos_de_cadena\", 1)\r\ndocumento_conferencia_metodos_de_cadena.add_element(metodos_de_cadena, 3)\r\n\r\nejercicios_practicos_metodos_de_cadena = Activity(\"ejercicios_practicos_metodos_de_cadena\", 2)\r\nejercicios_practicos_metodos_de_cadena.add_element(metodos_de_cadena, 3)\r\n\r\n#funciones 10\r\nvideo_tutorial_funciones = Activity(\"video_tutorial_funciones\", 0.5)\r\nvideo_tutorial_funciones.add_element(funciones, 2)\r\n\r\ndocumento_conferencia_funciones = Activity(\"documento_conferencia_funciones\", 1)\r\ndocumento_conferencia_funciones.add_element(funciones, 3)\r\n\r\nejercicios_practicos_funciones = Activity(\"ejercicios_practicos_funciones\", 2)\r\nejercicios_practicos_funciones.add_element(funciones, 3)\r\n\r\n#decoradores 11\r\nvideo_tutorial_decoradores = Activity(\"video_tutorial_decoradores\", 0.5)\r\nvideo_tutorial_decoradores.add_element(decoradores, 2)\r\n\r\ndocumento_conferencia_decoradores = Activity(\"documento_conferencia_decoradores\", 1)\r\ndocumento_conferencia_decoradores.add_element(decoradores, 3)\r\n\r\nejercicios_practicos_decoradores = Activity(\"ejercicios_practicos_decoradores\", 2)\r\nejercicios_practicos_decoradores.add_element(decoradores, 3)\r\n\r\n#funciones_lambda 12\r\nvideo_tutorial_funciones_lambda = Activity(\"video_tutorial_funciones_lambda\", 0.5)\r\nvideo_tutorial_funciones_lambda.add_element(funciones_lambda, 2)\r\n\r\ndocumento_conferencia_funciones_lambda = Activity(\"documento_conferencia_funciones_lambda\", 1)\r\ndocumento_conferencia_funciones_lambda.add_element(funciones_lambda, 3)\r\n\r\nejercicios_practicos_funciones_lambda = Activity(\"ejercicios_practicos_funciones_lambda\", 2)\r\nejercicios_practicos_funciones_lambda.add_element(funciones_lambda, 3)\r\n\r\n#generadores 13\r\nvideo_tutorial_generadores = Activity(\"video_tutorial_generadores\", 0.5)\r\nvideo_tutorial_generadores.add_element(generadores, 2)\r\n\r\ndocumento_conferencia_generadores = Activity(\"documento_conferencia_generadores\", 1)\r\ndocumento_conferencia_generadores.add_element(generadores, 3)\r\n\r\nejercicios_practicos_generadores = Activity(\"ejercicios_practicos_generadores\", 2)\r\nejercicios_practicos_generadores.add_element(generadores, 3)\r\n\r\n#clases_decoradoras 14\r\nvideo_tutorial_clases_decoradoras = Activity(\"video_tutorial_clases_decoradoras\", 0.5)\r\nvideo_tutorial_clases_decoradoras.add_element(clases_decoradoras, 2)\r\n\r\ndocumento_conferencia_clases_decoradoras = Activity(\"documento_conferencia_clases_decoradoras\", 1)\r\ndocumento_conferencia_clases_decoradoras.add_element(clases_decoradoras, 3)\r\n\r\nejercicios_practicos_clases_decoradoras = Activity(\"ejercicios_practicos_clases_decoradoras\", 2)\r\nejercicios_practicos_clases_decoradoras.add_element(clases_decoradoras, 3)\r\n\r\n#clases_decoradoras 14\r\nvideo_tutorial_clases_decoradoras = Activity(\"video_tutorial_clases_decoradoras\", 0.5)\r\nvideo_tutorial_clases_decoradoras.add_element(clases_decoradoras, 2)\r\n\r\ndocumento_conferencia_clases_decoradoras = Activity(\"documento_conferencia_clases_decoradoras\", 1)\r\ndocumento_conferencia_clases_decoradoras.add_element(clases_decoradoras, 
3)\r\n\r\nejercicios_practicos_clases_decoradoras = Activity(\"ejercicios_practicos_clases_decoradoras\", 2)\r\nejercicios_practicos_clases_decoradoras.add_element(clases_decoradoras, 3)\r\n\r\n\r\n#objetos 15\r\nvideo_tutorial_objetos = Activity(\"video_tutorial_objetos\", 0.5)\r\nvideo_tutorial_objetos.add_element(objetos, 2)\r\n\r\ndocumento_conferencia_objetos = Activity(\"documento_conferencia_objetos\", 1)\r\ndocumento_conferencia_objetos.add_element(objetos, 3)\r\n\r\nejercicios_practicos_objetos = Activity(\"ejercicios_practicos_objetos\", 2)\r\nejercicios_practicos_objetos.add_element(objetos, 3)\r\n\r\n#herencia 16\r\nvideo_tutorial_herencia = Activity(\"video_tutorial_herencia\", 0.5)\r\nvideo_tutorial_herencia.add_element(herencia, 2)\r\n\r\ndocumento_conferencia_herencia = Activity(\"documento_conferencia_herencia\", 1)\r\ndocumento_conferencia_herencia.add_element(herencia, 3)\r\n\r\nejercicios_practicos_herencia = Activity(\"ejercicios_practicos_herencia\", 2)\r\nejercicios_practicos_herencia.add_element(herencia, 3)\r\n\r\n#herencia_multiple 17\r\nvideo_tutorial_herencia_multiple = Activity(\"video_tutorial_herencia_multiple\", 0.5)\r\nvideo_tutorial_herencia_multiple.add_element(herencia_multiple, 2)\r\n\r\ndocumento_conferencia_herencia_multiple = Activity(\"documento_conferencia_herencia_multiple\", 1)\r\ndocumento_conferencia_herencia_multiple.add_element(herencia_multiple, 3)\r\n\r\nejercicios_practicos_herencia_multiple = Activity(\"ejercicios_practicos_herencia_multiple\", 2)\r\nejercicios_practicos_herencia_multiple.add_element(herencia_multiple, 3)\r\n\r\nactivities = [video_tutorial_clases_decoradoras, video_tutorial_objetos, video_tutorial_herencia,\r\n video_tutorial_decoradores, video_tutorial_funciones, video_tutorial_operadores,\r\n video_tutorial_funciones_lambda, video_tutorial_metodos_de_cadena, video_tutorial_ciclos,\r\n video_tutorial_condicionales, video_tutorial_comprension_de_lista, video_tutorial_metodos_de_diccionario,\r\n video_tutorial_metodos_de_lista, video_tutorial_herencia_multiple, video_tutorial_tipos_basicos,\r\n video_tutorial_generadores, video_tutorial_colecciones,\r\n documento_conferencia_clases_decoradoras, documento_conferencia_objetos, documento_conferencia_herencia,\r\n documento_conferencia_decoradores, documento_conferencia_funciones, documento_conferencia_operadores,\r\n documento_conferencia_funciones_lambda, documento_conferencia_metodos_de_cadena, documento_conferencia_ciclos,\r\n documento_conferencia_condicionales, documento_conferencia_comprension_de_lista, documento_conferencia_metodos_de_diccionario,\r\n documento_conferencia_metodos_de_lista, documento_conferencia_herencia_multiple, documento_conferencia_tipos_basicos,\r\n documento_conferencia_generadores, documento_conferencia_colecciones,\r\n ejercicios_practicos_clases_decoradoras, ejercicios_practicos_objetos, ejercicios_practicos_herencia,\r\n ejercicios_practicos_decoradores, ejercicios_practicos_funciones, ejercicios_practicos_operadores,\r\n ejercicios_practicos_funciones_lambda, ejercicios_practicos_metodos_de_cadena,ejercicios_practicos_ciclos,\r\n ejercicios_practicos_condicionales, ejercicios_practicos_comprension_de_lista, ejercicios_practicos_metodos_de_diccionario,\r\n ejercicios_practicos_metodos_de_lista, ejercicios_practicos_herencia_multiple, ejercicios_practicos_tipos_basicos,\r\n ejercicios_practicos_generadores, ejercicios_practicos_colecciones]\r\n\r\n\r\ndef notlearned(env, element):\r\n count = 0\r\n for i in element.dependencies:\r\n if 
env.student.categories[i] is Category.Learned:\r\n count += 1\r\n if count >= len(element.dependencies) * env.rules_params[0]:\r\n return Category.Learnable\r\n return Category.Not_learned\r\n\r\n\r\ndef learnable(env, element):\r\n if env.student.elements[element] >= env.rules_params[1]:\r\n return Category.Learned\r\n return Category.Learnable\r\n\r\n\r\ndef learned(student, element):\r\n return Category.Learned\r\n\r\n\r\nrules = {Category.Not_learned: notlearned, Category.Learnable: learnable, Category.Learned: learned}\r\nrules_params = [0.5, 6]\r\nenv = Environment(elements, activities, rules, rules_params, student, Categorizer())\r\n\r\nresults = search_good_strategy(env)\r\nprint(results)\r\n\"\"\"\r\nprint(\"Activities:\")\r\nfor i in a[2]:\r\n print(i.name)\r\n\r\nprint(\"Categories:\")\r\nfor i in a[0].elements:\r\n print(i.name,\":\",a[0].student.categories[i])\r\n\r\nprint(\"Skill Points:\")\r\nfor i in a[0].elements:\r\n print(i.name, \":\", a[0].student.elements[i])\r\nprint(\"Goal Reached:\", a[3])\r\n\r\nprint(\"Elapsed Time: \", a[1])\r\n\"\"\"", "id": "11262369", "language": "Python", "matching_score": 1.9362798929214478, "max_stars_count": 0, "path": "backend/tester.py" }, { "content": "class Category:\r\n Learned = \"Learned\" # aprendido\r\n Not_learned = \"Not_learned\" # no aprendido\r\n Forgotten = \"Forgotten\" # olvidado\r\n Learnable = \"Learnable\" # aprendible\r\n\r\n\r\n\r\n", "id": "803399", "language": "Python", "matching_score": 0.42443954944610596, "max_stars_count": 0, "path": "backend/Entities/category.py" }, { "content": "from backend.tools import *\r\n\r\nclass Categorizer:\r\n def check_rules(self, env, element):\r\n env.student.categories[element] = env.rules[env.student.categories[element]](env, element)\r\n # esto significa: la nueva categoria de este elemento para el estudiante\r\n # va a ser el resultado de evaluar la regla de la categoria actual del elemento\r\n\r\n def recheck_categories(self, elements, env):\r\n add_attributes(elements, \"visited\", None)\r\n for element in elements:\r\n if element.visited is None:\r\n self.recheck_cat_visit(element, env)\r\n delete_attributes(elements, \"visited\")\r\n\r\n def recheck_cat_visit(self, v, env):\r\n v.visited = 1\r\n for u in v.dependencies:\r\n if u.visited is None:\r\n self.recheck_cat_visit(u, env)\r\n self.check_rules(env, v)\r\n", "id": "5036618", "language": "Python", "matching_score": 1.465579628944397, "max_stars_count": 0, "path": "backend/Agents/categorizer.py" }, { "content": "from random import *\r\nfrom backend.tools import *\r\nfrom backend.Agents.categorizer import *\r\n\r\n\r\nl = [1,2,3,4,5,5]\r\nl += [0,0,0,0]\r\nprint(l)\r\n\r\n", "id": "9467050", "language": "Python", "matching_score": 0.22032976150512695, "max_stars_count": 0, "path": "backend/pythonTester.py" } ]
1.46558
ccsm-cds-tools
[ { "content": "# Author: CMS Alliance to Modernize Healthcare, operated by THE MITRE Corporation.\n# (C) 2021 The MITRE Corporation. All Rights Reserved. \n# Approved for Public Release: 21-1556. Distribution Unlimited.\n#\n# Unless otherwise noted, this work is available under an Apache 2.0 license. \n# It was produced by the MITRE Corporation for the Division of Cancer Prevention \n# and Control, Centers for Disease Control and Prevention in accordance with the \n# Statement of Work, contract number 75FCMC18D0047, task order number 75D30120F09743.\n\nimport pandas as pd\nimport os\nfrom datetime import datetime, time\nimport os.path, time\n\n# This file converts the cervical cancer risk tables from Excel format into one CQL file:\n# 1) Read in the five NCI risk spreadsheets\n# 2) Use pandas to concatenate the resulting dataframes and strip out the columns which are not of interest (parameterize the names of the columns that ARE of interest)\n# 3) Write the resulting dataframe to a CQL file:\n# 4) Open a new blank file and write in the necessary header boilerplate\n# 5) Write the concatenated dataframe as a list of tuples\n# 6) Close the file\n\n## CONFIG ----------------------------------------------------------------------------\n# Output Excel file with individual sheets that contain each of the tables' relevant data columns\nMASTERTABLE = False\n# Generate CQL version of the relevant columns in each risk table\nGENERATECQLFILE = True\n# Output CQL file contents to the console for debugging\nDEBUG = False\n\nINPUT_DIRECTORY = '/risk-tables/risk-tables-excel/'\nOUTPUT_DIRECTORY_CQL_TABLES = '/cql/'\nOUTPUT_DIRECTORY_MASTER_TABLES = '/risk-tables/output/'\n\n# READ INPUT FILES ----------------------------------------------------------------------------\n# Define paths for reading and writing\ncwd = os.path.abspath(os.getcwd())\ninput_path = cwd + INPUT_DIRECTORY\noutput_path_master_tables = cwd + OUTPUT_DIRECTORY_MASTER_TABLES\noutput_path_cql_tables = cwd + OUTPUT_DIRECTORY_CQL_TABLES\n\n# Import raw risk table files\nrisk_spreadsheet1 = pd.ExcelFile(input_path + '1-General Table for Screening_locked.xlsx', engine='openpyxl')\nrisk_spreadsheet2 = pd.ExcelFile(input_path + '2-General Table for Surveillance_locked.xlsx', engine='openpyxl')\nrisk_spreadsheet3 = pd.ExcelFile(input_path + '3-General Table for Risk Following Colposcpy_locked.xlsx', engine='openpyxl')\nrisk_spreadsheet4 = pd.ExcelFile(input_path + '4-General Table for Post-Colpo.xlsx', engine='openpyxl')\nrisk_spreadsheet5 = pd.ExcelFile(input_path + '5-General Table for Post-Treatment_locked.xlsx', engine='openpyxl')\n\n# Helper function to get the \"Created\" date of a file\ndef getDateCreated(filePath):\n return time.ctime(os.path.getctime(filePath))\n\n# Get last downloaded dates for risk table\ndef getLastDownloadedDates():\n download_date1 = getDateCreated((input_path + '1-General Table for Screening_locked.xlsx'))\n download_date2 = getDateCreated((input_path + '2-General Table for Surveillance_locked.xlsx'))\n download_date3 = getDateCreated((input_path + '3-General Table for Risk Following Colposcpy_locked.xlsx'))\n download_date4 = getDateCreated((input_path + '4-General Table for Post-Colpo.xlsx'))\n download_date5 = getDateCreated((input_path + '5-General Table for Post-Treatment_locked.xlsx'))\n return {\n '1 - Screening': download_date1,\n '2 - Surveillance': download_date2,\n '3 - Colposcopy Results': download_date3,\n '4 - Post Colpo Surveillance': download_date4,\n '5 - Post Treatment Surveillance': 
download_date5\n }\n\n# Read Excel files\nrisk_table1 = pd.read_excel(risk_spreadsheet1)\nrisk_table2 = pd.read_excel(risk_spreadsheet2)\nrisk_table3 = pd.read_excel(risk_spreadsheet3)\nrisk_table4 = pd.read_excel(risk_spreadsheet4)\nrisk_table5 = pd.read_excel(risk_spreadsheet5)\n\n# Extract relevant columns from risk tables\ndef stripRiskTables():\n ROUND_VALUE = 2\n risk_table1_stripped = risk_table1[['PAST HISTORY (most recent)','Current HPV Result', 'Current PAP Result' , 'CIN3+ Immediate risk (%)', 'CIN3+ 5 year risk (%)', 'Management']].round(ROUND_VALUE)\n risk_table2_stripped = risk_table2[['PAST HISTORY (previous 2)', 'PAST HISTORY (most recent)', 'Current HPV Result', 'Current PAP Result', 'CIN3+ Immediate risk (%)', 'CIN3+ 5 year risk (%)', 'Management']].round(ROUND_VALUE)\n risk_table3_stripped = risk_table3[['Referral Screen Result', 'Biopsy Result', 'CIN3+ 1 year risk (%)', 'CIN3+ 5 year risk (%)', 'Management']].round(ROUND_VALUE)\n risk_table4_stripped = risk_table4[['Pre-Colpo Test Result', 'Post-Colpo Test Result - PAST HISTORY', 'Current HPV Result', 'Current PAP Result', 'CIN3+ Immediate risk (%)', 'CIN3+ 5 year risk (%)', 'Management']].round(ROUND_VALUE)\n risk_table5_stripped = risk_table5[['Biopsy Result Before Treatment', 'Current HPV Result', 'Current PAP Result', 'CIN3+ Immediate risk (%)', 'CIN3+ 5 year risk (%)', 'Management']].round(ROUND_VALUE)\n return {\n '1 - Screening': risk_table1_stripped,\n '2 - Surveillance': risk_table2_stripped,\n '3 - Colposcopy Results': risk_table3_stripped,\n '4 - Post Colpo Surveillance': risk_table4_stripped,\n '5 - Post Treatment Surveillance': risk_table5_stripped\n }\n\n# GENERATE MASTER TABLE ----------------------------------------------------------------------------\ndef createMasterTable(tableDictionary):\n writer = pd.ExcelWriter(output_path_master_tables + 'Master_NCI_Risk_Table.xlsx', engine='xlsxwriter')\n for sheetname, df in tableDictionary.items(): # loop through `dict` of dataframes\n df.to_excel(writer, sheet_name=sheetname, index=False) # send df to writer\n worksheet = writer.sheets[sheetname] # pull worksheet object\n for idx, col in enumerate(df.columns): # loop through all columns\n series = df[col]\n max_len = max((\n series.astype(str).map(len).max(), # len of largest item\n len(str(series.name)) # len of column name/header\n )) + 1 # adding a little extra space\n worksheet.set_column(idx, idx, max_len) # set column width\n\n writer.save()\n print('Master Table written to: ' + output_path_master_tables)\n\n\n# GENRATE CQL LIBRARY ----------------------------------------------------------------------------\ndef createCQLFile(tables, name = 'AutogeneratedRiskTables', version = '1.0.0', fhirVersion = '4.0.1'):\n # CQL boilerplate r\n header = '/*****************************************************' + \\\n '\\n * This file is generated from code - DO NOT EDIT!!! *' + \\\n '\\n *****************************************************/' + \\\n '\\nlibrary ' + name + ' version \\'' + version + '\\'' + \\\n '\\nusing FHIR version \\'' + fhirVersion + '\\'' + \\\n '\\ninclude \"FHIRHelpers\" version \\'4.0.1\\' called FHIRHelpers' + \\\n '\\n'\n\n # Metadata\n meta = '// Risk Estimates Supporting the 2019 ASCCP Risk-Based Management Consensus Guidelines' + \\\n '\\n// Generation of these risk estimates was supported by the Intramural Research Program of the National Cancer Institute.' + \\\n '\\n// The risk estimates are in the public domain in the United States of America and are made freely available elsewhere.' 
+ \\\n '\\n' + \\\n '\\n// METADATA' + \\\n '\\n\\t// Downloaded from: https://cervixca.nlm.nih.gov/RiskTables/' + \\\n '\\n\\t// Date Generated: ' + datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\") + \\\n '\\n\\t// Number of Risk Tables: ' + str(len(tables)) + \\\n '\\n\\t// Risk Tables:'\n downloadDates = getLastDownloadedDates()\n for key in tables:\n if downloadDates[key]:\n meta += '\\n\\t\\t// ' + key + \\\n '\\n\\t\\t\\t\\t// (last downloaded: ' + downloadDates[key] + ')'\n else:\n meta += '\\n\\t\\t// ' + key + \\\n '\\n\\t\\t\\t\\t// (Unable to find file downloaded date)'\n meta += '\\n'\n\n # API to allow other CQL libraries to read from risk tables\n api = '// Export Risk Tables' + \\\n '\\ndefine ClinicalSituations:' + \\\n '\\n{'\n\n # TABLE 1: GeneralScreening\n table1_data = tables['1 - Screening']\n api += '\\n\\tGeneralScreening: GeneralScreening,'\n screening = '// Table 1: Screening' + \\\n '\\ndefine GeneralScreening:' + \\\n '\\n{'\n\n for i in table1_data.index:\n history = table1_data['PAST HISTORY (most recent)'][i]\n hpv = table1_data['Current HPV Result'][i]\n pap = table1_data['Current PAP Result'][i]\n immediate = table1_data['CIN3+ Immediate risk (%)'][i]\n five_year = table1_data['CIN3+ 5 year risk (%)'][i]\n management = table1_data['Management'][i]\n\n screening += '\\n\\t{' + \\\n '\\n\\t\\thistory: \\'' + history + '\\',' + \\\n '\\n\\t\\thpv: \\'' + hpv + '\\',' + \\\n '\\n\\t\\tpap: \\'' + pap + '\\',' + \\\n '\\n\\t\\tvalue: { immediate: ' + str(immediate) + ', five_year: ' + str(five_year) + ' },' + \\\n '\\n\\t\\tmanagement: \\'' + management + '\\'' + \\\n '\\n\\t},'\n\n screening = screening[:-1]\n screening += '\\n}' + \\\n '\\n'\n\n # TABLE 2: Surveillance\n table2_data = tables['2 - Surveillance']\n api += '\\n\\tSurveillance: Surveillance,'\n surveillance = '// Table 2: Surveillance' + \\\n '\\ndefine Surveillance:' + \\\n '\\n{'\n\n for i in table2_data.index:\n if str(table2_data['PAST HISTORY (previous 2)'][i]) != 'nan':\n history_prev_2 = table2_data['PAST HISTORY (previous 2)'][i]\n else:\n history_prev_2 = 'none'\n history = table2_data['PAST HISTORY (most recent)'][i]\n hpv = table2_data['Current HPV Result'][i]\n pap = table2_data['Current PAP Result'][i]\n if (str(table2_data['CIN3+ Immediate risk (%)'][i])!= 'nan') and (str(table2_data['CIN3+ Immediate risk (%)'][i]) != 'NA'):\n immediate = table2_data['CIN3+ Immediate risk (%)'][i]\n else:\n immediate = -1\n if (str(table2_data['CIN3+ 5 year risk (%)'][i]) != 'nan') and (str(table2_data['CIN3+ 5 year risk (%)'][i]) != 'NA'):\n five_year = table2_data['CIN3+ 5 year risk (%)'][i]\n else:\n five_year = -1\n management = table2_data['Management'][i]\n\n surveillance += '\\n\\t{' + \\\n '\\n\\t\\thistory_prev_2: \\'' + str(history_prev_2) + '\\',' + \\\n '\\n\\t\\thistory: \\'' + history + '\\',' + \\\n '\\n\\t\\thpv: \\'' + hpv + '\\',' + \\\n '\\n\\t\\tpap: \\'' + pap + '\\',' + \\\n '\\n\\t\\tvalue: { immediate: ' + str(immediate) + ', five_year: ' + str(five_year) + ' },' + \\\n '\\n\\t\\tmanagement: \\'' + management + '\\'' + \\\n '\\n\\t},'\n\n surveillance = surveillance[:-1]\n surveillance += '\\n}' + \\\n '\\n'\n\n # TABLE 3: Colposcopy Results\n table3_data = tables['3 - Colposcopy Results']\n api += '\\n\\tColposcopyResults: ColposcopyResults,'\n colposcopy_results = '// Table 3: Colposcopy Results' + \\\n '\\ndefine ColposcopyResults:' + \\\n '\\n{'\n\n for i in table3_data.index:\n if str(table3_data['Referral Screen Result'][i]) != '-':\n referral_screen_result = 
table3_data['Referral Screen Result'][i]\n else:\n referral_screen_result = 'none'\n biopsy = table3_data['Biopsy Result'][i]\n if str(table3_data['CIN3+ 1 year risk (%)'][i]) != 'NA' and str(table3_data['CIN3+ 1 year risk (%)'][i]) != 'nan':\n one_year = table3_data['CIN3+ 1 year risk (%)'][i]\n else:\n one_year = -1\n if str(table3_data['CIN3+ 5 year risk (%)'][i]) != 'NA' and str(table3_data['CIN3+ 5 year risk (%)'][i]) != 'nan':\n five_year = table3_data['CIN3+ 5 year risk (%)'][i]\n else:\n five_year = -1\n management = table3_data['Management'][i]\n\n colposcopy_results += '\\n\\t{' + \\\n '\\n\\t\\treferral_screen_result: \\'' + str(referral_screen_result) + '\\',' + \\\n '\\n\\t\\tbiopsy: \\'' + biopsy + '\\',' + \\\n '\\n\\t\\tvalue: { immediate: ' + str(one_year) + ', five_year: ' + str(five_year) + ' },' + \\\n '\\n\\t\\tmanagement: \\'' + management + '\\'' + \\\n '\\n\\t},'\n\n colposcopy_results = colposcopy_results[:-1]\n colposcopy_results += '\\n}' + \\\n '\\n'\n\n # Table 4: Post Colposcopy\n table4_data = tables['4 - Post Colpo Surveillance']\n api += '\\n\\tPostColposcopy: PostColposcopy,'\n post_colposcopy = '// Table 4: Post Colposcopy Surveillance' + \\\n '\\ndefine PostColposcopy:' + \\\n '\\n{'\n\n for i in table4_data.index:\n\n pre_colposcopy_result = table4_data['Pre-Colpo Test Result'][i]\n\n if (str(table4_data['Post-Colpo Test Result - PAST HISTORY'][i]) != 'nan'):\n history = table4_data['Post-Colpo Test Result - PAST HISTORY'][i]\n else:\n history = 'none'\n hpv = table4_data['Current HPV Result'][i]\n pap = table4_data['Current PAP Result'][i]\n immediate = table4_data['CIN3+ Immediate risk (%)'][i]\n five_year = table4_data['CIN3+ 5 year risk (%)'][i]\n management = table4_data['Management'][i]\n\n post_colposcopy += '\\n\\t{' + \\\n '\\n\\t\\tpreColposcopyResult: \\'' + pre_colposcopy_result + '\\',' + \\\n '\\n\\t\\tpostColposcopyPastHistory: \\'' + history + '\\',' + \\\n '\\n\\t\\thpv: \\'' + hpv + '\\',' + \\\n '\\n\\t\\tpap: \\'' + pap + '\\',' + \\\n '\\n\\t\\tvalue: { immediate: ' + str(immediate) + ', five_year: ' + str(five_year) + ' },' + \\\n '\\n\\t\\tmanagement: \\'' + management + '\\'' + \\\n '\\n\\t},'\n\n post_colposcopy = post_colposcopy[:-1]\n post_colposcopy += '\\n}' + \\\n '\\n'\n\n # Table 5: Post Treatment\n table5_data = tables['5 - Post Treatment Surveillance']\n api += '\\n\\tPostTreatment: PostTreatment'\n post_treatment = '// Table 5: Post Treatment Surveillance' + \\\n '\\ndefine PostTreatment:' + \\\n '\\n{'\n\n for i in table5_data.index:\n biopsy = table5_data['Biopsy Result Before Treatment'][i]\n hpv = table5_data['Current HPV Result'][i]\n if (str(table5_data['Current PAP Result'][i]) != 'nan'):\n pap = table5_data['Current PAP Result'][i]\n else:\n pap = 'none'\n immediate = table5_data['CIN3+ Immediate risk (%)'][i]\n five_year = table5_data['CIN3+ 5 year risk (%)'][i]\n management = table5_data['Management'][i]\n\n post_treatment += '\\n\\t{' + \\\n '\\n\\t\\tbiopsy: \\'' + biopsy + '\\',' + \\\n '\\n\\t\\thpv: \\'' + hpv + '\\',' + \\\n '\\n\\t\\tpap: \\'' + pap + '\\',' + \\\n '\\n\\t\\tvalue: { immediate: ' + str(immediate) + ', five_year: ' + str(five_year) + ' },' + \\\n '\\n\\t\\tmanagement: \\'' + management + '\\'' + \\\n '\\n\\t},'\n\n post_treatment = post_treatment[:-1]\n post_treatment += '\\n}' + \\\n '\\n'\n\n api += '\\n}' + \\\n '\\n'\n\n # Append parts of CQL Library\n cql = '\\n'.join([header, meta, api, screening, surveillance, colposcopy_results, post_colposcopy, post_treatment])\n # Return CQL 
as a string\n return cql\n\ndef writeCqlFile(cqlString):\n file = open(output_path_cql_tables + \"AutogeneratedRiskTables.cql\", \"w\")\n file.write(cqlString)\n file.close\n print('CQL Risk Tables written to: ' + output_path_cql_tables)\n\n## MAIN ----------------------------------------------------------------------------\ndef main():\n tables = stripRiskTables()\n if (MASTERTABLE == True):\n createMasterTable(tables)\n if (GENERATECQLFILE == True):\n cql_output = createCQLFile(tables)\n writeCqlFile(cql_output)\n\nmain()\n", "id": "5510151", "language": "Python", "matching_score": 0, "max_stars_count": 3, "path": "risk-tables/generate-cql-risk-tables.py" } ]
0
iteal
[ { "content": "import numpy as np\nimport matplotlib.pyplot as plt\n\nthetas_onno = np.load(\"data/AQ2934_theta_onno.npy\")\nthetas_wp = np.load(\"data/AQ2934_theta_wp.npy\")\n\nfrom wormpose.pose.eigenworms import load_eigenworms_matrix, theta_to_modes, modes_to_theta\neigenworms_matrix = load_eigenworms_matrix(\"EigenWorms.csv\")\n\nfrom wormpose.pose.centerline import flip_theta\nfrom wormpose.pose.distance_metrics import angle_distance\n\ndef convert(theta):\n theta_flipped = flip_theta(theta)\n modes = theta_to_modes(theta, eigenworms_matrix)[:4]\n modes_flipped = theta_to_modes(theta_flipped, eigenworms_matrix)[:4]\n return (modes, modes_flipped), (theta, theta_flipped)\n\ndef mode_dist(m1, m2):\n return np.abs(m1 - m2)\n \nMODE_THRESHOLD = 12\n\nall_mode_errors = []\n\nfor index, (theta_wp, theta_onno) in enumerate(zip(thetas_wp, thetas_onno)):\n \n m_wp, t_wp = convert(theta_wp)\n m_onno, t_onno = convert(theta_onno)\n \n if np.abs(m_wp[0][2]) < MODE_THRESHOLD and np.abs(m_wp[1][2]) < MODE_THRESHOLD:\n continue\n \n options = [(0,0), (0,1)]\n \n dists = [angle_distance(t_wp[x], t_onno[y]) for x,y in options]\n min_dist = int(np.argmin(dists))\n \n chosen_theta_wp = t_wp[options[min_dist][0]]\n chosen_theta_onno = t_onno[options[min_dist][1]]\n \n chosen_modes_wp = m_wp[options[min_dist][0]]\n chosen_modes_onno = m_onno[options[min_dist][1]]\n \n mode_errors = mode_dist(chosen_modes_wp, chosen_modes_onno)\n all_mode_errors.append(mode_errors)\n\nall_mode_errors = np.array(all_mode_errors)\n\ndef plot_all_modes(no_text=False):\n xlim = 15\n ylim = 0.25\n axes_color = \"black\"\n\n def plot_mode_error(err, ax, index, label):\n weights = np.ones_like(err) / float(len(err))\n ax.hist(err, bins=np.arange(0, xlim, 0.5), weights=weights, label=label, alpha=0.7)\n if not no_text:\n ax.title.set_text(f\"a_{index + 1}\")\n ax.spines['bottom'].set_color(axes_color)\n ax.spines['left'].set_color(axes_color)\n ax.spines['top'].set_color(axes_color)\n ax.spines['right'].set_color(axes_color)\n ax.tick_params(axis='x', colors=axes_color)\n ax.tick_params(axis='y', colors=axes_color)\n\n fig, axes = plt.subplots(2, 2, gridspec_kw={'hspace': 0, 'wspace': 0})\n\n plt.setp(axes,\n xticks=np.arange(0, xlim, 5).tolist() + [xlim],\n yticks=np.arange(0, ylim, 0.1).tolist() + [ylim],\n ylim=[0, ylim],\n xlim=[0, xlim])\n\n for i in range(4):\n ax = axes[(int)(i / 2), i % 2]\n plot_mode_error(me[:, i], ax, i, label=\"\")\n\n for ax in fig.get_axes():\n ax.label_outer()\n if no_text:\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n suffix = \"\" if no_text else \"_with_text\"\n plt.savefig(f\"mode_error{suffix}.svg\", bbox_inches='tight', pad_inches=0)\n\n plt.show()\n \nme = all_mode_errors\nplot_all_modes(no_text=True)\nplot_all_modes(no_text=False)\n", "id": "3683965", "language": "Python", "matching_score": 4.005005836486816, "max_stars_count": 0, "path": "suppl comparison/UNUSED_make_fig_mode_error.py" }, { "content": "import numpy as np\nimport sys\nimport os\nimport matplotlib.pyplot as plt\n\n\ndef plot_all_modes(modes_errors, out_dir, no_text=False, name=\"\", xlim=2.3, ylim=0.3, show_median=False):\n\n axes_color = \"black\"\n\n def plot_mode_error(err, ax, index, label, show_median):\n weights = np.ones_like(err) / float(len(err))\n ax.hist(err, bins=np.arange(0, xlim, 0.1), weights=weights, label=label, alpha=0.7)\n if not no_text:\n ax.title.set_text(f\"a_{index + 1}\")\n ax.spines['bottom'].set_color(axes_color)\n ax.spines['left'].set_color(axes_color)\n 
ax.spines['top'].set_color(axes_color)\n ax.spines['right'].set_color(axes_color)\n ax.tick_params(axis='x', colors=axes_color)\n ax.tick_params(axis='y', colors=axes_color)\n\n if show_median:\n median = np.median(err)\n ax.axvline(x=median, color='k', linestyle='--', linewidth=1)\n\n fig, axes = plt.subplots(2, 2, gridspec_kw={'hspace': 0, 'wspace': 0})\n\n plt.setp(axes,\n xticks=np.arange(0, xlim, 1).tolist() + [xlim],\n yticks=np.arange(0, ylim, 0.1).tolist() + [ylim],\n ylim=[0, ylim],\n xlim=[0, xlim])\n\n for i in range(4):\n ax = axes[(int)(i / 2), i % 2]\n plot_mode_error(modes_errors[:, i], ax, i, label=\"\", show_median=show_median)\n\n for ax in fig.get_axes():\n ax.label_outer()\n if no_text:\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n suffix = \"\" if no_text else \"_with_text\"\n plt.savefig(os.path.join(out_dir, f\"mode_error_{name}_{suffix}.svg\"), bbox_inches='tight', pad_inches=0)\n\n\n\nif __name__ == \"__main__\":\n\n modes_errors_path = sys.argv[1]\n\n modes_errors = np.loadtxt(modes_errors_path)\n\n plot_all_modes(modes_errors, out_dir=\".\", no_text=True)\n\n plot_all_modes(modes_errors, out_dir=\".\", no_text=False)\n", "id": "1829035", "language": "Python", "matching_score": 3.1636250019073486, "max_stars_count": 0, "path": "fig5/mode_error.py" }, { "content": "import os\nimport shutil\nimport urllib.request\nimport sys\nimport numpy as np\nfrom wormpose.pose.eigenworms import load_eigenworms_matrix, theta_to_modes\n\nfrom mode_error import plot_all_modes\n\n\ndef convert_to_modes(theta_path):\n\n thetas = np.loadtxt(theta_path)\n\n eigenworms_matrix_path = \"EigenWorms.csv\"\n if not os.path.isfile(eigenworms_matrix_path):\n urllib.request.urlretrieve(\n \"https://raw.githubusercontent.com/iteal/wormpose/master/extras/EigenWorms.csv\", filename=\"EigenWorms.csv\"\n )\n\n eigenworms_matrix = load_eigenworms_matrix(eigenworms_matrix_path)\n\n all_modes = []\n for t in thetas:\n modes = theta_to_modes(t, eigenworms_matrix)\n all_modes.append(modes)\n\n return np.array(all_modes)\n\n\ndef save_mode_error(mode_error, out_dir, name):\n print(name, mode_error.shape)\n median = np.median(mode_error, axis=0)\n print(\"median mode error \", median[:4])\n\n np.savetxt(os.path.join(out_dir, \"modes_{}_error.txt\".format(name)), mode_error)\n\n\nif __name__ == \"__main__\":\n\n theta_labels = sys.argv[1]\n theta_predictions = sys.argv[2]\n\n outdir = \"fig5_mode_error\"\n if os.path.exists(outdir):\n shutil.rmtree(outdir)\n\n os.mkdir(outdir)\n\n modes_labels = convert_to_modes(theta_labels)\n modes_predictions = convert_to_modes(theta_predictions)\n\n coiled = np.abs(modes_labels[:, 2]) > 15\n uncoiled = ~coiled\n\n mode_error = np.abs(modes_labels - modes_predictions)\n\n tosave = {'all': mode_error,\n 'coiled': mode_error[coiled],\n 'uncoiled': mode_error[uncoiled]}\n show_median = {'all': False, 'coiled': True, 'uncoiled': True}\n\n for name, vals in tosave.items():\n save_mode_error(vals, out_dir=outdir, name=name)\n plot_all_modes(vals, out_dir=outdir, name=name, no_text=False, show_median=show_median[name])\n plot_all_modes(vals, out_dir=outdir, name=name, no_text=True, show_median=show_median[name])\n\n", "id": "2208327", "language": "Python", "matching_score": 3.1316001415252686, "max_stars_count": 0, "path": "fig5/fig5_all_mode_error.py" }, { "content": "import urllib.request\nimport os\nimport cv2\nimport numpy as np\nfrom wormpose.pose.eigenworms import load_eigenworms_matrix, modes_to_theta\nfrom wormpose.pose.centerline import calculate_skeleton\nfrom 
wormpose.images.worm_drawing import make_draw_worm_body\n\n\nif __name__ == \"__main__\":\n\n eigenworms_matrix_path = \"EigenWorms.csv\"\n if not os.path.isfile(eigenworms_matrix_path):\n urllib.request.urlretrieve(\n \"https://raw.githubusercontent.com/iteal/wormpose/master/extras/EigenWorms.csv\", filename=\"EigenWorms.csv\"\n )\n\n NUM_MODES = 4\n MODE_ERROR = 1.0\n REFERENCE = np.zeros(NUM_MODES)\n\n WORN_LENGTH = 1000\n\n THICKNESS = np.array(\n [\n 0.02410615,\n 0.02648045,\n 0.02832402,\n 0.02977654,\n 0.03114525,\n 0.03231844,\n 0.03340782,\n 0.03444134,\n 0.03527933,\n 0.03606145,\n 0.03673184,\n 0.03748603,\n 0.03804469,\n 0.03851955,\n 0.03893855,\n 0.03932961,\n 0.0396648,\n 0.03994413,\n 0.0400838,\n 0.0403352,\n 0.04055866,\n 0.04083799,\n 0.04111732,\n 0.04125698,\n 0.04150838,\n 0.04173184,\n 0.04209497,\n 0.04234637,\n 0.04256983,\n 0.04268156,\n 0.04276536,\n 0.04273743,\n 0.04287709,\n 0.04290503,\n 0.04293296,\n 0.04293296,\n 0.04307263,\n 0.04332402,\n 0.04346369,\n 0.04343575,\n 0.04346369,\n 0.04377095,\n 0.04413408,\n 0.04435754,\n 0.04458101,\n 0.04469274,\n 0.04472067,\n 0.04480447,\n 0.0448324,\n 0.04486034,\n 0.04488827,\n 0.04480447,\n 0.0447486,\n 0.0447486,\n 0.0448324,\n 0.04477654,\n 0.04469274,\n 0.04458101,\n 0.04452514,\n 0.04449721,\n 0.04435754,\n 0.04421788,\n 0.04421788,\n 0.04424581,\n 0.04407821,\n 0.04402235,\n 0.04391061,\n 0.04382682,\n 0.04360335,\n 0.04332402,\n 0.04304469,\n 0.04276536,\n 0.04256983,\n 0.04243017,\n 0.04217877,\n 0.04198324,\n 0.04156425,\n 0.04106145,\n 0.04050279,\n 0.0400838,\n 0.03969274,\n 0.03913408,\n 0.03871508,\n 0.03812849,\n 0.03756983,\n 0.03706704,\n 0.03667598,\n 0.03608939,\n 0.03539106,\n 0.03458101,\n 0.03357542,\n 0.03251397,\n 0.03122905,\n 0.0299162,\n 0.02832402,\n 0.02678771,\n 0.02502793,\n 0.02312849,\n 0.02075419,\n 0.01790503,\n ]\n )\n\n eigenworms_matrix = load_eigenworms_matrix(eigenworms_matrix_path)\n\n draw_worm_body = make_draw_worm_body(body_color=51)\n\n thickness = THICKNESS * WORN_LENGTH\n\n output_image_shape = (WORN_LENGTH + 100, WORN_LENGTH + 100)\n\n for i in range(NUM_MODES):\n modes = REFERENCE.copy()\n modes[i] = MODE_ERROR\n theta = modes_to_theta(modes, eigenworms_matrix)\n skeleton = calculate_skeleton(theta, worm_length=WORN_LENGTH, canvas_width_height=output_image_shape)\n\n im = np.zeros(output_image_shape, dtype=np.uint8)\n draw_worm_body(\n thickness, im, skeleton=skeleton,\n )\n\n bg = im == 0\n\n im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGRA)\n im[:, :, -1][bg] = 0\n cv2.imwrite(f\"{i}_mode.png\", im)\n", "id": "3765200", "language": "Python", "matching_score": 0.9643124938011169, "max_stars_count": 0, "path": "fig5/draw_mode_error_worm.py" }, { "content": "import os, glob\nimport cv2\nimport numpy as np\n\n\nimport shutil\n\nfrom wormpose.dataset.image_processing.simple_frame_preprocessing import SimpleFramePreprocessing\n\n\ndef make_images(root_path: str):\n\n out_dir = \"out\"\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n os.mkdir(out_dir)\n\n files = list(sorted(glob.glob(os.path.join(root_path, \"*.png\"))))\n\n sp = SimpleFramePreprocessing()\n out_shape = np.array((150, 150))\n\n for f in files:\n im = cv2.imread(f)\n\n segmentation_mask, _ = sp.process(cv2.cvtColor(im, cv2.COLOR_BGR2GRAY))\n im[segmentation_mask == 0] = 0\n\n where_worm = np.where(segmentation_mask != 0)\n worm_roi = np.s_[\n np.min(where_worm[0]) : np.max(where_worm[0]), np.min(where_worm[1]) : np.max(where_worm[1]),\n ]\n center = (\n (worm_roi[0].start + worm_roi[0].stop) // 2,\n 
(worm_roi[1].start + worm_roi[1].stop) // 2,\n )\n\n top_left = (center - out_shape / 2).astype(int)\n bottom_right = (center + out_shape / 2).astype(int)\n\n roi_coord = np.s_[top_left[0] : bottom_right[0], top_left[1] : bottom_right[1]]\n\n im_seg = im[roi_coord]\n img_path = os.path.join(out_dir, os.path.basename(f))\n\n only_worm = (im_seg[:,:,0] == im_seg[:,:,1]) & (im_seg[:,:,1] == im_seg[:,:,2]) & (im_seg[:,:,0] > 0)\n\n worm_colors = im_seg[only_worm]\n\n min_val = np.min(worm_colors[worm_colors>0])\n max_val = np.max(worm_colors[worm_colors>0])\n\n spread_histo = ((im_seg[:,:,0].astype(float) - min_val)/(max_val - min_val)*200 + 50).astype(np.uint8)\n im_seg[:,:,0][only_worm] = spread_histo[only_worm]\n im_seg[:, :, 1][only_worm] = spread_histo[only_worm]\n im_seg[:, :, 2][only_worm] = spread_histo[only_worm]\n\n cv2.imwrite(os.path.join(out_dir, os.path.basename(f)), im_seg)\n\n cmd = f\"convert \\\"{img_path}\\\" -transparent black \\\"{img_path}\\\"\"\n #print(cmd)\n os.system(cmd)\n\n \n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"root_path\", type=str, help=\"Root folder of results images\")\n args = parser.parse_args()\n\n make_images(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "10671843", "language": "Python", "matching_score": 6.184239387512207, "max_stars_count": 0, "path": "suppl comparison/compare_viz/make_images.py" }, { "content": "import os, glob\nimport cv2\nimport numpy as np\nimport tempfile\n\nimport shutil\n\nfrom wormpose.dataset.image_processing.simple_frame_preprocessing import SimpleFramePreprocessing\n\n\ndef make_gif(root_path: str, fps: int, rescale: float):\n\n temp_d = tempfile.mkdtemp()\n\n files = list(sorted(glob.glob(os.path.join(root_path, \"*.png\"))))\n\n sp = SimpleFramePreprocessing()\n out_shape = np.array((200, 200))\n\n for f in files:\n im = cv2.imread(f)\n\n if rescale != 1.:\n im = cv2.resize(im, None, fx=rescale, fy=rescale)\n\n segmentation_mask, _ = sp.process(cv2.cvtColor(im, cv2.COLOR_BGR2GRAY))\n\n where_worm = np.where(segmentation_mask != 0)\n worm_roi = np.s_[\n np.min(where_worm[0]) : np.max(where_worm[0]), np.min(where_worm[1]) : np.max(where_worm[1]),\n ]\n center = (\n (worm_roi[0].start + worm_roi[0].stop) // 2,\n (worm_roi[1].start + worm_roi[1].stop) // 2,\n )\n\n top_left = (center - out_shape / 2).astype(int)\n bottom_right = (center + out_shape / 2).astype(int)\n\n roi_coord = np.s_[top_left[0] : bottom_right[0], top_left[1] : bottom_right[1]]\n\n im_seg = im[roi_coord]\n cv2.imwrite(os.path.join(temp_d, os.path.basename(f)), im_seg)\n\n delay = int(100 / float(fps))\n cmd = f\"convert -delay {delay} {temp_d}/*.png movie.gif\"\n print(cmd)\n os.system(cmd)\n\n shutil.rmtree(temp_d)\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"root_path\", type=str, help=\"Root folder of results images\")\n parser.add_argument(\"--fps\", type=int, help=\"frames per sec\", default=20)\n parser.add_argument(\"--rescale\", type=float, help=\"resize\", default=1)\n args = parser.parse_args()\n\n make_gif(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "6419769", "language": "Python", "matching_score": 0.44998258352279663, "max_stars_count": 0, "path": "extras/animations/make_gif.py" }, { "content": "\"\"\"\nImplementation of BaseFeaturesDataset to load Tierpsy tracker features\n\"\"\"\n\nimport glob\nimport json\nimport os\n\nimport h5py\nimport numpy as np\nfrom xml.dom import minidom\n\nfrom 
wormpose.dataset.base_dataset import BaseFeaturesDataset\n\n\ndef get_frames_timestamp(f):\n frames_timestamp = f[\"timestamp\"][\"raw\"][:]\n\n # check if the timestamp is valid (not all NaN)\n if np.all(np.isnan(frames_timestamp)):\n raise ValueError(\n \"Timestamp is invalid (field timestamp/raw), \"\n \"Please check that you selected the option 'extract_timestamp'\"\n \" in Tierpsy, or refer to the Tierpsy documentation.\"\n )\n\n return frames_timestamp\n\n\ndef get_frame_rate(f):\n mask_attrs = f[\"mask\"].attrs\n if \"fps\" in mask_attrs:\n frame_rate = mask_attrs[\"fps\"]\n elif \"expected_fps\" in mask_attrs:\n frame_rate = mask_attrs[\"expected_fps\"]\n else:\n frame_rate = 1\n return frame_rate\n\n\ndef get_ventral_side(f):\n # Trying to get the ventral side attribute (optional)\n try:\n ventral_side = json.loads(f[\"experiment_info\"][()].decode(\"utf-8\"))[\"ventral_side\"]\n except:\n ventral_side = None\n return ventral_side\n\n\ndef get_stage_position(f):\n len_frames = len(f[\"mask\"])\n try:\n stage_position_pix = f[\"stage_position_pix\"][:]\n except:\n stage_position_pix = np.zeros((len_frames, 2), dtype=float)\n return stage_position_pix\n\n\ndef get_ratio_microns_pixels(f):\n try:\n xml_info = f[\"xml_info\"][()]\n xml = minidom.parseString(xml_info)\n microns = xml.getElementsByTagName(\"microns\")[0].getElementsByTagName(\"x\")[0].firstChild.nodeValue\n pixels = xml.getElementsByTagName(\"pixels\")[0].getElementsByTagName(\"x\")[0].firstChild.nodeValue\n ratio_microns_pixel = abs(float(microns)) / abs(float(pixels))\n except:\n ratio_microns_pixel = 1.0\n\n return ratio_microns_pixel\n\n\ndef _match_indexes(x, y):\n sorted_index = np.searchsorted(x, y)\n yindex = np.take(np.arange(len(x)), sorted_index, mode=\"clip\")\n mask = x[yindex] != y\n result = np.ma.array(yindex, mask=mask)\n return result\n\n\ndef _get_width_measurement(name, ratio_microns_pixel, features_timeseries):\n feature_names = features_timeseries.dtype.names\n measurement_name = list(filter(lambda x: name in x and \"width\" in x, feature_names))[0]\n measurement = features_timeseries[measurement_name] * ratio_microns_pixel\n return measurement\n\n\ndef get_skeletons_timestamp(features_f, skeletons, features_timestamp):\n if \"trajectories_data\" in features_f:\n\n trajectories_data = features_f.get(\"trajectories_data\")\n dt = [(\"skeleton_id\", int)]\n with trajectories_data.astype(dt):\n skeletons_id = trajectories_data[\"skeleton_id\"][:]\n\n skeletons_timestamp = np.zeros(len(skeletons), dtype=int)\n for i in range(len(skeletons)):\n skeletons_timestamp[i] = np.where(skeletons_id == i)[0][0]\n else:\n skeletons_timestamp = features_timestamp\n\n return skeletons_timestamp\n\n\ndef _resample(series, cur_timestamp, new_timestamp):\n len_new = len(new_timestamp)\n resampled_series = np.full((len_new,) + series.shape[1:], np.nan, dtype=series.dtype)\n\n matched_indexes = _match_indexes(cur_timestamp, new_timestamp)\n\n for index in range(len_new):\n\n if not matched_indexes.mask[index]:\n resampled_series[index] = series[matched_indexes[index]]\n\n return resampled_series\n\n\ndef get_features_filename(root_dir: str, name: str):\n \"\"\"\n The features filename has different formats:\n ex: videoname_features.hdf5 or sometimes videoname_featuresN.hdf5\n \"\"\"\n return glob.glob(os.path.join(root_dir, name, name + \"*features*.hdf5\"))[0]\n\n\ndef get_frames_filename(root_dir: str, name: str):\n return os.path.join(root_dir, name, name + \".hdf5\")\n\n\ndef _read_features(root_dir, name):\n h5file 
= get_frames_filename(root_dir, name)\n h5featurefile = get_features_filename(root_dir, name)\n\n with h5py.File(h5file, \"r\") as f:\n frames_timestamp = get_frames_timestamp(f)\n frame_rate = get_frame_rate(f)\n ventral_side = get_ventral_side(f)\n stage_position_pix = get_stage_position(f)\n ratio_microns_pixel = get_ratio_microns_pixels(f)\n\n with h5py.File(h5featurefile, \"r\") as f:\n skeletons = f[\"coordinates\"][\"skeletons\"][:]\n features_timeseries = get_features_timeseries(f)\n features_timestamp = features_timeseries[\"timestamp\"].astype(int)\n skeletons_timestamp = get_skeletons_timestamp(f, skeletons, features_timestamp)\n\n head_width = _get_width_measurement(\"head\", ratio_microns_pixel, features_timeseries)\n midbody_width = _get_width_measurement(\"midbody\", ratio_microns_pixel, features_timeseries)\n tail_width = _get_width_measurement(\"tail\", ratio_microns_pixel, features_timeseries)\n measurements = np.stack([head_width, midbody_width, tail_width], axis=1)\n\n measurements = _resample(measurements, cur_timestamp=features_timestamp, new_timestamp=frames_timestamp)\n skeletons = _resample(skeletons, cur_timestamp=skeletons_timestamp, new_timestamp=frames_timestamp)\n\n # convert skeletons coordinates from microns to pixels\n skeletons = skeletons * ratio_microns_pixel - stage_position_pix[:, np.newaxis, :]\n\n head_width = measurements[:, 0]\n midbody_width = measurements[:, 1]\n tail_width = measurements[:, 2]\n\n return {\n \"skeletons\": skeletons,\n \"head_width\": head_width,\n \"midbody_width\": midbody_width,\n \"tail_width\": tail_width,\n \"frame_rate\": frame_rate,\n \"ventral_side\": ventral_side,\n \"timestamp\": frames_timestamp,\n }\n\n\ndef get_features_timeseries(f):\n features_timeseries = f[list(filter(lambda x: \"timeseries\" in x, f))[0]][:]\n # only use one worm (smallest index)\n worm_indexes = features_timeseries[\"worm_index\"]\n worm_index = np.min(worm_indexes)\n cur_worm = np.where(worm_indexes == worm_index)[0]\n features_timeseries = features_timeseries[cur_worm]\n return features_timeseries\n\n\nclass FeaturesDataset(BaseFeaturesDataset):\n def __init__(self, dataset_path, video_names):\n self._features = {}\n for video_name in video_names:\n self._features[video_name] = _read_features(dataset_path, video_name)\n\n def get_features(self, video_name):\n return self._features[video_name]\n", "id": "2482309", "language": "Python", "matching_score": 3.4058327674865723, "max_stars_count": 29, "path": "wormpose/dataset/loaders/tierpsy/features_dataset.py" }, { "content": "\"\"\"\nThis module contains the WormPose API: abstract classes to subclass in order to add a custom dataset\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom typing import List, Tuple\nimport numpy as np\n\n\nclass BaseFramePreprocessing(ABC):\n \"\"\"\n Specific image processing logic to isolate the worm in the dataset images\n This object must be pickable (no inner functions for example)\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n pass\n\n @abstractmethod\n def process(self, frame: np.ndarray) -> Tuple[np.ndarray, int]:\n \"\"\"\n Segment the worm object of interest in the image: returns a mask image of the same shape as frame,\n where the pixels belong to the worm object of interest are 1, and all the others are 0\n Also calculates the average value of the background pixels.\n\n :param frame: image to process\n :return: Segmentation mask image , background color\n \"\"\"\n pass\n\n\nclass BaseFramesDataset(ABC):\n \"\"\"\n Specific code to the dataset to 
access the frames only.\n A dataset is divided into several \"videos\". Each video contain a list of images or \"frames\".\n \"\"\"\n\n @abstractmethod\n def video_names(self) -> List[str]:\n \"\"\"\n A dataset is composed of several videos\n\n :return: A list of unique ids (string) identifying a video in the dataset\n \"\"\"\n pass\n\n @abstractmethod\n def open(self, video_name: str):\n \"\"\"\n The frames of the dataset are accessed trough a context manager object, in this way we have the option of not\n entirely loading a big image array in memory if possible\n\n :param video_name: One video unique id (should be one value of video_names())\n :return: A context manager object that can be used with the \"with\" python statement giving access to the\n frames (array of images) of the dataset\n Example use : frames = open(\"video0\")\n \"\"\"\n pass\n\n\nclass BaseFeaturesDataset(ABC):\n \"\"\"\n Specific code to the dataset to access the features for each video of the dataset\n \"\"\"\n\n @abstractmethod\n def get_features(self, video_name: str) -> dict:\n \"\"\"\n Returns a dictionary of features\n\n :return: dictionary with keys: skeletons, head_width, midbody_width, tail_width, frame_rate, ventral_side, timestamp\n WHERE\n skeletons: Coordinates x y of the centerline for each frame in pixel coordinates,\n a numpy floating point array of shape (N number of frames, J number of joints, 2)\n The quality of the synthetic images will start degrading when J < 50, consider interpolating if less joints\n head_width: numpy floating point array of shape N\n midbody_width: numpy floating point array of shape N\n tail_width: numpy floating point array of shape N\n frame_rate: One float number for the frame rate of the video.\n ventral_side: Optional One string value for the entire video. \\'clockwise\\' or \\'anticlockwise\\'. If None, defaults to anticlockwise\n timestamp: Optional Timestamp of each frame, a numpy array of shape (N number of frames). 
If None, will consider each frame to be equidistant in time\n \"\"\"\n pass\n\n\nclass BaseResultsExporter(ABC):\n \"\"\"\n Optional Results Exporter\n \"\"\"\n\n @abstractmethod\n def export(self, video_name: str, **kwargs):\n pass\n", "id": "8943376", "language": "Python", "matching_score": 0.5775814056396484, "max_stars_count": 29, "path": "wormpose/dataset/base_dataset.py" }, { "content": "import glob, os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef _ecdf_helper(ax, data, weights, swapaxes, label):\n data = np.asarray(data)\n mask = ~np.isnan(data)\n data = data[mask]\n sort = np.argsort(data)\n data = data[sort]\n if weights is None:\n # Ensure that we end at exactly 1, avoiding floating point errors.\n cweights = (1 + np.arange(len(data))) / len(data)\n else:\n weights = weights[mask]\n weights = weights / np.sum(weights)\n weights = weights[sort]\n cweights = np.cumsum(weights)\n if not swapaxes:\n if not ax.get_ylabel():\n ax.set_ylabel(label)\n else:\n if not ax.get_xlabel():\n ax.set_xlabel(label)\n return data, cweights\n\n\ndef plot_ecdf(ax, data, *, weights=None, swapaxes=False, **kwargs):\n \"\"\"Plot an empirical cumulative distribution function.\"\"\"\n data, cweights = _ecdf_helper(ax, data, weights, swapaxes, \"\")\n if not swapaxes:\n return ax.plot([data[0], *data], [0, *cweights], drawstyle=\"steps-post\", **kwargs)[0]\n else:\n return ax.plot([0, *cweights], [data[0], *data], drawstyle=\"steps-pre\", **kwargs)[0]\n\n\ndef plot_eccdf(ax, data, *, weights=None, swapaxes=False, **kwargs):\n \"\"\"Plot an empirical, complementary cumulative distribution function.\"\"\"\n data, cweights = _ecdf_helper(ax, data, weights, swapaxes, \"\")\n if not swapaxes:\n return ax.plot([*data, data[-1]], [1, *1 - cweights], drawstyle=\"steps-pre\", **kwargs)[0]\n else:\n return ax.plot([1, *1 - cweights], [*data, data[-1]], drawstyle=\"steps-post\", **kwargs)[0]\n\n\ndef make_ecdf(filenames, out_name, remove_all_labels):\n\n plt.clf()\n fig, ax = plt.subplots()\n\n for path in filenames:\n name = os.path.basename(path)[: -len(\"_score.txt\")]\n print(name)\n scores = np.loadtxt(path)\n\n errors = 1 - scores\n\n label = \" \" if remove_all_labels else name\n plot_ecdf(ax, errors, swapaxes=False, label=label, color=\"black\" if \"synth\" in name else None)\n\n frame1 = plt.gca()\n\n if remove_all_labels:\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n\n frame1.axes.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])\n\n plt.legend(loc=\"upper right\", bbox_to_anchor=(0.99, 0.85))\n plt.savefig(out_name, bbox_inches=\"tight\")\n plt.show()\n\n\ndef ecdf(sample):\n\n # convert sample to a numpy array, if it isn't already\n sample = np.atleast_1d(sample)\n\n # find the unique values and their corresponding counts\n quantiles, counts = np.unique(sample, return_counts=True)\n\n # take the cumulative sum of the counts and divide by the sample size to\n # get the cumulative probabilities between 0 and 1\n cumprob = np.cumsum(counts).astype(np.double) / sample.size\n\n return quantiles, cumprob\n\n\nif __name__ == \"__main__\":\n\n make_ecdf(sorted(glob.glob(os.path.join(\"data\", \"*.txt\"))), \"all.svg\", remove_all_labels=True)\n make_ecdf(sorted(glob.glob(os.path.join(\"data\", \"*.txt\"))), \"withlabels_all.svg\", remove_all_labels=False)\n", "id": "3796913", "language": "Python", "matching_score": 0.9796387553215027, "max_stars_count": 0, "path": "suppl comparison/A_image_error/make_error_plot.py" }, { "content": "import numpy as np\nimport h5py\nimport 
os\n\nfrom wormpose.dataset.loader import load_dataset\n\ndef save_results(dataset_loader, dataset_path, results_root_dir):\n\n dataset = load_dataset(dataset_loader, dataset_path)\n\n all_scores = []\n all_theta = []\n\n for video_name in sorted(os.listdir(results_root_dir)):\n results_file = os.path.join(results_root_dir, video_name, \"results.h5\")\n \n features = dataset.features_dataset[video_name]\n timestamp = features.timestamp\n\n with h5py.File(results_file, \"r\") as f:\n scores = f[\"unaligned\"][\"scores\"][:] \n thetas = f[\"unaligned\"][\"theta\"][:] \n max_scores = np.argmax(scores, axis=1)\n results_scores = scores[np.arange(scores.shape[0]), max_scores]\n results_theta = thetas[np.arange(thetas.shape[0]), max_scores]\n\n non_resampled_scores = []\n non_resampled_theta= []\n for cur_time, (score, theta) in enumerate(zip(results_scores, results_theta)):\n\n frame_index = np.where(timestamp == cur_time)[0]\n if len(frame_index) == 0:\n continue\n cur_frame_index = frame_index[0]\n non_resampled_scores.append(score)\n non_resampled_theta.append(theta)\n\n all_scores.append(non_resampled_scores)\n all_theta.append(non_resampled_theta)\n\n print(video_name, len(non_resampled_scores))\n\n all_scores = np.concatenate(all_scores)\n all_theta = np.concatenate(all_theta)\n print(len(all_scores))\n\n np.savetxt(\"all_scores.txt\", all_scores)\n np.save(\"all_theta.npy\", all_theta)\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset_loader\", type=str, help=\"Dataset loader (tierpsy or other)\") \n parser.add_argument(\"dataset_path\", type=str, help=\"root path of a wormpose Dataset\") \n parser.add_argument(\"results_root_dir\", type=str, help=\"Root folder where to find wormpose results\") \n args = parser.parse_args()\n\n save_results(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "3254863", "language": "Python", "matching_score": 3.3616573810577393, "max_stars_count": 0, "path": "fig5/assemble_results_scores.py" }, { "content": "#!/usr/bin/env python\n\n\"\"\"\nCalculates the image similarity on a random selection of labeled frames from a dataset.\n\"\"\"\n\nimport logging\nimport os\nimport random\nfrom argparse import Namespace\nfrom typing import Tuple\n\nimport h5py\nimport numpy as np\n\nfrom wormpose.commands import _log_parameters\nfrom wormpose.config import default_paths\nfrom wormpose.dataset import Dataset\nfrom wormpose.dataset.image_processing.options import add_image_processing_arguments\nfrom wormpose.dataset.loader import get_dataset_name\nfrom wormpose.dataset.loader import load_dataset\nfrom wormpose.images.scoring.centerline_accuracy_check import CenterlineAccuracyCheck\nfrom wormpose.pose.centerline import skeletons_to_angles\nfrom wormpose.dataset.loaders.resizer import add_resizing_arguments, ResizeOptions\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass _ScoresWriter(object):\n def __init__(self):\n self.all_scores = []\n\n def add(self, kwargs):\n self.all_scores.append(kwargs[\"score\"])\n\n def write(self, results_file):\n with h5py.File(results_file, \"a\") as f:\n f.create_dataset(\"scores\", data=self.all_scores)\n\n\nclass _ImagesAndScoresWriter(_ScoresWriter):\n def __init__(self):\n self.all_synth = []\n self.all_real = []\n super().__init__()\n\n def add(self, kwargs):\n centerline_accuracy: CenterlineAccuracyCheck = kwargs[\"centerline_accuracy\"]\n 
self.all_real.append(np.array(centerline_accuracy.last_real_image))\n self.all_synth.append(np.array(centerline_accuracy.last_synth_image))\n super().add(kwargs)\n\n def write(self, results_file):\n with h5py.File(results_file, \"a\") as f:\n f.create_dataset(\"real_images\", data=self.all_real)\n f.create_dataset(\"synth_images\", data=self.all_synth)\n super().write(results_file)\n\n\nclass _Calibrator(object):\n def __init__(\n self,\n dataset: Dataset,\n results_dir: str,\n image_shape: Tuple[int, int],\n num_samples: int,\n theta_dims: int,\n ):\n self.dataset = dataset\n self.results_dir = results_dir\n self.image_shape = image_shape\n self.num_samples = num_samples\n self.theta_dims = theta_dims\n\n def __call__(self, video_name: str, writer: _ScoresWriter):\n \"\"\"\n Evaluate image metric score on labelled frames.\n \"\"\"\n features = self.dataset.features_dataset[video_name]\n labelled_thetas = skeletons_to_angles(features.skeletons, theta_dims=self.theta_dims)\n labelled_indexes = features.labelled_indexes\n\n centerline_accuracy = CenterlineAccuracyCheck(\n frame_preprocessing=self.dataset.frame_preprocessing,\n image_shape=self.image_shape,\n )\n\n with self.dataset.frames_dataset.open(video_name) as frames:\n frames_amount = min(self.num_samples, len(labelled_indexes))\n\n random_label_index = np.random.choice(labelled_indexes, frames_amount, replace=False)\n thetas = labelled_thetas[random_label_index]\n\n for theta, index in zip(thetas, random_label_index):\n cur_frame = frames[index]\n\n score, _ = centerline_accuracy(\n theta=theta,\n template_skeleton=features.skeletons[index],\n template_measurements=features.measurements,\n template_frame=cur_frame,\n real_frame_orig=cur_frame,\n )\n writer.add(locals())\n\n results_file = os.path.join(self.results_dir, video_name + \"_calibration.h5\")\n if os.path.exists(results_file):\n os.remove(results_file)\n writer.write(results_file=results_file)\n\n logger.info(\n f\"Evaluated known skeletons reconstruction for {video_name},\"\n f\" average score {np.mean(writer.all_scores):.4f}\"\n )\n return results_file\n\n\ndef _parse_arguments(kwargs: dict):\n if kwargs.get(\"num_samples\") is None:\n kwargs[\"num_samples\"] = 500\n if kwargs.get(\"work_dir\") is None:\n kwargs[\"work_dir\"] = default_paths.WORK_DIR\n if kwargs.get(\"theta_dims\") is None:\n kwargs[\"theta_dims\"] = 100\n if kwargs.get(\"video_names\") is None:\n kwargs[\"video_names\"] = None\n if kwargs.get(\"save_images\") is None:\n kwargs[\"save_images\"] = False\n if kwargs.get(\"random_seed\") is None:\n kwargs[\"random_seed\"] = None\n kwargs[\"resize_options\"] = ResizeOptions(**kwargs)\n\n _log_parameters(logger.info, kwargs)\n return Namespace(**kwargs)\n\n\ndef calibrate(dataset_loader: str, dataset_path: str, **kwargs):\n \"\"\"\n Calculate the image score for a certain number of labelled frames in the dataset,\n this will give an indication on choosing the image similarity threshold when predicting all frames in the dataset.\n\n :param dataset_loader: Name of the dataset loader, for example \"tierpsy\"\n :param dataset_path: Root path of the dataset containing videos of worm\n \"\"\"\n _log_parameters(logger.info, {\"dataset_loader\": dataset_loader, \"dataset_path\": dataset_path})\n args = _parse_arguments(kwargs)\n\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n\n dataset_name = get_dataset_name(dataset_path)\n experiment_dir = os.path.join(args.work_dir, dataset_name)\n calibration_results_dir = os.path.join(experiment_dir, 
default_paths.CALIBRATION_RESULTS_DIR)\n os.makedirs(calibration_results_dir, exist_ok=True)\n\n dataset = load_dataset(\n dataset_loader,\n dataset_path,\n selected_video_names=args.video_names,\n **vars(args),\n )\n\n calibrator = _Calibrator(\n dataset=dataset,\n results_dir=calibration_results_dir,\n image_shape=dataset.image_shape,\n num_samples=args.num_samples,\n theta_dims=args.theta_dims,\n )\n\n writer = _ImagesAndScoresWriter() if kwargs[\"save_images\"] else _ScoresWriter()\n\n for video_name in dataset.video_names:\n results_file = calibrator(video_name=video_name, writer=writer)\n yield video_name, results_file\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"dataset_loader\", type=str)\n parser.add_argument(\"dataset_path\", type=str)\n parser.add_argument(\n \"--video_names\",\n type=str,\n nargs=\"+\",\n help=\"Only evaluate using a subset of videos. \" \"If not set, will include all videos in dataset_path.\",\n )\n parser.add_argument(\n \"--num_samples\",\n type=int,\n help=\"How many frames to perform the calibration in order to evaluate the image metric\",\n )\n parser.add_argument(\"--theta_dims\", type=int)\n parser.add_argument(\"--work_dir\", type=str, help=\"Root folder for all experiments\")\n parser.add_argument(\n \"--save_images\", default=False, action=\"store_true\", help=\"Also save the images used for the calibration\"\n )\n parser.add_argument(\"--random_seed\", type=int, help=\"Optional random seed for deterministic results\")\n add_resizing_arguments(parser)\n add_image_processing_arguments(parser)\n\n args = parser.parse_args()\n\n list(calibrate(**vars(args)))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "2645203", "language": "Python", "matching_score": 4.75614595413208, "max_stars_count": 29, "path": "wormpose/commands/calibrate_dataset.py" }, { "content": "#!/usr/bin/env python\n\n\"\"\"\nVisualizer for the synthetic images\n\"\"\"\n\nimport random\nfrom typing import Optional, Generator\nimport numpy as np\n\nfrom wormpose.dataset.image_processing.options import add_image_processing_arguments\nfrom wormpose.dataset.loader import load_dataset\nfrom wormpose.dataset.loaders.resizer import add_resizing_arguments, ResizeOptions\nfrom wormpose.images.synthetic import SyntheticDataset\nfrom wormpose.pose.postures_model import PosturesModel\n\n\nclass SyntheticSimpleVisualizer(object):\n \"\"\"\n Utility class to visualize the synthetic images\n \"\"\"\n\n def __init__(\n self,\n dataset_loader: str,\n dataset_path: str,\n postures_generator: Optional[Generator] = None,\n video_name: str = None,\n **kwargs\n ):\n resize_options = ResizeOptions(**kwargs)\n dataset = load_dataset(dataset_loader, dataset_path, resize_options=resize_options, **kwargs)\n\n if postures_generator is None:\n postures_generator = PosturesModel().generate()\n if video_name is None:\n video_name = dataset.video_names[0]\n\n features = dataset.features_dataset[video_name]\n self.skeletons = features.skeletons\n self.measurements = features.measurements\n\n self.output_image_shape = dataset.image_shape\n\n self.synthetic_dataset = SyntheticDataset(\n frame_preprocessing=dataset.frame_preprocessing,\n output_image_shape=self.output_image_shape,\n enable_random_augmentations=False,\n )\n skel_is_not_nan = ~np.any(np.isnan(self.skeletons), axis=(1, 2))\n self.labelled_indexes = np.where(skel_is_not_nan)[0]\n if len(self.labelled_indexes) == 0:\n raise ValueError(\"No template frames found in the dataset, can't generate synthetic 
images.\")\n self.frames_dataset = dataset.frames_dataset\n self.video_name = video_name\n self.postures_generator = postures_generator\n\n def generate(self):\n out_image = np.empty(self.output_image_shape, dtype=np.uint8)\n\n with self.frames_dataset.open(self.video_name) as frames:\n while True:\n theta = next(self.postures_generator)\n random_label_index = np.random.choice(self.labelled_indexes)\n self.synthetic_dataset.generate(\n theta=theta,\n template_skeleton=self.skeletons[random_label_index],\n template_frame=frames[random_label_index],\n out_image=out_image,\n template_measurements=self.measurements,\n )\n yield out_image, theta\n\n\ndef main():\n import argparse\n import cv2\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset_loader\", type=str)\n parser.add_argument(\"dataset_path\", type=str)\n parser.add_argument(\"--video_name\", type=str)\n parser.add_argument(\"--random_seed\", type=int, help=\"Optional random seed for deterministic results\")\n add_resizing_arguments(parser)\n add_image_processing_arguments(parser)\n\n args = parser.parse_args()\n\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n\n synth_visualizer_gen = SyntheticSimpleVisualizer(**vars(args)).generate()\n\n while True:\n synth_image, _ = next(synth_visualizer_gen)\n\n cv2.imshow(\"synth_image\", synth_image)\n cv2.waitKey()\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "6260854", "language": "Python", "matching_score": 2.9625041484832764, "max_stars_count": 29, "path": "wormpose/demo/synthetic_simple_visualizer.py" }, { "content": "import shutil\nimport tempfile\n\nimport numpy as np\nimport os\nimport cv2\n\nfrom wormpose.dataset import load_dataset\nfrom wormpose.dataset.loaders.resizer import ResizeOptions\nfrom wormpose.images.scoring import ScoringDataManager, ResultsScoring\nfrom wormpose.images.worm_drawing import draw_skeleton\nfrom wormpose.pose.eigenworms import load_eigenworms_matrix, modes_to_theta\nfrom wormpose.pose.results_datatypes import BaseResults\n\n\ndef export_as_images(dataset_loader: str,\n dataset_path: str,\n video_name: str,\n results_path: str,\n eigenworms_matrix_path: str,\n out_dir: str,\n num_process,\n temp_dir,\n image_size=128):\n if out_dir is None:\n out_dir = \"out\"\n if num_process is None:\n num_process = os.cpu_count()\n if temp_dir is None:\n temp_dir = tempfile.gettempdir()\n temp_dir = tempfile.mkdtemp(dir=temp_dir)\n\n out_dir = os.path.join(out_dir, video_name)\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n os.makedirs(out_dir, exist_ok=True)\n\n eigenworms_matrix = load_eigenworms_matrix(eigenworms_matrix_path)\n\n dataset = load_dataset(\n dataset_loader=dataset_loader,\n dataset_path=dataset_path,\n selected_video_names=[video_name],\n resize_options=ResizeOptions(image_size=image_size)\n )\n features = dataset.features_dataset[video_name]\n scoring_data_manager = ScoringDataManager(\n video_name=video_name,\n frames_dataset=dataset.frames_dataset,\n features=features,\n )\n results_scoring = ResultsScoring(\n frame_preprocessing=dataset.frame_preprocessing,\n num_process=num_process,\n temp_dir=temp_dir,\n image_shape=(image_size, image_size),\n )\n\n results = np.loadtxt(results_path)\n\n modes = results[:, :5]\n theta_mean = results[:, 5]\n\n # convert RCS results to theta\n thetas = []\n for m, t_m in zip(modes, theta_mean):\n theta = modes_to_theta(m, eigenworms_matrix) + t_m\n thetas.append(theta)\n thetas = np.array(thetas)\n\n # calculate score and associated skeleton\n results_to_score = 
BaseResults(theta=thetas)\n results_scoring(results_to_score, scoring_data_manager)\n\n skeletons = results_to_score.skeletons[:, 0]\n scores = results_to_score.scores[:, 0]\n\n # draw skeleton on top of image\n color = (0, 255, 0)\n image_filename_format = \"frame_{{:0{}d}}_score_{{:.2f}}.png\".format(\n len(str(skeletons.shape[0])), len(str(len(scores)))\n )\n\n with dataset.frames_dataset.open(video_name) as frames:\n for index, (frame, skel, score) in enumerate(zip(frames, skeletons, scores)):\n frame_color = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)\n\n draw_skeleton(frame_color, skel, color, color)\n\n cv2.imwrite(os.path.join(out_dir, image_filename_format.format(index, score)), frame_color)\n\n # cleanup\n shutil.rmtree(temp_dir)\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"dataset_loader\", type=str, help=\"tierpsy or other\")\n parser.add_argument(\"dataset_path\", type=str, help=\"dataset root path\")\n parser.add_argument(\"video_name\", type=str, help=\"name of video of the RCS results\")\n\n parser.add_argument(\"results_path\", type=str, help=\"File path of RCS results txt file\")\n parser.add_argument(\n \"eigenworms_matrix_path\",\n type=str,\n help=\"Path to eigenworms matrix to convert RCS modes to theta\",\n )\n parser.add_argument(\"--out_dir\", type=str, help=\"where will the results images go\")\n\n parser.add_argument(\"--temp_dir\", type=str, help=\"Where to store temporary intermediate results\")\n parser.add_argument(\"--num_process\", type=int, help=\"How many worker processes\")\n\n args = parser.parse_args()\n\n export_as_images(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "9100269", "language": "Python", "matching_score": 4.540072441101074, "max_stars_count": 0, "path": "suppl comparison/export_RCS_images/export_RCS_results_as_images.py" }, { "content": "#!/usr/bin/env python\n\n\"\"\"\nPredicts videos using a trained model\n\"\"\"\n\nimport logging\nimport multiprocessing as mp\nimport os\nimport random\nimport shutil\nimport tempfile\nfrom argparse import Namespace\nfrom functools import partial\nfrom typing import Tuple\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom wormpose.commands import _log_parameters\nfrom wormpose.commands.utils.results_saver import ResultsSaver\nfrom wormpose.commands.utils.time_sampling import resample_results\nfrom wormpose.config import default_paths\nfrom wormpose.config.default_paths import RESULTS_FILENAME, CONFIG_FILENAME\nfrom wormpose.config.experiment_config import load_config, add_config_argument\nfrom wormpose.dataset.features import Features\nfrom wormpose.dataset.image_processing.options import WORM_IS_LIGHTER\nfrom wormpose.dataset.loader import get_dataset_name\nfrom wormpose.dataset.loader import load_dataset\nfrom wormpose.dataset.loaders.resizer import ResizeOptions\nfrom wormpose.images.scoring import BaseScoringDataManager, ScoringDataManager, ResultsScoring\nfrom wormpose.machine_learning.best_models_saver import BestModels\nfrom wormpose.machine_learning.predict_data_generator import PredictDataGenerator\nfrom wormpose.pose.centerline import skeletons_to_angles\nfrom wormpose.pose.headtail_resolution import resolve_head_tail\nfrom wormpose.pose.results_datatypes import (\n ShuffledResults,\n OriginalResults,\n BaseResults,\n)\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\ntf.get_logger().setLevel(logging.INFO)\n\n\ndef _make_tf_dataset(data_generator, batch_size: int, 
image_shape):\n def run(video_name):\n data_gen = partial(data_generator.run, video_name=video_name)\n tf_dset = tf.data.Dataset.from_generator(\n data_gen,\n tf.float32,\n tf.TensorShape([batch_size, image_shape[0], image_shape[1], 1]),\n )\n return tf_dset\n\n return run\n\n\ndef _can_resolve_results(shuffled_results: ShuffledResults, score_threshold: float, video_name: str) -> bool:\n scores = shuffled_results.scores\n if np.all(np.isnan(scores)):\n logger.error(f\"Calculated scores are all invalid, stopping analysis for {video_name}\")\n return False\n\n if np.max(scores) < score_threshold:\n logger.error(\n f\"There is not one frame where the error metric is above the threshold {score_threshold} \"\n f\"in the whole video {video_name}, stopping analysis. Maybe the model didn't train properly.\"\n )\n return False\n return True\n\n\nclass _Predictor(object):\n def __init__(self, results_scoring: ResultsScoring, keras_model):\n self.keras_model = keras_model\n self.results_scoring = results_scoring\n\n def __call__(\n self,\n num_frames: int,\n input_frames,\n scoring_data_manager: BaseScoringDataManager,\n features: Features,\n ) -> Tuple[OriginalResults, ShuffledResults]:\n # run all frames through the neural network to get a result theta without head/tail decision\n network_predictions = self.keras_model.predict(input_frames)[:num_frames]\n logger.info(f\"Predicted {len(network_predictions)} frames\")\n\n shuffled_results = ShuffledResults(random_theta=network_predictions)\n\n original_results = OriginalResults(\n theta=skeletons_to_angles(features.skeletons, theta_dims=network_predictions.shape[1]),\n skeletons=features.skeletons,\n scores=None,\n )\n\n # calculate image similarity for each frame, for the two solutions\n self.results_scoring(results=shuffled_results, scoring_data_manager=scoring_data_manager)\n\n avg_score = np.max(shuffled_results.scores, axis=1).mean()\n logger.info(f\"Calculated image similarity, average: {avg_score:.4f}\")\n\n resample_results(shuffled_results, features.timestamp)\n resample_results(original_results, features.timestamp)\n\n return original_results, shuffled_results\n\n\ndef _apply_resize_factor(results: BaseResults, resize_factor: float):\n results.skeletons /= resize_factor\n\n\ndef _parse_arguments(dataset_path: str, kwargs: dict):\n if kwargs.get(\"work_dir\") is None:\n kwargs[\"work_dir\"] = default_paths.WORK_DIR\n if kwargs.get(\"num_process\") is None:\n kwargs[\"num_process\"] = os.cpu_count()\n if kwargs.get(\"temp_dir\") is None:\n kwargs[\"temp_dir\"] = tempfile.gettempdir()\n if kwargs.get(\"batch_size\") is None:\n kwargs[\"batch_size\"] = 512\n if kwargs.get(\"score_threshold\") is None:\n kwargs[\"score_threshold\"] = 0.7\n if kwargs.get(\"video_names\") is None:\n kwargs[\"video_names\"] = None\n if kwargs.get(\"random_seed\") is None:\n kwargs[\"random_seed\"] = None\n\n kwargs[\"temp_dir\"] = tempfile.mkdtemp(dir=kwargs[\"temp_dir\"])\n\n dataset_name = get_dataset_name(dataset_path)\n kwargs[\"experiment_dir\"] = os.path.join(kwargs[\"work_dir\"], dataset_name)\n\n if kwargs.get(\"model_path\") is None:\n default_models_dir = os.path.join(kwargs[\"experiment_dir\"], default_paths.MODELS_DIRS)\n kwargs[\"model_path\"] = BestModels(default_models_dir).best_model_path\n if kwargs.get(\"config\") is None:\n kwargs[\"config\"] = os.path.join(kwargs[\"experiment_dir\"], CONFIG_FILENAME)\n\n _log_parameters(logger.info, {\"dataset_path\": dataset_path})\n _log_parameters(logger.info, kwargs)\n\n return Namespace(**kwargs)\n\n\ndef 
predict(dataset_path: str, **kwargs):\n \"\"\"\n Use a trained model to predict the centerlines of worm for videos in a dataset\n\n :param dataset_path: Root path of the dataset containing videos of worm\n \"\"\"\n args = _parse_arguments(dataset_path, kwargs)\n\n mp.set_start_method(\"spawn\", force=True)\n\n if args.random_seed is not None:\n os.environ[\"TF_DETERMINISTIC_OPS\"] = \"1\"\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n\n results_root_dir = os.path.join(args.experiment_dir, default_paths.RESULTS_DIR)\n os.makedirs(results_root_dir, exist_ok=True)\n\n config = load_config(args.config)\n\n dataset = load_dataset(\n dataset_loader=config.dataset_loader,\n dataset_path=dataset_path,\n selected_video_names=args.video_names,\n resize_options=ResizeOptions(resize_factor=config.resize_factor),\n **{WORM_IS_LIGHTER: config.worm_is_lighter},\n )\n\n keras_model = tf.keras.models.load_model(args.model_path, compile=False)\n\n results_saver = ResultsSaver(\n temp_dir=args.temp_dir, results_root_dir=results_root_dir, results_filename=RESULTS_FILENAME\n )\n\n tf_dataset_maker = _make_tf_dataset(\n data_generator=PredictDataGenerator(\n dataset=dataset,\n num_process=args.num_process,\n temp_dir=args.temp_dir,\n image_shape=config.image_shape,\n batch_size=args.batch_size,\n ),\n batch_size=args.batch_size,\n image_shape=config.image_shape,\n )\n\n results_scoring = ResultsScoring(\n frame_preprocessing=dataset.frame_preprocessing,\n num_process=args.num_process,\n temp_dir=args.temp_dir,\n image_shape=config.image_shape,\n )\n predictor = _Predictor(results_scoring=results_scoring, keras_model=keras_model)\n\n for video_name in dataset.video_names:\n logger.info(f'Processing video: \"{video_name}\"')\n features = dataset.features_dataset[video_name]\n\n template_indexes = features.labelled_indexes\n if len(template_indexes) == 0:\n logger.error(\n f\"Can't calculate image metric, there is no labelled frame in the video to use as a template, \"\n f\"stopping analysis for {video_name}.\"\n )\n continue\n\n original_results, shuffled_results = predictor(\n input_frames=tf_dataset_maker(video_name),\n num_frames=dataset.num_frames(video_name),\n features=features,\n scoring_data_manager=ScoringDataManager(\n video_name=video_name,\n frames_dataset=dataset.frames_dataset,\n features=features,\n ),\n )\n\n results = {\"original\": original_results, \"unaligned\": shuffled_results}\n if _can_resolve_results(\n shuffled_results,\n video_name=video_name,\n score_threshold=args.score_threshold,\n ):\n final_results = resolve_head_tail(\n shuffled_results=shuffled_results,\n original_results=original_results,\n frame_rate=features.frame_rate,\n score_threshold=args.score_threshold,\n )\n results[\"resolved\"] = final_results\n _apply_resize_factor(results[\"resolved\"], config.resize_factor)\n\n _apply_resize_factor(results[\"unaligned\"], config.resize_factor)\n\n results_saver.save(results=results, video_name=video_name)\n\n # cleanup\n shutil.rmtree(args.temp_dir)\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n\n # model infos\n parser.add_argument(\n \"--model_path\",\n type=str,\n help=\"Load model from this path, or use best model from work_dir.\",\n )\n parser.add_argument(\"--batch_size\", type=int)\n\n # inputs\n parser.add_argument(\"dataset_path\", type=str)\n parser.add_argument(\n \"--video_names\",\n type=str,\n nargs=\"+\",\n help=\"Only analyze a subset of videos. 
If not set, will analyze all videos in dataset_path.\",\n )\n add_config_argument(parser)\n parser.add_argument(\"--temp_dir\", type=str, help=\"Where to store temporary intermediate results\")\n parser.add_argument(\"--work_dir\", type=str, help=\"Root folder for all experiments\")\n # multiprocessing params\n parser.add_argument(\"--num_process\", type=int, help=\"How many worker processes\")\n # parameters of results processing\n parser.add_argument(\n \"--score_threshold\",\n type=float,\n help=\"Image metric score threshold : discard results scoring lower than this value.\"\n \" Fine tune this value using the script calibrate_dataset.py\",\n )\n parser.add_argument(\"--random_seed\", type=int, help=\"Optional random seed for deterministic results\")\n args = parser.parse_args()\n\n predict(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "1571657", "language": "Python", "matching_score": 6.790984630584717, "max_stars_count": 29, "path": "wormpose/commands/predict_dataset.py" }, { "content": "#!/usr/bin/env python\n\n\"\"\"\nEvaluates a trained model by predicting new synthetic images and calculating the image similarity and the angle error\n\"\"\"\n\nimport glob\nimport logging\nimport multiprocessing as mp\nimport os\nimport pickle\nimport random\nimport tempfile\nfrom argparse import Namespace\nfrom functools import partial\nfrom typing import List, Optional\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom wormpose.commands import _log_parameters\nfrom wormpose.config import default_paths\nfrom wormpose.config.default_paths import CONFIG_FILENAME\nfrom wormpose.config.experiment_config import load_config, add_config_argument\nfrom wormpose.dataset import Dataset\nfrom wormpose.dataset.image_processing.options import WORM_IS_LIGHTER\nfrom wormpose.dataset.loader import load_dataset, get_dataset_name\nfrom wormpose.dataset.loaders.resizer import ResizeOptions\nfrom wormpose.images.scoring import ResultsScoring, BaseScoringDataManager\nfrom wormpose.machine_learning.best_models_saver import BestModels\nfrom wormpose.machine_learning.generic_file_writer import GenericFileWriter\nfrom wormpose.machine_learning.synthetic_data_generator import SyntheticDataGenerator\nfrom wormpose.pose.distance_metrics import angle_distance\nfrom wormpose.pose.eigenworms import load_eigenworms_matrix, theta_to_modes\nfrom wormpose.pose.postures_model import PosturesModel\nfrom wormpose.pose.results_datatypes import ShuffledResults\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef _write_detailed_labeled_data_to_pickle(f, **kwargs):\n pickle.dump(\n (\n kwargs[\"image_data\"],\n kwargs[\"template_measurements\"],\n kwargs[\"template_frame\"],\n kwargs[\"template_skeleton\"],\n kwargs[\"theta\"],\n ),\n f,\n )\n\n\nclass _PickleDetailedLabeledDataWriter(GenericFileWriter):\n \"\"\"This labeled data writer saves detailed information about the synthetic image generation\"\"\"\n\n def __init__(self, filename):\n super().__init__(\n open_file=partial(open, filename, \"wb\"),\n write_file=lambda f, data: _write_detailed_labeled_data_to_pickle(f, **data),\n )\n\n\ndef _eval_data_gen(filenames: List[str]):\n for filename in filenames:\n with open(filename, \"rb\") as f:\n while True:\n try:\n res = pickle.load(f)\n im = res[0]\n im = im[:, :, np.newaxis]\n im = im.astype(np.float32) / 255\n yield im\n except EOFError:\n break\n\n\ndef _load_templates(pkl_filenames: List[str]):\n all_templates_data = []\n all_labels = []\n for pkl_filename 
in pkl_filenames:\n with open(pkl_filename, \"rb\") as pkl_file:\n try:\n while True:\n (frame, template_measurements, template_frame, template_skeleton, label_theta) = pickle.load(\n pkl_file\n )\n all_templates_data.append([template_frame, template_skeleton, template_measurements, frame])\n all_labels.append(label_theta)\n except EOFError:\n pass\n return all_templates_data, all_labels\n\n\nclass _ScoringDataManager(BaseScoringDataManager):\n def __init__(self, pkl_filenames):\n self._all_templates_data, _ = _load_templates(pkl_filenames)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n def __getitem__(self, frame_index):\n return self._all_templates_data[frame_index]\n\n def __len__(self):\n return len(self._all_templates_data)\n\n\ndef _parse_arguments(dataset_path: str, kwargs: dict):\n if kwargs.get(\"work_dir\") is None:\n kwargs[\"work_dir\"] = default_paths.WORK_DIR\n if kwargs.get(\"num_process\") is None:\n kwargs[\"num_process\"] = os.cpu_count()\n if kwargs.get(\"temp_dir\") is None:\n kwargs[\"temp_dir\"] = tempfile.gettempdir()\n if kwargs.get(\"batch_size\") is None:\n kwargs[\"batch_size\"] = 512\n if kwargs.get(\"num_samples\") is None:\n kwargs[\"num_samples\"] = 1000\n if kwargs.get(\"postures_generation\") is None:\n kwargs[\"postures_generation\"] = PosturesModel().generate\n if kwargs.get(\"video_names\") is None:\n kwargs[\"video_names\"] = None\n if kwargs.get(\"model_path\") is None:\n kwargs[\"model_path\"] = None\n if kwargs.get(\"random_seed\") is None:\n kwargs[\"random_seed\"] = None\n if kwargs.get(\"eigenworms_matrix_path\") is None:\n kwargs[\"eigenworms_matrix_path\"] = None\n kwargs[\"temp_dir\"] = tempfile.mkdtemp(dir=kwargs[\"temp_dir\"])\n\n dataset_name = get_dataset_name(dataset_path)\n kwargs[\"experiment_dir\"] = os.path.join(kwargs[\"work_dir\"], dataset_name)\n\n if kwargs.get(\"model_path\") is None:\n default_models_dir = os.path.join(kwargs[\"experiment_dir\"], default_paths.MODELS_DIRS)\n kwargs[\"model_path\"] = BestModels(default_models_dir).best_model_path\n if kwargs.get(\"config\") is None:\n kwargs[\"config\"] = os.path.join(kwargs[\"experiment_dir\"], CONFIG_FILENAME)\n\n _log_parameters(logger.info, {\"dataset_path\": dataset_path})\n _log_parameters(logger.info, kwargs)\n\n return Namespace(**kwargs)\n\n\ndef evaluate(dataset_path: str, **kwargs):\n \"\"\"\n Evaluate a trained model by predicting synthetic data and recording the image similarity\n\n :param dataset_path: Root path of the dataset containing videos of worm\n \"\"\"\n args = _parse_arguments(dataset_path, kwargs)\n\n mp.set_start_method(\"spawn\", force=True)\n\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n\n results_dir = os.path.join(args.experiment_dir, \"evaluation\")\n os.makedirs(results_dir, exist_ok=True)\n\n config = load_config(args.config)\n eigenworms_matrix = load_eigenworms_matrix(args.eigenworms_matrix_path)\n\n dataset = load_dataset(\n dataset_loader=config.dataset_loader,\n dataset_path=dataset_path,\n selected_video_names=args.video_names,\n resize_options=ResizeOptions(resize_factor=config.resize_factor),\n **{WORM_IS_LIGHTER: config.worm_is_lighter},\n )\n\n pkl_filenames = _generate_synthetic_data(\n dataset,\n args.num_process,\n args.num_samples,\n args.postures_generation,\n args.temp_dir,\n args.random_seed,\n )\n\n keras_model = tf.keras.models.load_model(args.model_path, compile=False)\n\n tf_dataset = tf.data.Dataset.from_generator(\n partial(_eval_data_gen, 
pkl_filenames),\n tf.float32,\n tf.TensorShape(dataset.image_shape + (1,)),\n ).batch(args.batch_size)\n\n network_predictions = keras_model.predict(tf_dataset)[: args.num_samples]\n shuffled_results = ShuffledResults(random_theta=network_predictions)\n\n ResultsScoring(\n frame_preprocessing=dataset.frame_preprocessing,\n num_process=args.num_process,\n temp_dir=args.temp_dir,\n image_shape=dataset.image_shape,\n )(\n results=shuffled_results,\n scoring_data_manager=_ScoringDataManager(pkl_filenames),\n )\n # Keep the maximum score between the two head/tail options for this evaluation\n image_scores = np.max(shuffled_results.scores, axis=1)\n\n # Now calculate the angle error and mode error\n angle_error = []\n modes_error = []\n theta_predictions = []\n _, theta_labels = _load_templates(pkl_filenames)\n for theta_label, theta_results in zip(theta_labels, shuffled_results.theta):\n dists = [angle_distance(theta_result, theta_label) for theta_result in theta_results]\n closest_index = int(np.argmin(dists))\n closest_theta = theta_results[closest_index]\n theta_predictions.append(closest_theta)\n angle_error.append(dists[closest_index])\n if eigenworms_matrix is not None:\n modes_label = theta_to_modes(theta_label, eigenworms_matrix)\n modes_prediction = theta_to_modes(closest_theta, eigenworms_matrix)\n mode_error = np.abs(modes_label - modes_prediction)\n modes_error.append(mode_error)\n\n np.savetxt(os.path.join(results_dir, \"image_score.txt\"), image_scores)\n np.savetxt(os.path.join(results_dir, \"angle_error.txt\"), angle_error)\n np.savetxt(os.path.join(results_dir, \"theta_labels.txt\"), theta_labels)\n np.savetxt(os.path.join(results_dir, \"theta_predictions.txt\"), theta_predictions)\n if eigenworms_matrix is not None:\n np.savetxt(os.path.join(results_dir, \"modes_error.txt\"), modes_error)\n\n logger.info(\n f\"Evaluated model with synthetic data,\"\n f\" average image similarity: {np.mean(image_scores):.4f},\"\n f\" average angle error (degrees): {np.rad2deg(np.mean(angle_error)):.2f}\"\n )\n\n\ndef _generate_synthetic_data(\n dataset: Dataset,\n num_process: int,\n num_samples: int,\n postures_generation,\n temp_dir: str,\n random_seed: Optional[int],\n):\n syn_data_file_pattern = os.path.join(temp_dir, \"synthetic_{index}.pkl\")\n synthetic_data_generator = SyntheticDataGenerator(\n num_process=num_process,\n temp_dir=temp_dir,\n dataset=dataset,\n postures_generation_fn=postures_generation,\n writer=_PickleDetailedLabeledDataWriter,\n enable_random_augmentations=False,\n random_seed=random_seed,\n )\n gen = synthetic_data_generator.generate(num_samples=num_samples, file_pattern=syn_data_file_pattern)\n list(gen)\n pkl_filenames = list(sorted(glob.glob(syn_data_file_pattern.format(index=\"*\"))))\n return pkl_filenames\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset_path\", type=str)\n parser.add_argument(\"--model_path\", type=str, help=\"Load models from this path.\")\n parser.add_argument(\n \"--video_names\",\n type=str,\n nargs=\"+\",\n help=\"Only evaluate using a subset of videos. 
\" \"If not set, will include all videos in dataset_path.\",\n )\n parser.add_argument(\"--work_dir\", type=str, help=\"Root folder for all experiments\")\n parser.add_argument(\n \"--num_samples\",\n type=int,\n help=\"How many synthetic samples to evaluate the model with\",\n )\n parser.add_argument(\n \"--eigenworms_matrix_path\",\n help=\"Path to optional eigenworms matrix to also calculate mode error\",\n )\n add_config_argument(parser)\n parser.add_argument(\"--temp_dir\", type=str, help=\"Where to store temporary intermediate results\")\n parser.add_argument(\"--num_process\", type=int, help=\"How many worker processes\")\n parser.add_argument(\"--batch_size\", type=int)\n parser.add_argument(\"--random_seed\", type=int, help=\"Optional random seed for deterministic results\")\n args = parser.parse_args()\n\n evaluate(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11018995", "language": "Python", "matching_score": 7.56934928894043, "max_stars_count": 29, "path": "wormpose/commands/evaluate_model.py" }, { "content": "#!/usr/bin/env python\n\n\"\"\"\nGenerates the training and evaluation data from a dataset.\n\"\"\"\n\nimport logging\nimport multiprocessing as mp\nimport os\nimport random\nimport shutil\nimport tempfile\nimport time\nfrom argparse import Namespace\n\nimport numpy as np\n\nfrom wormpose.commands import _log_parameters\nfrom wormpose.config import default_paths\nfrom wormpose.config.default_paths import (\n SYNTH_TRAIN_DATASET_NAMES,\n REAL_EVAL_DATASET_NAMES,\n CONFIG_FILENAME,\n)\nfrom wormpose.config.experiment_config import save_config, ExperimentConfig\nfrom wormpose.dataset.image_processing.options import (\n add_image_processing_arguments,\n WORM_IS_LIGHTER,\n)\nfrom wormpose.dataset.loader import get_dataset_name\nfrom wormpose.dataset.loader import load_dataset\nfrom wormpose.dataset.loaders.resizer import add_resizing_arguments, ResizeOptions\nfrom wormpose.machine_learning import eval_data_generator\nfrom wormpose.machine_learning.synthetic_data_generator import SyntheticDataGenerator\nfrom wormpose.machine_learning.tfrecord_file import TfrecordLabeledDataWriter\nfrom wormpose.pose.postures_model import PosturesModel\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef _parse_arguments(kwargs: dict):\n if kwargs.get(\"num_process\") is None:\n kwargs[\"num_process\"] = os.cpu_count()\n if kwargs.get(\"temp_dir\") is None:\n kwargs[\"temp_dir\"] = tempfile.gettempdir()\n if kwargs.get(\"num_train_samples\") is None:\n kwargs[\"num_train_samples\"] = int(5e5)\n if kwargs.get(\"num_eval_samples\") is None:\n kwargs[\"num_eval_samples\"] = int(1e4)\n if kwargs.get(\"work_dir\") is None:\n kwargs[\"work_dir\"] = default_paths.WORK_DIR\n if kwargs.get(\"postures_generation\") is None:\n kwargs[\"postures_generation\"] = PosturesModel().generate\n if kwargs.get(\"video_names\") is None:\n kwargs[\"video_names\"] = None\n if kwargs.get(\"random_seed\") is None:\n kwargs[\"random_seed\"] = None\n if kwargs.get(WORM_IS_LIGHTER) is None:\n kwargs[WORM_IS_LIGHTER] = False\n kwargs[\"temp_dir\"] = tempfile.mkdtemp(dir=kwargs[\"temp_dir\"])\n kwargs[\"resize_options\"] = ResizeOptions(**kwargs)\n\n _log_parameters(logger.info, kwargs)\n\n return Namespace(**kwargs)\n\n\ndef generate(dataset_loader: str, dataset_path: str, **kwargs):\n \"\"\"\n Generate synthetic images (training data) and processed real images (evaluation data)\n and save them to TFrecord files using multiprocessing\n\n :param 
dataset_loader: Name of the dataset loader, for example \"tierpsy\"\n :param dataset_path: Root path of the dataset containing videos of worm\n \"\"\"\n _log_parameters(logger.info, {\"dataset_loader\": dataset_loader, \"dataset_path\": dataset_path})\n args = _parse_arguments(kwargs)\n\n mp.set_start_method(\"spawn\", force=True)\n\n random.seed(args.random_seed)\n np.random.seed(args.random_seed)\n\n # setup folders\n if not os.path.exists(args.work_dir):\n os.mkdir(args.work_dir)\n experiment_dir = os.path.join(args.work_dir, get_dataset_name(dataset_path))\n os.makedirs(experiment_dir, exist_ok=True)\n tfrecords_dataset_root = os.path.join(experiment_dir, default_paths.TRAINING_DATA_DIR)\n if os.path.exists(tfrecords_dataset_root):\n shutil.rmtree(tfrecords_dataset_root)\n\n dataset = load_dataset(\n dataset_loader=dataset_loader,\n dataset_path=dataset_path,\n selected_video_names=args.video_names,\n **vars(args),\n )\n\n start = time.time()\n synthetic_data_generator = SyntheticDataGenerator(\n num_process=args.num_process,\n temp_dir=args.temp_dir,\n dataset=dataset,\n postures_generation_fn=args.postures_generation,\n enable_random_augmentations=True,\n writer=TfrecordLabeledDataWriter,\n random_seed=args.random_seed,\n )\n gen = synthetic_data_generator.generate(\n num_samples=args.num_train_samples,\n file_pattern=os.path.join(args.temp_dir, SYNTH_TRAIN_DATASET_NAMES),\n )\n for progress in gen:\n yield progress\n yield 1.0\n\n theta_dims = len(next(args.postures_generation()))\n num_eval_samples = eval_data_generator.generate(\n dataset=dataset,\n num_samples=args.num_eval_samples,\n theta_dims=theta_dims,\n file_pattern=os.path.join(args.temp_dir, REAL_EVAL_DATASET_NAMES),\n )\n\n shutil.copytree(args.temp_dir, tfrecords_dataset_root)\n save_config(\n ExperimentConfig(\n dataset_loader=dataset_loader,\n image_shape=dataset.image_shape,\n theta_dimensions=theta_dims,\n num_train_samples=args.num_train_samples,\n num_eval_samples=num_eval_samples,\n resize_factor=args.resize_options.resize_factor,\n video_names=dataset.video_names,\n worm_is_lighter=getattr(args, WORM_IS_LIGHTER),\n ),\n os.path.join(experiment_dir, CONFIG_FILENAME),\n )\n\n end = time.time()\n logger.info(f\"Done generating training data in : {end - start:.1f}s\")\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset_loader\", type=str)\n parser.add_argument(\"dataset_path\", type=str)\n parser.add_argument(\n \"--video_names\",\n type=str,\n nargs=\"+\",\n help=\"Only generate training data for a subset of videos. 
\"\n \"If not set, will include all videos in dataset_path.\",\n )\n parser.add_argument(\"--num_train_samples\", type=int, help=\"How many training samples to generate\")\n parser.add_argument(\"--num_eval_samples\", type=int, help=\"How many evaluation samples to generate\")\n parser.add_argument(\"--temp_dir\", type=str, help=\"Where to store temporary intermediate results\")\n parser.add_argument(\"--work_dir\", type=str, help=\"Root folder for all experiments\")\n parser.add_argument(\"--num_process\", type=int, help=\"How many worker processes\")\n parser.add_argument(\"--random_seed\", type=int, help=\"Optional random seed for deterministic results\")\n add_resizing_arguments(parser)\n add_image_processing_arguments(parser)\n args = parser.parse_args()\n\n last_progress = None\n for progress in generate(**vars(args)):\n prog_percent = int(progress * 100)\n if prog_percent != last_progress:\n logger.info(f\"Generating training data: {prog_percent}% done\")\n last_progress = prog_percent\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "3028413", "language": "Python", "matching_score": 4.3527374267578125, "max_stars_count": 29, "path": "wormpose/commands/generate_training_data.py" }, { "content": "import json\nimport os\n\nfrom typing import Tuple, List\n\n\ndef add_config_argument(parser):\n \"\"\"\n For command line arguments, add the option to pass the path of the configuration file\n\n :param parser: Argparse parser\n \"\"\"\n parser.add_argument(\n \"--config\",\n type=str,\n help=\"Path of the configuration file.\"\n \" The file is created when generating the training dataset.\"\n \" If not set, will look for it in the default location: at\"\n \" {work_dir}/{dataset_name}/config.json\",\n )\n\n\nclass ExperimentConfig(object):\n \"\"\"\n Data container for the experiment config, created when generating training data\n \"\"\"\n\n def __init__(\n self,\n num_train_samples: int = None,\n num_eval_samples: int = None,\n image_shape: Tuple[int, int] = None,\n dataset_loader: str = None,\n theta_dimensions: int = None,\n resize_factor: float = 1.0,\n video_names: List[str] = None,\n worm_is_lighter: bool = False,\n ):\n self.num_train_samples = num_train_samples\n self.num_eval_samples = num_eval_samples\n self.image_shape = image_shape\n self.dataset_loader = dataset_loader\n self.theta_dimensions = theta_dimensions\n self.resize_factor = resize_factor\n self.video_names = video_names\n self.worm_is_lighter = worm_is_lighter\n\n\ndef save_config(experiment_config: ExperimentConfig, config_filepath: str):\n \"\"\"\n Save the experiment config to a json file\n\n :param experiment_config: config object to save\n :param config_filepath: path where to write the config json file\n \"\"\"\n with open(config_filepath, \"w\") as f:\n json.dump(experiment_config, f, indent=4, default=lambda x: x.__dict__)\n\n\ndef load_config(config_filepath: str) -> ExperimentConfig:\n \"\"\"\n Load the experiment config from a json file\n\n :param config_filepath: path of the config json file to load\n :return: loaded config object\n \"\"\"\n if not os.path.isfile(config_filepath):\n raise FileNotFoundError(f\"Configuration file not found at path: '{config_filepath}'.\")\n\n with open(config_filepath, \"r\") as f:\n return ExperimentConfig(**json.load(f))\n", "id": "2783528", "language": "Python", "matching_score": 1.7145154476165771, "max_stars_count": 29, "path": "wormpose/config/experiment_config.py" }, { "content": "WORM_IS_LIGHTER = \"worm_is_lighter\"\n\n\ndef 
add_image_processing_arguments(parser):\n parser.add_argument(f\"--{WORM_IS_LIGHTER}\", action=\"store_true\")\n", "id": "9088752", "language": "Python", "matching_score": 0.24246755242347717, "max_stars_count": 29, "path": "wormpose/dataset/image_processing/options.py" }, { "content": "\"\"\"\nSimple BaseFramePreprocesing implementation\n\"\"\"\n\nfrom typing import Optional, Tuple, Callable\n\nimport cv2\nimport numpy as np\n\nfrom wormpose.dataset.base_dataset import BaseFramePreprocessing\nfrom wormpose.dataset.image_processing.image_utils import segment_foreground, OtsuThreshold\n\n\nclass SimpleFramePreprocessing(BaseFramePreprocessing):\n def __init__(\n self,\n is_foreground_lighter_than_background: bool,\n foreground_dilate_struct_element=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)),\n foreground_close_struct_element=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)),\n threshold_fn: Callable = OtsuThreshold(blur_kernel=(5, 5)),\n ):\n self.is_foreground_lighter_than_background = is_foreground_lighter_than_background\n self.foreground_dilate_struct_element = foreground_dilate_struct_element\n self.foreground_close_struct_element = foreground_close_struct_element\n self.threshold_fn = threshold_fn\n\n def process(self, frame: np.ndarray, background_threshold: Optional[int] = None) -> Tuple[np.ndarray, int]:\n return segment_foreground(\n frame,\n self.foreground_close_struct_element,\n self.foreground_dilate_struct_element,\n self.threshold_fn,\n self.is_foreground_lighter_than_background,\n )\n", "id": "12319031", "language": "Python", "matching_score": 0.21441173553466797, "max_stars_count": 29, "path": "wormpose/dataset/image_processing/simple_frame_preprocessing.py" }, { "content": "import sys\n\ninput_log = sys.argv[1]\n\nwith open(input_log, \"r\") as f:\n lines = f.readlines()\n\nwp_solved = 0\ntotal = 0\ntierpsy_solved = 0\n\nfor line in lines:\n numbers = [int(s) for s in line.split() if s.isdigit()]\n wp_solved += numbers[0]\n total += numbers[1]\n tierpsy_solved += numbers[2]\n\nname = input_log[:-4]\n\nprint(f\"For: {name} (total frames:{total}), tierpsy solved {tierpsy_solved} ({100*tierpsy_solved / total:.1f}%), wormpose solved {wp_solved} ({100*wp_solved / total:.1f}%) or {100*(wp_solved - tierpsy_solved)/total:.1f}% more than tierpsy\")\n\n", "id": "929607", "language": "Python", "matching_score": 0.24211473762989044, "max_stars_count": 0, "path": "extras/results_compare/count_solved.py" } ]
3.147613
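The wormpose record above loads its evaluation templates by calling pickle.load repeatedly on a single file until EOFError is raised (see _load_templates in wormpose/commands/evaluate_model.py). A minimal, self-contained sketch of that multi-record pickle pattern, assuming a hypothetical file name "samples.pkl" (not part of the original sources):

import pickle

# write: each dump() call appends one independent record to the same file
with open("samples.pkl", "wb") as f:          # hypothetical file name
    for i in range(3):
        pickle.dump((i, i * i), f)

# read: keep calling load() until the end of the file raises EOFError
records = []
with open("samples.pkl", "rb") as f:
    try:
        while True:
            records.append(pickle.load(f))
    except EOFError:
        pass                                  # all records have been read

print(records)                                # [(0, 0), (1, 1), (2, 4)]

Because every dump() appends an independent record, the reader does not need to know the record count in advance; reaching EOFError is the normal termination condition rather than an error.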
GKeppler
[ { "content": "from model.semseg.base import BaseNet\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\n\n\nclass DeepLabV3Plus(BaseNet):\n def __init__(self, backbone, nclass, args=None):\n super(DeepLabV3Plus, self).__init__(backbone, nclass, args)\n\n low_level_channels = self.backbone.channels[0]\n high_level_channels = self.backbone.channels[-1]\n\n self.head = ASPPModule(high_level_channels, (12, 24, 36))\n\n self.reduce = nn.Sequential(\n nn.Conv2d(low_level_channels, 48, 1, bias=False),\n nn.BatchNorm2d(48),\n nn.ReLU(True),\n )\n\n self.fuse = nn.Sequential(\n nn.Conv2d(high_level_channels // 8 + 48, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Dropout(0.1, False),\n )\n\n self.classifier = nn.Conv2d(256, nclass, 1, bias=True)\n\n def base_forward(self, x):\n h, w = x.shape[-2:]\n\n c1, _, _, c4 = self.backbone.base_forward(x)\n\n c4 = self.head(c4)\n c4 = F.interpolate(c4, size=c1.shape[-2:], mode=\"bilinear\", align_corners=True)\n\n c1 = self.reduce(c1)\n\n out = torch.cat([c1, c4], dim=1)\n out = self.fuse(out)\n\n out = self.classifier(out)\n out = F.interpolate(out, size=(h, w), mode=\"bilinear\", align_corners=True)\n\n return out\n\n\ndef ASPPConv(in_channels, out_channels, atrous_rate):\n block = nn.Sequential(\n nn.Conv2d(\n in_channels,\n out_channels,\n 3,\n padding=atrous_rate,\n dilation=atrous_rate,\n bias=False,\n ),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(True),\n )\n return block\n\n\nclass ASPPPooling(pl.LightningModule):\n def __init__(self, in_channels, out_channels):\n super(ASPPPooling, self).__init__()\n self.gap = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(True),\n )\n\n def forward(self, x):\n h, w = x.shape[-2:]\n pool = self.gap(x)\n return F.interpolate(pool, (h, w), mode=\"bilinear\", align_corners=True)\n\n\nclass ASPPModule(pl.LightningModule):\n def __init__(self, in_channels, atrous_rates):\n super(ASPPModule, self).__init__()\n out_channels = in_channels // 8\n rate1, rate2, rate3 = atrous_rates\n\n self.b0 = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(True),\n )\n self.b1 = ASPPConv(in_channels, out_channels, rate1)\n self.b2 = ASPPConv(in_channels, out_channels, rate2)\n self.b3 = ASPPConv(in_channels, out_channels, rate3)\n self.b4 = ASPPPooling(in_channels, out_channels)\n\n self.project = nn.Sequential(\n nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(True),\n nn.Dropout2d(0.5, False),\n )\n\n def forward(self, x):\n feat0 = self.b0(x)\n feat1 = self.b1(x)\n feat2 = self.b2(x)\n feat3 = self.b3(x)\n feat4 = self.b4(x)\n y = torch.cat((feat0, feat1, feat2, feat3, feat4), 1)\n return self.project(y)\n", "id": "9504478", "language": "Python", "matching_score": 2.935969591140747, "max_stars_count": 0, "path": "model/semseg/deeplabv3plus.py" }, { "content": "from model.semseg.base import BaseNet\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass DeepLabV2(BaseNet):\n def __init__(self, backbone, nclass, args=None):\n super(DeepLabV2, self).__init__(backbone, nclass, args)\n\n self.classifier = nn.ModuleList()\n for dilation in [6, 12, 18, 24]:\n self.classifier.append(\n nn.Conv2d(\n 2048,\n nclass,\n kernel_size=3,\n stride=1,\n 
padding=dilation,\n dilation=dilation,\n bias=True,\n )\n )\n\n for m in self.classifier:\n m.weight.data.normal_(0, 0.01)\n\n def base_forward(self, x):\n h, w = x.shape[-2:]\n\n x = self.backbone.base_forward(x)[-1]\n\n out = self.classifier[0](x)\n for i in range(len(self.classifier) - 1):\n out += self.classifier[i + 1](x)\n\n out = F.interpolate(out, size=(h, w), mode=\"bilinear\", align_corners=True)\n\n return out\n", "id": "3463263", "language": "Python", "matching_score": 0.8694609999656677, "max_stars_count": 0, "path": "model/semseg/deeplabv2.py" }, { "content": "import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\nimport pytorch_lightning as pl\n\n# this is a customized uent from https://github.com/milesial/Pytorch-UNet\n# it uses padding such that the input shape is the same as the output.\n# additional batchnormalization\n\n\nclass Unet2(pl.LightningModule):\n def __init__(self, backbone, nclass, args=None):\n super(Unet2, self).__init__()\n enc_chs = (3, 64, 128, 256, 512, 1024)\n dec_chs = (1024, 512, 256, 128, 64)\n self.out_sz = (572, 572)\n retain_dim = (False,)\n self.encoder = Encoder(enc_chs)\n self.decoder = Decoder(dec_chs)\n self.head = nn.Conv2d(dec_chs[-1], nclass, 1)\n self.retain_dim = retain_dim\n\n def forward(self, x):\n enc_ftrs = self.encoder(x)\n out = self.decoder(enc_ftrs[::-1][0], enc_ftrs[::-1][1:])\n out = self.head(out)\n if self.retain_dim:\n out = F.interpolate(out, self.out_sz)\n return out\n\n def training_step(self, batch, batch_idx):\n img, mask = batch\n pred = self(img)\n loss = self.criterion(pred, mask)\n # loss = F.cross_entropy(pred, mask, ignore_index=255)\n # loss.requires_grad = True\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n img, mask, id = batch\n pred = self(img)\n self.metric.add_batch(\n torch.argmax(pred, dim=1).cpu().numpy(), mask.cpu().numpy()\n )\n val_loss = self.metric.evaluate()[-1]\n # wandb.log({\"mIOU\": mIOU,\"step_val\":step_val})\n return {\"val_loss\": val_loss}\n\n def validation_epoch_end(self, outputs):\n val_loss = outputs[-1][\"val_loss\"]\n log = {\"mean mIOU\": val_loss * 100}\n mIOU = val_loss * 100.0\n if mIOU > self.previous_best:\n if self.previous_best != 0:\n os.remove(\n os.path.join(\n self.args[\"save_path\"],\n \"%s_%s_mIOU%.2f.pth\"\n % (self.args[\"model\"], self.backbone_name, self.previous_best),\n )\n )\n self.previous_best = mIOU\n torch.save(\n self.state_dict(),\n os.path.join(\n self.args[\"save_path\"],\n \"%s_%s_mIOU%.2f.pth\"\n % (self.args[\"model\"], self.backbone_name, mIOU),\n ),\n )\n return {\"log\": log, \"val_loss\": val_loss}\n\n def predict_step(self, batch, batch_idx: int, dataloader_idx: int = None):\n img, mask, id = batch\n pred = self(img)\n # batch_size = batch[0].size(0)\n # prediction_file = getattr(self, \"prediction_file\", \"predictions.pt\")\n # lazy_ids = torch.arange(batch_idx * batch_size, batch_idx * batch_size + batch_size)\n # self.write_prediction(\"idxs\", lazy_ids, prediction_file)\n # self.write_prediction(\"preds\", output, prediction_file)\n return [pred, mask, id]\n\n def configure_optimizers(self):\n optimizer = SGD(\n self.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4 # self.lr,\n )\n # scheduler = torch.optim.ReduceLROnPlateau(optimizer, mode='min')\n return [optimizer] # , [scheduler]\n\n\nclass Block(pl.LightningModule):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.conv1 = 
nn.Conv2d(in_ch, out_ch, 3)\n self.relu = nn.ReLU()\n self.conv2 = nn.Conv2d(out_ch, out_ch, 3)\n\n def forward(self, x):\n return self.conv2(self.relu(self.conv1(x)))\n\n\nclass Encoder(pl.LightningModule):\n def __init__(self, chs=(3, 64, 128, 256, 512, 1024)):\n super().__init__()\n self.enc_blocks = nn.ModuleList(\n [Block(chs[i], chs[i + 1]) for i in range(len(chs) - 1)]\n )\n self.pool = nn.MaxPool2d(2)\n\n def forward(self, x):\n ftrs = []\n for block in self.enc_blocks:\n x = block(x)\n ftrs.append(x)\n x = self.pool(x)\n return ftrs\n\n\nclass Decoder(pl.LightningModule):\n def __init__(self, chs=(1024, 512, 256, 128, 64)):\n super().__init__()\n self.chs = chs\n self.upconvs = nn.ModuleList(\n [nn.ConvTranspose2d(chs[i], chs[i + 1], 2, 2) for i in range(len(chs) - 1)]\n )\n self.dec_blocks = nn.ModuleList(\n [Block(chs[i], chs[i + 1]) for i in range(len(chs) - 1)]\n )\n\n def forward(self, x, encoder_features):\n for i in range(len(self.chs) - 1):\n x = self.upconvs[i](x)\n enc_ftrs = self.crop(encoder_features[i], x)\n x = torch.cat([x, enc_ftrs], dim=1)\n x = self.dec_blocks[i](x)\n return x\n\n def crop(self, enc_ftrs, x):\n _, _, H, W = x.shape\n enc_ftrs = transforms.CenterCrop([H, W])(enc_ftrs)\n return enc_ftrs\n", "id": "7159145", "language": "Python", "matching_score": 5.556571006774902, "max_stars_count": 0, "path": "model/semseg/unet2.py" }, { "content": "from model.backbone.resnet import resnet50, resnet101, resnet18\n\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\nfrom torch.nn import CrossEntropyLoss\nfrom torch.optim import SGD\nfrom utils import meanIOU, color_map\nimport torch\nimport os\nfrom PIL import Image\nimport numpy as np\n\n\nclass BaseNet(pl.LightningModule):\n # @staticmethod\n # def add_model_specific_args(parent_parser):\n # parser = parent_parser.add_argument_group(\"BaseNet\")\n # # initial learing rate\n # # parser.add_argument(\"--lr\", type=float, default=0.001)\n # return parent_parser\n\n def __init__(self, backbone, nclass, args):\n super(BaseNet, self).__init__()\n backbone_zoo = {\n \"resnet18\": resnet18,\n \"resnet50\": resnet50,\n \"resnet101\": resnet101,\n }\n self.backbone_name = backbone\n if backbone is not None:\n self.backbone = backbone_zoo[backbone](pretrained=True)\n self.metric = meanIOU(num_classes=nclass)\n self.predict_metric = meanIOU(num_classes=nclass)\n self.criterion = CrossEntropyLoss() # ignore_index=255)\n self.previous_best = 0.0\n self.args = args\n\n def base_forward(self, x):\n h, w = x.shape[-2:]\n x = self.backbone.base_forward(x)[-1]\n x = self.head(x)\n x = F.interpolate(x, (h, w), mode=\"bilinear\", align_corners=True)\n return x\n\n def forward(self, x, tta=False):\n if not tta:\n return self.base_forward(x)\n\n else:\n h, w = x.shape[-2:]\n # scales = [0.5, 0.75, 1.0]\n # to avoid cuda out of memory\n scales = [0.5, 0.75, 1.0, 1.5, 2.0]\n\n final_result = None\n\n for scale in scales:\n cur_h, cur_w = int(h * scale), int(w * scale)\n cur_x = F.interpolate(\n x, size=(cur_h, cur_w), mode=\"bilinear\", align_corners=True\n )\n\n out = F.softmax(self.base_forward(cur_x), dim=1)\n out = F.interpolate(out, (h, w), mode=\"bilinear\", align_corners=True)\n final_result = out if final_result is None else (final_result + out)\n\n out = F.softmax(self.base_forward(cur_x.flip(3)), dim=1).flip(3)\n out = F.interpolate(out, (h, w), mode=\"bilinear\", align_corners=True)\n final_result += out\n\n return final_result\n\n def training_step(self, batch, batch_idx):\n img, mask = batch\n pred = 
self(img)\n if self.args.dataset == \"melanoma\":\n mask = mask.clip(max=1) # clips max value to 1: 255 to 1\n loss = self.criterion(pred, mask)\n # loss = F.cross_entropy(pred, mask, ignore_index=255)\n # loss.requires_grad = True\n return {\"loss\": loss}\n\n def validation_step(self, batch, batch_idx):\n img, mask, id = batch\n pred = self(img)\n self.metric.add_batch(\n torch.argmax(pred, dim=1).cpu().numpy(), mask.cpu().numpy()\n )\n val_acc = self.metric.evaluate()[-1]\n # wandb.log({\"mIOU\": mIOU,\"step_val\":step_val})\n return {\"val_acc\": val_acc}\n\n def validation_epoch_end(self, outputs):\n val_acc = outputs[-1][\"val_acc\"]\n log = {\"mean mIOU\": val_acc * 100}\n mIOU = val_acc * 100.0\n if mIOU > self.previous_best:\n if self.previous_best != 0:\n os.remove(\n os.path.join(\n self.args.save_path,\n \"%s_%s_mIOU%.2f.pth\"\n % (self.args.model, self.backbone_name, self.previous_best),\n )\n )\n self.previous_best = mIOU\n torch.save(\n self.state_dict(),\n os.path.join(\n self.args.save_path,\n \"%s_%s_mIOU%.2f.pth\" % (self.args.model, self.backbone_name, mIOU),\n ),\n )\n return {\"log\": log, \"val_acc\": val_acc}\n\n def predict_step(self, batch, batch_idx: int, dataloader_idx: int = None):\n img, mask, id = batch\n pred = self(img)\n pred = torch.argmax(pred, dim=1).cpu()\n\n # for metric checking progressbar callback not implemented\n # self.predict_metric.add_batch(pred.numpy(), mask.cpu().numpy())\n # mIOU = self.predict_metric.evaluate()[-1]\n pred = Image.fromarray(pred.squeeze(0).numpy().astype(np.uint8), mode=\"P\")\n pred.putpalette(color_map(self.args.dataset))\n pred.save(\n \"%s/%s\"\n % (self.args.pseudo_mask_path, os.path.basename(id[0].split(\" \")[1]))\n )\n return [pred, mask, id]\n\n def configure_optimizers(self):\n optimizer = SGD(\n self.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4 # self.lr,\n )\n # scheduler = torch.optim.ReduceLROnPlateau(optimizer, mode='min')\n return [optimizer] # , [scheduler]\n", "id": "9679863", "language": "Python", "matching_score": 3.497620105743408, "max_stars_count": 0, "path": "model/semseg/base.py" }, { "content": "from dataset.semi import SemiDataset\nfrom model.semseg.deeplabv2 import DeepLabV2\nfrom model.semseg.deeplabv3plus import DeepLabV3Plus\nfrom model.semseg.small_unet import SmallUnet\nfrom model.semseg.pspnet import PSPNet\nfrom model.semseg.unet import Unet\nfrom utils import count_params, meanIOU, color_map, mulitmetrics\n\nimport argparse\nfrom copy import deepcopy\nimport numpy as np\nimport os\nfrom PIL import Image\nimport torch\nfrom torch.nn import CrossEntropyLoss, DataParallel\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport wandb\nimport cv2\nimport yaml\n\nMODE = None\nglobal step_train\nglobal step_val\nstep_train = 0\nstep_val = 0\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"ST and ST++ Framework\")\n\n # basic settings\n parser.add_argument(\n \"--data-root\",\n type=str,\n default=\"/lsdf/kit/iai/projects/iai-aida/Daten_Keppler/BreastCancer\",\n )\n parser.add_argument(\n \"--dataset\",\n type=str,\n choices=[\"pascal\", \"cityscapes\", \"melanoma\", \"pneumothorax\", \"breastCancer\"],\n default=\"breastCancer\",\n )\n parser.add_argument(\"--batch-size\", type=int, default=16)\n parser.add_argument(\"--lr\", type=float, default=None)\n parser.add_argument(\"--epochs\", type=int, default=30)\n parser.add_argument(\"--crop-size\", type=int, default=None)\n parser.add_argument(\n \"--backbone\",\n 
type=str,\n choices=[\"resnet18\", \"resnet50\", \"resnet101\"],\n default=\"resnet50\",\n )\n parser.add_argument(\n \"--model\",\n type=str,\n choices=[\"deeplabv3plus\", \"pspnet\", \"deeplabv2\", \"unet\", \"smallUnet\"],\n default=\"unet\",\n )\n parser.add_argument(\n \"--val-split\", type=str, default=\"val_split_0\"\n ) # need to implement in Dataloader, crrently not working\n\n # semi-supervised settings\n parser.add_argument(\"--split\", type=str, default=\"1_4\")\n parser.add_argument(\"--shuffle\", type=int, default=0)\n # these are derived from the above split, shuffle and dataset. They dont need to be set\n parser.add_argument(\n \"--split-file-path\", type=str, default=None\n ) # \"dataset/splits/melanoma/1_30/split_0/split_sample.yaml\")\n parser.add_argument(\n \"--test-file-path\", type=str, default=None\n ) # \"dataset/splits/melanoma/test_sample.yaml\")\n parser.add_argument(\"--pseudo-mask-path\", type=str, default=None)\n parser.add_argument(\"--save-path\", type=str, default=None)\n parser.add_argument(\"--reliable-id-path\", type=str, default=None)\n\n parser.add_argument(\n \"--plus\",\n dest=\"plus\",\n default=True,\n action=\"store_true\",\n help=\"whether to use ST++\",\n )\n parser.add_argument(\n \"--use-wandb\", default=False, help=\"whether to use WandB for logging\"\n )\n parser.add_argument(\n \"--use-tta\", default=True, help=\"whether to use Test Time Augmentation\"\n )\n\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n if args.use_wandb:\n wandb.init(project=\"ST++\", entity=\"gkeppler\")\n wandb.run.name = (\n args.dataset\n + \" \"\n + args.split_file_path.split(\"/\")[-3]\n + (\" ST++\" if args.plus else \" ST\")\n )\n wandb.define_metric(\"step_train\")\n wandb.define_metric(\"step_val\")\n wandb.define_metric(\"step_epoch\")\n wandb.define_metric(\"Pictures\", step_metric=\"step_epoch\")\n wandb.define_metric(\"loss\", step_metric=\"step_train\")\n wandb.define_metric(\"mIOU\", step_metric=\"step_val\")\n\n wandb.config.update(args)\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n if not os.path.exists(args.pseudo_mask_path):\n os.makedirs(args.pseudo_mask_path)\n if args.plus and args.reliable_id_path is None:\n exit(\"Please specify reliable-id-path in ST++.\")\n\n criterion = CrossEntropyLoss() # ignore_index=255) 255 is white is melanoma\n # changed crop from None to args.crop_size\n valset = SemiDataset(\n args.dataset, args.data_root, \"val\", args.crop_size, args.split_file_path\n )\n valloader = DataLoader(\n valset,\n batch_size=4 if args.dataset == \"cityscapes\" else 1,\n shuffle=False,\n pin_memory=True,\n num_workers=4,\n drop_last=False,\n )\n\n # <====================== Supervised training with labeled images (SupOnly) ======================>\n print(\n \"\\n================> Total stage 1/%i: \"\n \"Supervised training on labeled images (SupOnly)\" % (6 if args.plus else 3)\n )\n\n global MODE\n MODE = \"train\"\n\n trainset = SemiDataset(\n args.dataset, args.data_root, MODE, args.crop_size, args.split_file_path\n )\n trainset.ids = 2 * trainset.ids if len(trainset.ids) < 200 else trainset.ids\n trainloader = DataLoader(\n trainset,\n batch_size=args.batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=16,\n drop_last=True,\n ) # ,sampler=torch.utils.data.SubsetRandomSampler(subset_indices))\n\n model, optimizer = init_basic_elems(args)\n print(\"\\nParams: %.1fM\" % count_params(model))\n\n best_model, checkpoints = train(\n model, trainloader, valloader, criterion, optimizer, 
args\n )\n\n # <====================== Test supervised model on testset (SupOnly) ======================>\n print(\"\\n\\n\\n================> Test supervised model on testset (SupOnly)\")\n testset = SemiDataset(\n args.dataset, args.data_root, \"test\", args.crop_size, args.test_file_path\n )\n testloader = DataLoader(\n testset, 1, shuffle=False, pin_memory=True, num_workers=4, drop_last=False\n )\n\n test(best_model, testloader, args)\n\n \"\"\"\n ST framework without selective re-training\n \"\"\"\n if not args.plus:\n # <============================= Pseudo label all unlabeled images =============================>\n print(\n \"\\n\\n\\n================> Total stage 2/3: Pseudo labeling all unlabeled images\"\n )\n\n dataset = SemiDataset(\n args.dataset, args.data_root, \"label\", args.crop_size, args.split_file_path\n )\n dataloader = DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n pin_memory=True,\n num_workers=4,\n drop_last=False,\n )\n\n label(best_model, dataloader, args)\n\n # <======================== Re-training on labeled and unlabeled images ========================>\n print(\n \"\\n\\n\\n================> Total stage 3/3: Re-training on labeled and unlabeled images\"\n )\n\n MODE = \"semi_train\"\n\n trainset = SemiDataset(\n args.dataset,\n args.data_root,\n MODE,\n args.crop_size,\n args.split_file_path,\n args.pseudo_mask_path,\n )\n trainloader = DataLoader(\n trainset,\n batch_size=args.batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=16,\n drop_last=True,\n )\n\n model, optimizer = init_basic_elems(args)\n\n best_model = train(model, trainloader, valloader, criterion, optimizer, args)\n\n # <====================== Test supervised model on testset (SupOnly) ======================>\n print(\"\\n\\n\\n================> Test supervised model on testset (Re-trained)\")\n\n test(best_model, testloader, args)\n\n return\n\n \"\"\"\n ST++ framework with selective re-training\n \"\"\"\n # <===================================== Select Reliable IDs =====================================>\n print(\n \"\\n\\n\\n================> Total stage 2/6: Select reliable images for the 1st stage re-training\"\n )\n\n dataset = SemiDataset(\n args.dataset, args.data_root, \"label\", args.crop_size, args.split_file_path\n )\n dataloader = DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n pin_memory=True,\n num_workers=4,\n drop_last=False,\n )\n\n select_reliable(checkpoints, dataloader, args)\n\n # <================================ Pseudo label reliable images =================================>\n print(\"\\n\\n\\n================> Total stage 3/6: Pseudo labeling reliable images\")\n\n cur_unlabeled_id_path = os.path.join(args.reliable_id_path, \"reliable_ids.yaml\")\n dataset = SemiDataset(\n args.dataset,\n args.data_root,\n \"label\",\n args.crop_size,\n cur_unlabeled_id_path,\n None,\n True,\n )\n dataloader = DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n pin_memory=True,\n num_workers=4,\n drop_last=False,\n )\n\n label(best_model, dataloader, args)\n\n # <================================== The 1st stage re-training ==================================>\n print(\n \"\\n\\n\\n================> Total stage 4/6: The 1st stage re-training on labeled and reliable unlabeled images\"\n )\n\n MODE = \"semi_train\"\n\n trainset = SemiDataset(\n args.dataset,\n args.data_root,\n MODE,\n args.crop_size,\n cur_unlabeled_id_path,\n args.pseudo_mask_path,\n True,\n )\n trainloader = DataLoader(\n trainset,\n batch_size=args.batch_size,\n shuffle=True,\n 
pin_memory=True,\n num_workers=16,\n drop_last=True,\n )\n\n model, optimizer = init_basic_elems(args)\n\n best_model = train(model, trainloader, valloader, criterion, optimizer, args)\n\n # <=============================== Pseudo label unreliable images ================================>\n print(\"\\n\\n\\n================> Total stage 5/6: Pseudo labeling unreliable images\")\n\n cur_unlabeled_id_path = os.path.join(args.reliable_id_path, \"reliable_ids.yaml\")\n dataset = SemiDataset(\n args.dataset,\n args.data_root,\n \"label\",\n args.crop_size,\n cur_unlabeled_id_path,\n None,\n False,\n )\n dataloader = DataLoader(\n dataset,\n batch_size=1,\n shuffle=False,\n pin_memory=True,\n num_workers=4,\n drop_last=False,\n )\n\n label(best_model, dataloader, args)\n\n # <================================== The 2nd stage re-training ==================================>\n print(\n \"\\n\\n\\n================> Total stage 6/6: The 2nd stage re-training on labeled and all unlabeled images\"\n )\n\n trainset = SemiDataset(\n args.dataset,\n args.data_root,\n MODE,\n args.crop_size,\n args.split_file_path,\n args.pseudo_mask_path,\n )\n trainloader = DataLoader(\n trainset,\n batch_size=args.batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=16,\n drop_last=True,\n )\n\n model, optimizer = init_basic_elems(args)\n\n best_model = train(model, trainloader, valloader, criterion, optimizer, args)\n\n # <====================== Test supervised model on testset (Re-trained) ======================>\n print(\"\\n\\n\\n================> Test supervised model on testset (Re-trained)\")\n\n test(best_model, testloader, args)\n\n wandb.finish()\n\n\ndef init_basic_elems(args):\n model_zoo = {\n \"deeplabv3plus\": DeepLabV3Plus,\n \"pspnet\": PSPNet,\n \"deeplabv2\": DeepLabV2,\n \"unet\": Unet,\n \"smallUnet\": SmallUnet,\n }\n model = model_zoo[args.model](\n args.backbone,\n 21\n if args.dataset == \"pascal\"\n else 2\n if args.dataset == \"melanoma\"\n else 2\n if args.dataset == \"breastCancer\"\n else 19,\n )\n\n head_lr_multiple = 10.0\n if args.model == \"deeplabv2\":\n assert args.backbone == \"resnet101\"\n model.load_state_dict(\n torch.load(\"pretrained/deeplabv2_resnet101_coco_pretrained.pth\")\n )\n head_lr_multiple = 1.0\n\n optimizer = SGD(\n [\n {\"params\": model.backbone.parameters(), \"lr\": args.lr},\n {\n \"params\": [\n param\n for name, param in model.named_parameters()\n if \"backbone\" not in name\n ],\n \"lr\": args.lr * head_lr_multiple,\n },\n ],\n lr=args.lr,\n momentum=0.9,\n weight_decay=1e-4,\n )\n\n model = DataParallel(model).cuda()\n\n return model, optimizer\n\n\ndef train(model, trainloader, valloader, criterion, optimizer, args):\n iters = 0\n total_iters = len(trainloader) * args.epochs\n global step_train\n global step_val\n\n previous_best = 0.0\n\n global MODE\n\n if MODE == \"train\":\n checkpoints = []\n\n for epoch in range(args.epochs):\n print(\n \"\\n==> Epoch %i, learning rate = %.4f\\t\\t\\t\\t\\t previous best = %.2f\"\n % (epoch, optimizer.param_groups[0][\"lr\"], previous_best)\n )\n\n model.train()\n total_loss = 0.0\n tbar = tqdm(trainloader)\n\n for i, (img, mask) in enumerate(tbar):\n if args.dataset == \"melanoma\" or args.dataset == \"breastCancer\":\n mask = mask.clip(max=1)\n\n img, mask = img.cuda(), mask.cuda()\n\n pred = model(img)\n loss = criterion(pred, mask)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n\n iters += 1\n lr = args.lr * (1 - iters / total_iters) ** 0.9\n 
optimizer.param_groups[0][\"lr\"] = lr\n optimizer.param_groups[1][\"lr\"] = (\n lr * 1.0 if args.model == \"deeplabv2\" else lr * 10.0\n )\n\n # wandb log with custom step\n if args.use_wandb:\n wandb.log({\"loss\": loss, \"step_train\": step_train, \"epoch\": epoch})\n step_train += 1\n tbar.set_description(\"Loss: %.3f\" % (total_loss / (i + 1)))\n\n metric = meanIOU(\n num_classes=21\n if args.dataset == \"pascal\"\n else 2\n if args.dataset == \"melanoma\"\n else 2\n if args.dataset == \"breastCancer\"\n else 19\n )\n\n model.eval()\n tbar = tqdm(valloader)\n # set i for sample images\n i = 0\n wandb_iamges = []\n torch.cuda.empty_cache()\n with torch.no_grad():\n for img, mask, _ in tbar:\n if args.dataset == \"melanoma\" or args.dataset == \"breastCancer\":\n mask = mask.clip(max=1)\n i = i + 1\n img = img.cuda()\n pred = model(img)\n pred = torch.argmax(pred, dim=1)\n\n metric.add_batch(pred.cpu().numpy(), mask.numpy())\n mIOU = metric.evaluate()[-1]\n if args.use_wandb:\n wandb.log({\"mIOU\": mIOU, \"step_val\": step_val})\n if i <= 10:\n # wandb.log({\"img\": [wandb.Image(img, caption=\"img\")]})\n # wandb.log({\"mask\": [wandb.Image(pred.cpu().numpy(), caption=\"mask\")]})\n class_lables = dict((el, \"something\") for el in list(range(21)))\n class_lables.update({255: \"boarder\"})\n class_lables.update({0: \"nothing\"})\n wandb_iamge = wandb.Image(\n cv2.resize(\n np.moveaxis(\n np.squeeze(img.cpu().numpy(), axis=0), 0, -1\n ),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n masks={\n \"predictions\": {\n \"mask_data\": cv2.resize(\n np.squeeze(pred.cpu().numpy(), axis=0),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n \"class_labels\": class_lables,\n },\n \"ground_truth\": {\n \"mask_data\": cv2.resize(\n np.squeeze(mask.numpy(), axis=0),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n \"class_labels\": class_lables,\n },\n },\n )\n wandb_iamges.append(wandb_iamge)\n step_val += 1\n\n tbar.set_description(\"mIOU: %.2f\" % (mIOU * 100.0))\n if args.use_wandb:\n wandb.log({\"Pictures\": wandb_iamges, \"step_epoch\": epoch})\n wandb.log({\"final mIOU\": mIOU})\n mIOU *= 100.0\n if mIOU > previous_best:\n if previous_best != 0:\n os.remove(\n os.path.join(\n args.save_path,\n \"%s_%s_%.2f.pth\" % (args.model, args.backbone, previous_best),\n )\n )\n previous_best = mIOU\n torch.save(\n model.module.state_dict(),\n os.path.join(\n args.save_path, \"%s_%s_%.2f.pth\" % (args.model, args.backbone, mIOU)\n ),\n )\n\n best_model = deepcopy(model)\n\n if MODE == \"train\" and (\n (epoch + 1) in [args.epochs // 3, args.epochs * 2 // 3, args.epochs]\n ):\n checkpoints.append(deepcopy(model))\n\n if MODE == \"train\":\n return best_model, checkpoints\n\n return best_model\n\n\ndef select_reliable(models, dataloader, args):\n if not os.path.exists(args.reliable_id_path):\n os.makedirs(args.reliable_id_path)\n\n for i in range(len(models)):\n models[i].eval()\n tbar = tqdm(dataloader)\n\n id_to_reliability = []\n\n with torch.no_grad():\n for img, mask, id in tbar:\n if args.dataset == \"melanoma\" or args.dataset == \"breastCancer\":\n mask = mask.clip(max=1)\n img = img.cuda()\n\n preds = []\n for model in models:\n preds.append(torch.argmax(model(img), dim=1).cpu().numpy())\n\n mIOU = []\n for i in range(len(preds) - 1):\n metric = meanIOU(\n num_classes=21\n if args.dataset == \"pascal\"\n else 2\n if args.dataset == \"melanoma\"\n else 2\n if args.dataset == \"breastCancer\"\n else 19\n )\n metric.add_batch(preds[i], preds[-1])\n 
mIOU.append(metric.evaluate()[-1])\n\n reliability = sum(mIOU) / len(mIOU)\n id_to_reliability.append((id[0], reliability))\n\n labeled_ids = []\n with open(args.split_file_path, \"r\") as file:\n split_dict = yaml.load(file, Loader=yaml.FullLoader)\n labeled_ids = split_dict[args.val_split][\"labeled\"]\n\n yaml_dict = dict()\n yaml_dict[args.val_split] = dict(\n labeled=labeled_ids,\n reliable=[i[0] for i in id_to_reliability[: len(id_to_reliability) // 2]],\n unreliable=[i[0] for i in id_to_reliability[len(id_to_reliability) // 2:]],\n )\n # save to yaml\n with open(\n os.path.join(args.reliable_id_path, \"reliable_ids.yaml\"), \"w+\"\n ) as outfile:\n yaml.dump(yaml_dict, outfile, default_flow_style=False)\n\n\ndef label(model, dataloader, args):\n model.eval()\n tbar = tqdm(dataloader)\n\n metric = meanIOU(\n num_classes=21\n if args.dataset == \"pascal\"\n else 2\n if args.dataset == \"melanoma\"\n else 2\n if args.dataset == \"breastCancer\"\n else 19\n )\n cmap = color_map(args.dataset)\n\n with torch.no_grad():\n for img, mask, id in tbar:\n if args.dataset == \"melanoma\" or args.dataset == \"breastCancer\":\n mask = mask.clip(max=1) # clips max value to 1: 255 to 1\n img = img.cuda()\n pred = model(img, args.use_tta)\n pred = torch.argmax(pred, dim=1).cpu()\n\n metric.add_batch(pred.numpy(), mask.numpy())\n mIOU = metric.evaluate()[-1]\n\n pred = Image.fromarray(pred.squeeze(0).numpy().astype(np.uint8), mode=\"P\")\n pred.putpalette(cmap)\n\n pred.save(\n \"%s/%s\" % (args.pseudo_mask_path, os.path.basename(id[0].split(\" \")[1]))\n )\n\n tbar.set_description(\"mIOU: %.2f\" % (mIOU * 100.0))\n\n\ndef test(model, dataloader, args):\n metric = mulitmetrics(\n num_classes=21\n if args.dataset == \"pascal\"\n else 2\n if args.dataset == \"melanoma\"\n else 2\n if args.dataset == \"breastCancer\"\n else 19\n )\n model.eval()\n tbar = tqdm(dataloader)\n # set i for sample images\n i = 0\n wandb_iamges = []\n torch.cuda.empty_cache()\n with torch.no_grad():\n for img, mask, _ in tbar:\n if args.dataset == \"melanoma\" or args.dataset == \"breastCancer\":\n mask = mask.clip(max=1) # clips max value to 1: 255 to 1\n i = i + 1\n img = img.cuda()\n pred = model(img, args.use_tta)\n pred = torch.argmax(pred, dim=1)\n\n metric.add_batch(pred.cpu().numpy(), mask.numpy())\n overall_acc, mIOU, mDICE = metric.evaluate()\n tbar.set_description(\n \"test mIOU: %.2f, mDICE: %.2f,overall_acc: %.2f\"\n % (mIOU * 100.0, mDICE * 100.0, overall_acc * 100.0)\n )\n if args.use_wandb:\n if i <= 10:\n # wandb.log({\"img\": [wandb.Image(img, caption=\"img\")]})\n # wandb.log({\"mask\": [wandb.Image(pred.cpu().numpy(), caption=\"mask\")]})\n class_lables = dict((el, \"something\") for el in list(range(21)))\n class_lables.update({255: \"boarder\"})\n class_lables.update({0: \"nothing\"})\n wandb_iamge = wandb.Image(\n cv2.resize(\n np.moveaxis(np.squeeze(img.cpu().numpy(), axis=0), 0, -1),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n masks={\n \"predictions\": {\n \"mask_data\": cv2.resize(\n np.squeeze(pred.cpu().numpy(), axis=0),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n \"class_labels\": class_lables,\n },\n \"ground_truth\": {\n \"mask_data\": cv2.resize(\n np.squeeze(mask.numpy(), axis=0),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n \"class_labels\": class_lables,\n },\n },\n )\n wandb_iamges.append(wandb_iamge)\n if args.use_wandb:\n wandb.log({\"Test Pictures\": wandb_iamges})\n wandb.log(\n {\n \"test mIOU\": mIOU,\n \"test mDICE\": mDICE,\n 
\"test overall_acc\": overall_acc,\n }\n )\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n if args.lr is None:\n args.lr = 0.001\n if args.epochs is None:\n args.epochs = {\n \"pascal\": 80,\n \"cityscapes\": 240,\n \"melanoma\": 80,\n \"breastCancer\": 80,\n }[args.dataset]\n # if args.lr is None:\n # args.lr = {'pascal': 0.001, 'cityscapes': 0.004, 'melanoma': 0.001, 'breastCancer': 0.001}[args.dataset] / 16 * args.batch_size\n if args.crop_size is None:\n args.crop_size = {\n \"pascal\": 321,\n \"cityscapes\": 721,\n \"melanoma\": 256,\n \"breastCancer\": 256,\n }[args.dataset]\n\n if args.split_file_path is None:\n args.split_file_path = f\"dataset/splits/{args.dataset}/{args.split}/split_{args.shuffle}/split.yaml\"\n if args.test_file_path is None:\n args.test_file_path = f\"dataset/splits/{args.dataset}/test.yaml\"\n if args.pseudo_mask_path is None:\n args.pseudo_mask_path = (\n f\"outdir/pseudo_masks/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n if args.save_path is None:\n args.save_path = (\n f\"outdir/models/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n if args.reliable_id_path is None:\n args.reliable_id_path = (\n f\"outdir/reliable_ids/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n print()\n print(args)\n\n main(args)\n", "id": "8069036", "language": "Python", "matching_score": 8.909623146057129, "max_stars_count": 0, "path": "main.py" }, { "content": "from dataset.semi import SemiDataset\nfrom model.semseg.deeplabv2 import DeepLabV2\nfrom model.semseg.deeplabv3plus import DeepLabV3Plus\nfrom model.semseg.pspnet import PSPNet\nfrom model.semseg.unet import Unet\nfrom utils import mulitmetrics\n\nimport argparse\nimport numpy as np\nimport os\nimport torch\nfrom torch.nn import DataParallel\nfrom torch.optim import SGD\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nimport wandb\nimport cv2\n\nMODE = None\nglobal step_train\nglobal step_val\nstep_train = 0\nstep_val = 0\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"ST and ST++ Framework\")\n\n # basic settings\n parser.add_argument(\n \"--data-root\",\n type=str,\n default=\"/lsdf/kit/iai/projects/iai-aida/Daten_Keppler/ISIC_Demo_2017\",\n )\n parser.add_argument(\n \"--dataset\",\n type=str,\n choices=[\"pascal\", \"cityscapes\", \"melanoma\"],\n default=\"melanoma\",\n )\n parser.add_argument(\"--batch-size\", type=int, default=16)\n parser.add_argument(\"--lr\", type=float, default=None)\n parser.add_argument(\"--epochs\", type=int, default=80)\n parser.add_argument(\"--crop-size\", type=int, default=None)\n parser.add_argument(\n \"--backbone\",\n type=str,\n choices=[\"resnet18\", \"resnet50\", \"resnet101\"],\n default=\"resnet50\",\n )\n parser.add_argument(\n \"--model\",\n type=str,\n choices=[\"deeplabv3plus\", \"pspnet\", \"deeplabv2\", \"unet\"],\n default=\"unet\",\n )\n parser.add_argument(\n \"--val-split\", type=str, default=\"val_split_0\"\n ) # need to implement in Dataloader, crrently not working\n\n # semi-supervised settings\n parser.add_argument(\"--split\", type=str, default=\"1_30\")\n parser.add_argument(\"--shuffle\", type=int, default=0)\n # these are derived from the above split, shuffle and dataset. 
They dont need to be set\n parser.add_argument(\n \"--split-file-path\", type=str, default=None\n ) # \"dataset/splits/melanoma/1_30/split_0/split_sample.yaml\")\n parser.add_argument(\n \"--test-file-path\", type=str, default=None\n ) # \"dataset/splits/melanoma/test_sample.yaml\")\n parser.add_argument(\"--pseudo-mask-path\", type=str, default=None)\n parser.add_argument(\"--save-path\", type=str, default=None)\n parser.add_argument(\"--reliable-id-path\", type=str, default=None)\n\n parser.add_argument(\n \"--plus\",\n dest=\"plus\",\n default=True,\n action=\"store_true\",\n help=\"whether to use ST++\",\n )\n parser.add_argument(\n \"--use-wandb\", default=True, help=\"whether to use WandB for logging\"\n )\n\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n if args.use_wandb:\n wandb.init(project=\"ST++\", entity=\"gkeppler\")\n wandb.run.name = (\n args.dataset\n + \" \"\n + args.split_file_path.split(\"/\")[-3]\n + (\" ST++\" if args.plus else \" ST\")\n )\n wandb.define_metric(\"step_train\")\n wandb.define_metric(\"step_val\")\n wandb.define_metric(\"step_epoch\")\n wandb.define_metric(\"Pictures\", step_metric=\"step_epoch\")\n wandb.define_metric(\"loss\", step_metric=\"step_train\")\n wandb.define_metric(\"mIOU\", step_metric=\"step_val\")\n\n wandb.config.update(args)\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n if not os.path.exists(args.pseudo_mask_path):\n os.makedirs(args.pseudo_mask_path)\n if args.plus and args.reliable_id_path is None:\n exit(\"Please specify reliable-id-path in ST++.\")\n\n model, optimizer = init_basic_elems(args)\n\n # best_model, checkpoints = train(model, trainloader, valloader, criterion, optimizer, args)\n model.module.load_state_dict(\n torch.load(\n r\"outdir/models/melanoma/1_30/split_0/unet_resnet50_87.46.pth\",\n map_location=\"cuda:0\",\n )\n )\n best_model = model\n # <====================== Test supervised model on testset (SupOnly) ======================>\n print(\"\\n\\n\\n================> Test supervised model on testset (SupOnly)\")\n testset = SemiDataset(\n args.dataset, args.data_root, \"test\", args.crop_size, args.test_file_path\n )\n testloader = DataLoader(\n testset, 1, shuffle=False, pin_memory=True, num_workers=2, drop_last=False\n )\n\n test(best_model, testloader, args)\n\n\ndef init_basic_elems(args):\n model_zoo = {\n \"deeplabv3plus\": DeepLabV3Plus,\n \"pspnet\": PSPNet,\n \"deeplabv2\": DeepLabV2,\n \"unet\": Unet,\n }\n model = model_zoo[args.model](\n args.backbone,\n 21 if args.dataset == \"pascal\" else 2 if args.dataset == \"melanoma\" else 19,\n )\n\n head_lr_multiple = 10.0\n if args.model == \"deeplabv2\":\n assert args.backbone == \"resnet101\"\n model.load_state_dict(\n torch.load(\"pretrained/deeplabv2_resnet101_coco_pretrained.pth\")\n )\n head_lr_multiple = 1.0\n\n optimizer = SGD(\n [\n {\"params\": model.backbone.parameters(), \"lr\": args.lr},\n {\n \"params\": [\n param\n for name, param in model.named_parameters()\n if \"backbone\" not in name\n ],\n \"lr\": args.lr * head_lr_multiple,\n },\n ],\n lr=args.lr,\n momentum=0.9,\n weight_decay=1e-4,\n )\n\n model = DataParallel(model).cuda()\n\n return model, optimizer\n\n\ndef test(model, dataloader, args):\n metric = mulitmetrics(\n num_classes=21\n if args.dataset == \"pascal\"\n else 2\n if args.dataset == \"melanoma\"\n else 19\n )\n model.eval()\n tbar = tqdm(dataloader)\n # set i for sample images\n i = 0\n wandb_iamges = []\n torch.cuda.empty_cache()\n with torch.no_grad():\n for img, mask, _ in 
tbar:\n if args.dataset == \"melanoma\":\n mask = mask.clip(max=1) # clips max value to 1: 255 to 1\n i = i + 1\n img = img.cuda()\n pred = model(img)\n pred = torch.argmax(pred, dim=1)\n\n metric.add_batch(pred.cpu().numpy(), mask.numpy())\n overall_acc, mIOU, mDICE = metric.evaluate()\n tbar.set_description(\n \"test mIOU: %.2f, mDICE: %.2f,overall_acc: %.2f\"\n % (mIOU * 100.0, mDICE * 100.0, overall_acc * 100.0)\n )\n if args.use_wandb:\n if i <= 10:\n # wandb.log({\"img\": [wandb.Image(img, caption=\"img\")]})\n # wandb.log({\"mask\": [wandb.Image(pred.cpu().numpy(), caption=\"mask\")]})\n class_lables = dict((el, \"something\") for el in list(range(21)))\n class_lables.update({255: \"boarder\"})\n class_lables.update({0: \"nothing\"})\n wandb_iamge = wandb.Image(\n cv2.resize(\n np.moveaxis(np.squeeze(img.cpu().numpy(), axis=0), 0, -1),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n masks={\n \"predictions\": {\n \"mask_data\": cv2.resize(\n np.squeeze(pred.cpu().numpy(), axis=0),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n \"class_labels\": class_lables,\n },\n \"ground_truth\": {\n \"mask_data\": cv2.resize(\n np.squeeze(mask.numpy(), axis=0),\n dsize=(100, 100),\n interpolation=cv2.INTER_NEAREST,\n ),\n \"class_labels\": class_lables,\n },\n },\n )\n wandb_iamges.append(wandb_iamge)\n if args.use_wandb:\n wandb.log({\"Test Pictures\": wandb_iamges})\n wandb.log(\n {\n \"test mIOU\": mIOU,\n \"test mDICE\": mDICE,\n \"test overall_acc\": overall_acc,\n }\n )\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n if args.lr is None:\n args.lr = 0.001\n if args.epochs is None:\n args.epochs = {\"pascal\": 80, \"cityscapes\": 240, \"melanoma\": 80}[args.dataset]\n if args.lr is None:\n args.lr = (\n {\"pascal\": 0.001, \"cityscapes\": 0.004, \"melanoma\": 0.001}[args.dataset]\n / 16\n * args.batch_size\n )\n if args.crop_size is None:\n args.crop_size = {\"pascal\": 321, \"cityscapes\": 721, \"melanoma\": 256}[\n args.dataset\n ]\n\n if args.split_file_path is None:\n args.split_file_path = f\"dataset/splits/{args.dataset}/{args.split}/split_{args.shuffle}/split.yaml\"\n if args.test_file_path is None:\n args.test_file_path = f\"dataset/splits/{args.dataset}/test.yaml\"\n if args.pseudo_mask_path is None:\n args.pseudo_mask_path = (\n f\"outdir/pseudo_masks/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n if args.save_path is None:\n args.save_path = (\n f\"outdir/models/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n if args.reliable_id_path is None:\n args.reliable_id_path = (\n f\"outdir/reliable_ids/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n print()\n print(args)\n\n main(args)\n", "id": "6876913", "language": "Python", "matching_score": 2.232053756713867, "max_stars_count": 0, "path": "inference.py" }, { "content": "import numpy as np\n\nEPS = 1e-10\n\n\ndef count_params(model):\n param_num = sum(p.numel() for p in model.parameters())\n return param_num / 1e6\n\n\nclass meanIOU:\n def __init__(self, num_classes):\n self.num_classes = num_classes\n self.hist = np.zeros((num_classes, num_classes))\n\n def _fast_hist(self, label_pred, label_true):\n mask = (label_true >= 0) & (label_true < self.num_classes)\n hist = np.bincount(\n self.num_classes * label_true[mask].astype(int) + label_pred[mask],\n minlength=self.num_classes**2,\n ).reshape(self.num_classes, self.num_classes)\n return hist\n\n def add_batch(self, predictions, gts):\n for lp, lt in zip(predictions, gts):\n self.hist += self._fast_hist(lp.flatten(), 
lt.flatten())\n\n def evaluate(self):\n iu = np.diag(self.hist) / (\n self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist) + EPS\n )\n return iu, np.nanmean(iu)\n\n\nclass mulitmetrics:\n # from https://github.com/kevinzakka/pytorch-goodies/blob/c039691f349be9f21527bb38b907a940bfc5e8f3/metrics.py\n def __init__(self, num_classes):\n self.num_classes = num_classes\n self.hist = np.zeros((num_classes, num_classes))\n\n def _fast_hist(self, label_pred, label_true):\n mask = (label_true >= 0) & (label_true < self.num_classes)\n hist = np.bincount(\n self.num_classes * label_true[mask].astype(int) + label_pred[mask],\n minlength=self.num_classes**2,\n ).reshape(self.num_classes, self.num_classes)\n return hist\n\n def add_batch(self, predictions, gts):\n for lp, lt in zip(predictions, gts):\n self.hist += self._fast_hist(lp.flatten(), lt.flatten())\n\n def evaluate(self):\n A_inter_B = np.diag(self.hist)\n A = self.hist.sum(axis=1)\n B = self.hist.sum(axis=0)\n # jaccard_index\n iu = A_inter_B / (A + B - A_inter_B + EPS)\n meanIOU = np.nanmean(iu)\n\n # dice_coefficient\n dice = (2 * A_inter_B) / (A + B + EPS)\n avg_dice = np.nanmean(dice)\n\n # overall_pixel_accuracy\n correct = A_inter_B.sum()\n total = self.hist.sum()\n overall_acc = correct / (total + EPS)\n\n return overall_acc, meanIOU, avg_dice\n\n\ndef color_map(dataset=\"pascal\"):\n cmap = np.zeros((256, 3), dtype=\"uint8\")\n\n if dataset == \"pascal\" or dataset == \"coco\":\n\n def bitget(byteval, idx):\n return (byteval & (1 << idx)) != 0\n\n for i in range(256):\n r = g = b = 0\n c = i\n for j in range(8):\n r = r | (bitget(c, 0) << 7 - j)\n g = g | (bitget(c, 1) << 7 - j)\n b = b | (bitget(c, 2) << 7 - j)\n c = c >> 3\n\n cmap[i] = np.array([r, g, b])\n\n elif dataset == \"cityscapes\":\n cmap[0] = np.array([128, 64, 128])\n cmap[1] = np.array([244, 35, 232])\n cmap[2] = np.array([70, 70, 70])\n cmap[3] = np.array([102, 102, 156])\n cmap[4] = np.array([190, 153, 153])\n cmap[5] = np.array([153, 153, 153])\n cmap[6] = np.array([250, 170, 30])\n cmap[7] = np.array([220, 220, 0])\n cmap[8] = np.array([107, 142, 35])\n cmap[9] = np.array([152, 251, 152])\n cmap[10] = np.array([70, 130, 180])\n cmap[11] = np.array([220, 20, 60])\n cmap[12] = np.array([255, 0, 0])\n cmap[13] = np.array([0, 0, 142])\n cmap[14] = np.array([0, 0, 70])\n cmap[15] = np.array([0, 60, 100])\n cmap[16] = np.array([0, 80, 100])\n cmap[17] = np.array([0, 0, 230])\n cmap[18] = np.array([119, 11, 32])\n\n elif dataset == \"melanoma\":\n cmap[1] = np.array([255, 255, 255])\n\n elif dataset == \"breastCancer\":\n cmap[1] = np.array([255, 255, 255])\n # cmap[1] = np.array([128, 64, 128]) #benign\n # cmap[2] = np.array([244, 35, 232]) #malign\n\n return cmap\n", "id": "9823484", "language": "Python", "matching_score": 0.9749563336372375, "max_stars_count": 0, "path": "utils.py" }, { "content": "from model.semseg.deeplabv2 import DeepLabV2\nfrom model.semseg.deeplabv3plus import DeepLabV3Plus\nfrom model.semseg.pspnet import PSPNet\nfrom model.semseg.base import BaseNet\nfrom model.semseg.unet import Unet\nfrom model.semseg.small_unet import SmallUnet\nfrom utils import count_params\n\nimport argparse\nimport os\nimport pytorch_lightning as pl\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import WandbLogger\nimport wandb\n\nfrom dataset.isic_dermo_data_module import IsicDermoDataModule\n\nMODE = None\nglobal step_train\nglobal step_val\nstep_train = 0\nstep_val = 0\nuse_wandb = False\n\n\ndef parse_args():\n 
parser = argparse.ArgumentParser(description=\"ST and ST++ Framework\")\n\n # basic settings\n parser.add_argument(\n \"--data-root\",\n type=str,\n default=\"/lsdf/kit/iai/projects/iai-aida/Daten_Keppler/BreastCancer\",\n )\n parser.add_argument(\n \"--dataset\",\n type=str,\n choices=[\"pascal\", \"cityscapes\", \"melanoma\", \"pneumothorax\", \"breastCancer\"],\n default=\"breastCancer\",\n )\n parser.add_argument(\"--batch-size\", type=int, default=16)\n parser.add_argument(\"--lr\", type=float, default=None)\n parser.add_argument(\"--epochs\", type=int, default=5)\n parser.add_argument(\"--crop-size\", type=int, default=None)\n parser.add_argument(\n \"--backbone\",\n type=str,\n choices=[\"resnet18\", \"resnet50\", \"resnet101\"],\n default=\"resnet50\",\n )\n parser.add_argument(\n \"--model\",\n type=str,\n choices=[\"deeplabv3plus\", \"pspnet\", \"deeplabv2\", \"unet\", \"smallUnet\"],\n default=\"smallUnet\",\n )\n parser.add_argument(\n \"--val-split\", type=str, default=\"val_split_0\"\n ) # need to implement in Dataloader, crrently not working\n\n # semi-supervised settings\n parser.add_argument(\"--split\", type=str, default=\"1_30\")\n parser.add_argument(\"--shuffle\", type=int, default=0)\n # these are derived from the above split, shuffle and dataset. They dont need to be set\n parser.add_argument(\n \"--split-file-path\", type=str, default=None\n ) # \"dataset/splits/melanoma/1_30/split_0/split_sample.yaml\")\n parser.add_argument(\n \"--test-file-path\", type=str, default=None\n ) # \"dataset/splits/melanoma/test_sample.yaml\")\n parser.add_argument(\"--pseudo-mask-path\", type=str, default=None)\n parser.add_argument(\"--save-path\", type=str, default=None)\n parser.add_argument(\"--reliable-id-path\", type=str, default=None)\n parser.add_argument(\n \"--plus\",\n dest=\"plus\",\n default=False,\n action=\"store_true\",\n help=\"whether to use ST++\",\n )\n parser.add_argument(\n \"--use-wandb\", default=False, help=\"whether to use WandB for logging\"\n )\n parser.add_argument(\n \"--use-tta\", default=True, help=\"whether to use Test Time Augmentation\"\n )\n\n args = parser.parse_args()\n\n # autoparse? bzw use ******LightningCLI*********\n\n # add model specific args\n parser = BaseNet.add_model_specific_args(parser)\n\n # add all the available trainer options to argparse\n # ie: now --gpus --num_nodes ... 
--fast_dev_run all work in the cli\n parser = pl.Trainer.add_argparse_args(parser)\n\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n if use_wandb:\n wandb.init(project=\"ST++\", entity=\"gkeppler\")\n wandb_logger = WandbLogger(project=\"ST++\")\n wandb.define_metric(\"step_train\")\n wandb.define_metric(\"step_val\")\n wandb.define_metric(\"step_epoch\")\n wandb.define_metric(\"Pictures\", step_metric=\"step_epoch\")\n wandb.define_metric(\"loss\", step_metric=\"step_train\")\n wandb.define_metric(\"mIOU\", step_metric=\"step_val\")\n wandb.config.update(args)\n\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n if not os.path.exists(args.pseudo_mask_path):\n os.makedirs(args.pseudo_mask_path)\n if args.plus and args.reliable_id_path is None:\n exit(\"Please specify reliable-id-path in ST++.\")\n\n dataModule = IsicDermoDataModule(\n root_dir=args.data_root,\n batch_size=args.batch_size,\n train_yaml_path=args.split_file_path,\n test_yaml_path=args.test_file_path,\n pseudo_mask_path=args.pseudo_mask_path,\n )\n num_classes = {\"pascal\": 21, \"cityscapes\": 19, \"melanoma\": 2, \"breastCancer\": 3}[\n args.dataset\n ]\n model_zoo = {\n \"deeplabv3plus\": DeepLabV3Plus,\n \"pspnet\": PSPNet,\n \"deeplabv2\": DeepLabV2,\n \"unet\": Unet,\n \"smallUnet\": SmallUnet,\n }\n model = model_zoo[args.model](backbone=args.backbone, nclass=num_classes, args=args)\n\n # saves a file like: my/path/sample-epoch=02-val_loss=0.32.ckpt\n checkpoint_callback = ModelCheckpoint(\n dirpath=os.path.join(\"./\", f\"{args.save_path}\"),\n filename=f\"{args.model}\" + \"-{epoch:02d}-{val_acc:.2f}\",\n mode=\"max\",\n save_weights_only=True,\n )\n\n dev_run = False # not working when predicting with best_model checkpoint\n Trainer = pl.Trainer.from_argparse_args(\n args,\n fast_dev_run=dev_run,\n max_epochs=args.epochs,\n log_every_n_steps=2,\n logger=wandb_logger if args.use_wandb else None,\n callbacks=[checkpoint_callback],\n # gpus=[0],\n accelerator=\"cpu\",\n )\n # <====================== Supervised training with labeled images (SupOnly) ======================>\n print(\n \"\\n================> Total stage 1/%i: \"\n \"Supervised training on labeled images (SupOnly)\" % (6 if args.plus else 3)\n )\n\n Trainer.fit(model=model, datamodule=dataModule)\n\n if not args.plus:\n print(\"\\nParams: %.1fM\" % count_params(model))\n\n \"\"\"\n ST framework without selective re-training\n \"\"\"\n # <============================= Pseudolabel all unlabeled images =============================>\n print(\n \"\\n\\n\\n================> Total stage 2/3: Pseudo labeling all unlabeled images\"\n )\n\n Trainer.predict(\n datamodule=dataModule, ckpt_path=checkpoint_callback.best_model_path\n )\n\n # <======================== Re-training on labeled and unlabeled images ========================>\n print(\n \"\\n\\n\\n================> Total stage 3/3: Re-training on labeled and unlabeled images\"\n )\n\n model = model_zoo[args.model](\n backbone=args.backbone, nclass=num_classes, args=args\n )\n # increase max epochs to double the amount\n Trainer.fit_loop.max_epochs *= 2\n dataModule.mode = \"semi_train\"\n Trainer.fit(\n model=model, datamodule=dataModule\n )\n return\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n if args.lr is None:\n args.lr = 0.001\n if args.epochs is None:\n args.epochs = {\"pascal\": 80, \"cityscapes\": 240, \"melanoma\": 80}[args.dataset]\n # if args.lr is None:\n # args.lr = {'pascal': 0.001, 'cityscapes': 0.004, 'melanoma': 0.001}[args.dataset] / 16 * 
args.batch_size\n if args.crop_size is None:\n args.crop_size = {\n \"pascal\": 321,\n \"cityscapes\": 721,\n \"melanoma\": 256,\n \"breastCancer\": 256,\n }[args.dataset]\n if args.split_file_path is None:\n args.split_file_path = f\"dataset/splits/{args.dataset}/{args.split}/split_{args.shuffle}/split.yaml\"\n if args.test_file_path is None:\n args.test_file_path = f\"dataset/splits/{args.dataset}/test.yaml\"\n if args.pseudo_mask_path is None:\n args.pseudo_mask_path = (\n f\"outdir/pseudo_masks/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n if args.save_path is None:\n args.save_path = (\n f\"outdir/models/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n if args.reliable_id_path is None:\n args.reliable_id_path = (\n f\"outdir/reliable_ids/{args.dataset}/{args.split}/split_{args.shuffle}\"\n )\n print()\n print(args)\n\n main(args)\n", "id": "1946155", "language": "Python", "matching_score": 6.0353312492370605, "max_stars_count": 0, "path": "main_lightning.py" }, { "content": "import pytorch_lightning as pl\nfrom dataset.isic_dermo_data_module import IsicDermoDataModule\nfrom model.semseg.deeplabv3plus import DeepLabV3Plus\n\ndataset = \"melanoma\"\ndata_root = r\"/lsdf/kit/iai/projects/iai-aida/Daten_Keppler/ISIC_Demo_2017\"\nbatch_size = 2\ncrop_size = 100\n\ndataModule = IsicDermoDataModule(\n root_dir=data_root,\n batch_size=batch_size,\n train_yaml_path=\"dataset/splits/melanoma/1_8/split_0/split.yaml\",\n test_yaml_path=\"dataset/splits/melanoma/test.yaml\",\n)\n\ntest = dataModule.train_dataset.__getitem__(2)\ntest2 = dataModule.test_dataset.__getitem__(2)\ntest4 = dataModule.val_dataset.__getitem__(2)\n\nTrainer = pl.Trainer(fast_dev_run=True, accelerator=\"cpu\")\nTrainer.fit(model=DeepLabV3Plus(backbone=\"resnet50\", nclass=2), datamodule=dataModule)\n", "id": "11895123", "language": "Python", "matching_score": 2.7826876640319824, "max_stars_count": 0, "path": "dataloader_test.py" }, { "content": "import yaml\nimport pytorch_lightning as pl\nfrom torch.utils.data import DataLoader\nfrom dataset.isic_dermo_dataset import IsicDermoDataset\nimport logging\n\n\nclass IsicDermoDataModule(pl.LightningDataModule):\n def __init__(\n self,\n root_dir: str,\n batch_size: int,\n train_yaml_path: str,\n test_yaml_path: str,\n pseudo_mask_path: str,\n num_workers=16,\n pin_memory=False,\n ):\n super().__init__()\n self.num_workers = num_workers\n self.pin_memory = pin_memory\n logging.info(f\"Using {self.num_workers} workers for data loading\")\n self.root_dir = root_dir\n self.batch_size = batch_size\n self.train_yaml_path = train_yaml_path\n self.test_yaml_path = test_yaml_path\n self.pseudo_mask_path = pseudo_mask_path\n # transformations not used currently\n # self.train_transforms = train_transforms\n # self.train_transforms_unlabeled = (\n # train_transforms_unlabeled\n # if train_transforms_unlabeled is not None\n # else train_transforms\n # )\n # self.val_transforms = val_transforms\n # self.test_transforms = test_transforms\n\n self.sup_train_dataset: IsicDermoDataset = None\n self.semi_train_dataset: IsicDermoDataset = None\n self.val_dataset: IsicDermoDataset = None\n self.predict_dataset: IsicDermoDataset = None\n self.test_dataset: IsicDermoDataset = None\n self.mode = \"train\"\n self.__init_datasets()\n\n def __init_datasets(self):\n with open(self.train_yaml_path, \"r\") as file:\n split_dict = yaml.load(file, Loader=yaml.FullLoader)\n val_split_0 = split_dict[\"val_split_0\"]\n\n self.sup_train_dataset = IsicDermoDataset(\n root_dir=self.root_dir, 
labeled_id_list=val_split_0[\"labeled\"], mode=\"train\"\n )\n\n self.semi_train_dataset = IsicDermoDataset(\n root_dir=self.root_dir,\n labeled_id_list=val_split_0[\"labeled\"],\n unlabeled_id_list=val_split_0[\"unlabeled\"],\n pseudo_mask_path=self.pseudo_mask_path,\n mode=\"semi_train\",\n )\n\n self.val_dataset = IsicDermoDataset(\n root_dir=self.root_dir, labeled_id_list=val_split_0[\"val\"], mode=\"val\"\n )\n\n self.predict_dataset = IsicDermoDataset(\n root_dir=self.root_dir,\n unlabeled_id_list=val_split_0[\"unlabeled\"],\n mode=\"label\",\n )\n\n with open(self.test_yaml_path, \"r\") as file:\n test_dict = yaml.load(file, Loader=yaml.FullLoader)\n\n self.test_dataset = IsicDermoDataset(\n root_dir=self.root_dir,\n # transforms=self.test_transforms,\n labeled_id_list=test_dict,\n mode=\"test\",\n )\n\n def train_dataloader(self):\n return DataLoader(\n self.sup_train_dataset\n if self.mode == \"train\"\n else self.semi_train_dataset\n if self.mode == \"semi_train\"\n else None,\n batch_size=self.batch_size,\n # collate_fn=custom_collate,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n # worker_init_fn=seed_worker,\n # shuffle=self.shuffle,\n # drop_last=self.drop_last\n )\n\n def predict_dataloader(self):\n return DataLoader(\n self.predict_dataset,\n batch_size=1,\n # shuffle=False,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n # worker_init_fn=seed_worker\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.val_dataset,\n batch_size=1,\n # shuffle=False,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n # worker_init_fn=seed_worker\n )\n\n def test_dataloader(self):\n return DataLoader(\n self.test_dataset,\n batch_size=self.batch_size,\n # shuffle=False,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n # worker_init_fn=seed_worker\n )\n", "id": "1962360", "language": "Python", "matching_score": 3.5121915340423584, "max_stars_count": 0, "path": "dataset/isic_dermo_data_module.py" }, { "content": "import os\nfrom dataset.transform import crop, hflip, normalize, resize, blur, cutout\nfrom torchvision import transforms\nimport random\nfrom PIL import Image\nimport math\n\n\nclass IsicDermoDataset:\n \"\"\"\n :param root_dir: root path of the dataset.\n :param id_path: path of labeled or unlabeled image ids\n :param pseudo_mask_path: path of generated pseudo masks, needed in semi_train mode.\n \"\"\"\n\n def __init__(\n self,\n root_dir: str,\n mode: str,\n base_size=256,\n crop_size=256,\n labeled_id_list=None,\n unlabeled_id_list=None,\n pseudo_mask_path=None,\n ):\n self.root_dir = root_dir\n self.mode = mode\n self.base_size = base_size\n self.size = crop_size\n self.unlabeled_id_list = unlabeled_id_list\n self.labeled_id_list = labeled_id_list\n self.pseudo_mask_path = pseudo_mask_path\n\n if mode == \"semi_train\":\n self.ids = (\n self.labeled_id_list\n * math.ceil(len(self.unlabeled_id_list) / len(self.labeled_id_list))\n + self.unlabeled_id_list\n )\n elif mode == \"val\" or mode == \"train\":\n self.ids = labeled_id_list\n elif mode == \"label\":\n self.ids = unlabeled_id_list\n\n def __len__(self):\n return len(self.ids)\n\n def __getitem__(self, idx):\n id = self.ids[idx]\n img_path = os.path.join(self.root_dir, id.split(\" \")[0])\n img = Image.open(img_path)\n\n if self.mode == \"val\" or self.mode == \"label\" or self.mode == \"test\":\n mask = Image.open(os.path.join(self.root_dir, id.split(\" \")[1]))\n img, mask = resize(img, mask, self.base_size, (0.5, 2.0))\n img, mask = normalize(img, 
mask)\n return img, mask, id\n\n if self.mode == \"train\" or (\n self.mode == \"semi_train\" and id in self.labeled_id_list\n ):\n mask = Image.open(os.path.join(self.root_dir, id.split(\" \")[1]))\n else:\n # mode == 'semi_train' and the id corresponds to unlabeled image\n fname = os.path.basename(id.split(\" \")[1])\n mask = Image.open(os.path.join(self.pseudo_mask_path, fname))\n\n # basic augmentation on all training images\n img, mask = resize(img, mask, self.base_size, (0.5, 2.0))\n img, mask = crop(img, mask, self.size)\n img, mask = hflip(img, mask, p=0.5)\n\n # strong augmentation on unlabeled images\n if self.mode == \"semi_train\" and id in self.unlabeled_id_list:\n if random.random() < 0.8:\n img = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img)\n img = transforms.RandomGrayscale(p=0.2)(img)\n img = blur(img, p=0.5)\n img, mask = cutout(img, mask, p=0.5)\n\n img, mask = normalize(img, mask)\n\n return (\n img,\n mask,\n )\n", "id": "4628262", "language": "Python", "matching_score": 2.927797317504883, "max_stars_count": 0, "path": "dataset/isic_dermo_dataset.py" }, { "content": "from dataset.transform import (\n crop,\n hflip,\n normalize,\n resize,\n blur,\n cutout,\n resize_crop,\n # to_polar,\n # to_cart,\n)\n\nimport math\nimport os\nfrom PIL import Image\nimport random\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\nimport yaml\n\n\nclass SemiDataset(Dataset):\n def __init__(\n self,\n name,\n root,\n mode,\n size,\n split_file_path=None,\n pseudo_mask_path=None,\n reliable=None,\n val_split=\"val_split_0\",\n ):\n \"\"\"\n :param name: dataset name, pascal, melanoma or cityscapes\n :param root: root path of the dataset.\n :param mode: train: supervised learning only with labeled images, no unlabeled images are leveraged.\n label: pseudo labeling the remaining unlabeled images.\n semi_train: semi-supervised learning with both labeled and unlabeled images.\n val: validation.\n\n :param size: crop size of training images.\n :param split_file_path: path of yaml file for splits.\n :param pseudo_mask_path: path of generated pseudo masks, needed in semi_train mode.\n \"\"\"\n self.name = name\n self.root = root\n self.mode = mode\n self.size = size\n\n self.pseudo_mask_path = pseudo_mask_path\n\n if mode == \"semi_train\":\n with open(split_file_path, \"r\") as file:\n split_dict = yaml.load(file, Loader=yaml.FullLoader)[val_split]\n self.labeled_ids = split_dict[\"labeled\"]\n if reliable is None:\n self.unlabeled_ids = split_dict[\"unlabeled\"]\n elif reliable is True:\n self.unlabeled_ids = split_dict[\"reliable\"]\n elif reliable is False:\n self.unlabeled_ids = split_dict[\"unreliable\"]\n # multiply label to match the cound of unlabled\n self.ids = (\n self.labeled_ids * math.ceil(len(self.unlabeled_ids) / len(self.labeled_ids)) + self.unlabeled_ids\n )\n elif mode == \"test\":\n with open(split_file_path, \"r\") as file:\n self.ids = yaml.load(file, Loader=yaml.FullLoader)\n else:\n with open(split_file_path) as file:\n split_dict = yaml.load(file, Loader=yaml.FullLoader)[val_split]\n if mode == \"val\":\n self.ids = split_dict[\"val\"]\n elif mode == \"label\":\n if reliable is None:\n self.ids = split_dict[\"unlabeled\"]\n elif reliable is True:\n self.ids = split_dict[\"reliable\"]\n elif reliable is False:\n self.ids = split_dict[\"unreliable\"]\n elif mode == \"train\":\n self.ids = split_dict[\"labeled\"]\n\n def __getitem__(self, item):\n id = self.ids[item]\n img = Image.open(os.path.join(self.root, id.split(\" \")[0]))\n\n if self.mode 
== \"val\" or self.mode == \"label\" or self.mode == \"test\":\n mask = Image.open(os.path.join(self.root, id.split(\" \")[1]))\n # unet needs much memory on\n if self.name == \"melanoma\":\n img, mask = resize_crop(img, mask, self.size)\n img, mask = normalize(img, mask)\n # print(img.cpu().numpy().shape)\n return img, mask, id\n\n if self.mode == \"train\" or (\n self.mode == \"semi_train\" and id in self.labeled_ids\n ):\n mask = Image.open(os.path.join(self.root, id.split(\" \")[1]))\n else:\n # mode == 'semi_train' and the id corresponds to unlabeled image\n fname = os.path.basename(id.split(\" \")[1])\n mask = Image.open(os.path.join(self.pseudo_mask_path, fname))\n\n # basic augmentation on all training images\n base_size = (\n 400\n if self.name == \"pascal\"\n else 256\n if self.name == \"melanoma\"\n else 500\n if self.name == \"breastCancer\"\n else 2048\n )\n img, mask = resize(img, mask, base_size, (0.5, 2.0))\n img, mask = crop(img, mask, self.size)\n img, mask = hflip(img, mask, p=0.5)\n\n # strong augmentation on unlabeled images\n if self.mode == \"semi_train\" and id in self.unlabeled_ids:\n if random.random() < 0.8:\n img = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(img)\n img = transforms.RandomGrayscale(p=0.2)(img)\n img = blur(img, p=0.5)\n img, mask = cutout(img, mask, p=0.5)\n\n img, mask = normalize(img, mask)\n\n return img, mask\n\n def __len__(self):\n return len(self.ids)\n", "id": "1501046", "language": "Python", "matching_score": 1.992746114730835, "max_stars_count": 0, "path": "dataset/semi.py" }, { "content": "import numpy as np\nfrom PIL import Image, ImageOps, ImageFilter\nimport random\nimport torch\nfrom torchvision import transforms\nimport cv2\n\n\ndef crop(img, mask, size):\n # padding height or width if smaller than cropping size\n w, h = img.size\n padw = size - w if w < size else 0\n padh = size - h if h < size else 0\n img = ImageOps.expand(img, border=max(0, 0, padw, padh), fill=0)\n mask = ImageOps.expand(mask, border=max(0, 0, padw, padh), fill=255)\n\n # cropping\n w, h = img.size\n x = random.randint(0, abs(w - size))\n y = random.randint(0, abs(h - size))\n img = img.crop((x, y, x + size, y + size))\n mask = mask.crop((x, y, x + size, y + size))\n\n return img, mask\n\n\ndef hflip(img, mask, p=0.5):\n if random.random() < p:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n return img, mask\n\n\ndef normalize(img, mask=None):\n \"\"\"\n :param img: PIL image\n :param mask: PIL image, corresponding mask\n :return: normalized torch tensor of image and mask\n \"\"\"\n img = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ]\n )(img)\n if mask is not None:\n mask = torch.from_numpy(np.array(mask)).long()\n return img, mask\n return img\n\n\ndef resize(img, mask, base_size, ratio_range):\n w, h = img.size\n long_side = random.randint(\n int(base_size * ratio_range[0]), int(base_size * ratio_range[1])\n )\n\n if h > w:\n oh = long_side\n ow = int(1.0 * w * long_side / h + 0.5)\n else:\n ow = long_side\n oh = int(1.0 * h * long_side / w + 0.5)\n\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n return img, mask\n\n\n# center crop to sqaure, then base_size\ndef resize_crop(img, mask, base_size):\n w, h = img.size\n if h > w:\n crop_size = w\n else:\n crop_size = h\n left = (w - crop_size) / 2\n top = (h - crop_size) / 2\n right = (w + crop_size) / 2\n bottom = (h + crop_size) / 2\n # make it 
sqaure\n img = img.crop((left, top, right, bottom))\n mask = mask.crop((left, top, right, bottom))\n\n # resize to base_size\n img = img.resize((base_size, base_size), Image.BILINEAR)\n mask = mask.resize((base_size, base_size), Image.NEAREST)\n\n return img, mask\n\n\ndef downsample(img, mask, base_size):\n w, h = img.size\n\n if h > w:\n oh = base_size\n ow = int(1.0 * w * base_size / h + 0.5)\n else:\n ow = base_size\n oh = int(1.0 * h * base_size / w + 0.5)\n\n img = img.resize((ow, oh), Image.BILINEAR)\n mask = mask.resize((ow, oh), Image.NEAREST)\n return img, mask\n\n\ndef blur(img, p=0.5):\n if random.random() < p:\n sigma = np.random.uniform(0.1, 2.0)\n img = img.filter(ImageFilter.GaussianBlur(radius=sigma))\n return img\n\n\ndef cutout(\n img,\n mask,\n p=0.5,\n size_min=0.02,\n size_max=0.4,\n ratio_1=0.3,\n ratio_2=1 / 0.3,\n value_min=0,\n value_max=255,\n pixel_level=True,\n):\n if random.random() < p:\n img = np.array(img)\n mask = np.array(mask)\n\n img_h, img_w, img_c = img.shape\n\n while True:\n size = np.random.uniform(size_min, size_max) * img_h * img_w\n ratio = np.random.uniform(ratio_1, ratio_2)\n erase_w = int(np.sqrt(size / ratio))\n erase_h = int(np.sqrt(size * ratio))\n x = np.random.randint(0, img_w)\n y = np.random.randint(0, img_h)\n\n if x + erase_w <= img_w and y + erase_h <= img_h:\n break\n\n if pixel_level:\n value = np.random.uniform(value_min, value_max, (erase_h, erase_w, img_c))\n else:\n value = np.random.uniform(value_min, value_max)\n\n img[y: y + erase_h, x: x + erase_w] = value\n mask[y: y + erase_h, x: x + erase_w] = 255\n\n img = Image.fromarray(img.astype(np.uint8))\n mask = Image.fromarray(mask.astype(np.uint8))\n\n return img, mask\n\n\n# from https://github.com/marinbenc/medical-polar-training/blob/main/polar_transformations.py\ndef to_polar(img, mask, center=None):\n img = np.float32(img)\n mask = np.float32(mask)\n value = np.sqrt(((img.shape[0] / 2.0) ** 2.0) + ((img.shape[1] / 2.0) ** 2.0))\n if center is None:\n center = (img.shape[0] // 2, img.shape[1] // 2)\n polar_image = cv2.linearPolar(img, center, value, cv2.WARP_FILL_OUTLIERS)\n polar_mask = cv2.linearPolar(mask, center, value, cv2.WARP_FILL_OUTLIERS)\n polar_image = cv2.rotate(polar_image, cv2.ROTATE_90_COUNTERCLOCKWISE)\n polar_mask = cv2.rotate(polar_mask, cv2.ROTATE_90_COUNTERCLOCKWISE)\n polar_image = Image.fromarray(polar_image.astype(\"uint8\"))\n polar_mask = Image.fromarray(polar_mask.astype(\"uint8\"))\n return polar_image, polar_mask\n\n\ndef to_cart(polar_image, polar_mask, center=None):\n polar_image = np.float32(polar_image)\n polar_mask = np.float32(polar_mask)\n polar_image = cv2.rotate(polar_image, cv2.ROTATE_90_CLOCKWISE)\n polar_mask = cv2.rotate(polar_mask, cv2.ROTATE_90_CLOCKWISE)\n if center is None:\n center = (polar_image.shape[1] // 2, polar_image.shape[0] // 2)\n value = np.sqrt(\n ((polar_image.shape[1] / 2.0) ** 2.0) + ((polar_image.shape[0] / 2.0) ** 2.0)\n )\n img = cv2.linearPolar(\n polar_image, center, value, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP\n )\n mask = cv2.linearPolar(\n polar_mask, center, value, cv2.WARP_FILL_OUTLIERS + cv2.WARP_INVERSE_MAP\n )\n img = Image.fromarray(img.astype(\"uint8\"))\n mask = Image.fromarray(mask.astype(\"uint8\"))\n return img, mask\n", "id": "4858870", "language": "Python", "matching_score": 2.6061508655548096, "max_stars_count": 0, "path": "dataset/transform.py" }, { "content": "# this notebook resizes all images in a folder to center crop\nfrom PIL import Image\nimport os\n\npath = 
\"/home/gustav/datasets/ISIC_Demo_2017/\"\nold_name = \"ISIC_Demo_2017\"\nnew_name = \"ISIC_Demo_2017_small\"\ndirs = os.listdir(path)\n\n\ndef resize_crop(img, base_size):\n w, h = img.size\n if h > w:\n crop_size = w\n else:\n crop_size = h\n left = (w - crop_size) / 2\n top = (h - crop_size) / 2\n right = (w + crop_size) / 2\n bottom = (h + crop_size) / 2\n # make it sqaure\n img = img.crop((left, top, right, bottom))\n\n # resize to base_size\n img = img.resize((base_size, base_size), Image.BILINEAR)\n return img\n\n\nfor path, subdirs, files in os.walk(path):\n for name in files:\n img_path = os.path.join(path, name)\n im = Image.open(img_path)\n imResize = resize_crop(im, 512)\n img_path_new = img_path.replace(old_name, new_name)\n if not os.path.exists(os.path.dirname(img_path_new)):\n os.makedirs(os.path.dirname(img_path_new))\n imResize.save(img_path_new)\n", "id": "2771369", "language": "Python", "matching_score": 0.69044029712677, "max_stars_count": 0, "path": "resize.py" }, { "content": "import random\nfrom os import listdir\nfrom os.path import isfile, join\nimport yaml\nfrom pathlib import Path\nfrom sklearn.model_selection import StratifiedKFold\n\n# set basic params and load file list\ncross_val_splits = 5\nnum_shuffels = 5\nsplits = [\"1\", \"1/4\", \"1/8\", \"1/30\"]\n# /lsdf/kit/iai/projects/iai-aida/Daten_Keppler/ISIC_Demo_2017\")\nimages_folder = \"images\"\nlabels_folder = \"labels\"\ntraining_filelist = []\nval_filelist = []\ntest_filelist = []\n\n# pnuemothorax dataset\ndataset = r\"breastCancer\"\nbase_path = r\"/lsdf/kit/iai/projects/iai-aida/Daten_Keppler/BreastCancer\"\ntraining_filelist = [\n \"train/images/%s train/labels/%s_mask.png\" % (f, f[:-4])\n for f in listdir(join(base_path, \"train\", images_folder))\n if isfile(join(base_path, \"train\", images_folder, f))\n]\n# sanity check if file in image folder are same as in\ndifferences = set(\n [\n \"train/images/%s.png train/labels/%s_mask.png\" % (f[:-9], f[:-9])\n for f in listdir(join(base_path, \"train\", labels_folder))\n if isfile(join(base_path, \"train\", labels_folder, f))\n ]\n).symmetric_difference(set(training_filelist))\nif len(differences) != 0:\n raise Exception(\n f\"files in folders '{images_folder}' and '{labels_folder}' do not match because of: {differences}\"\n )\n\ntest_filelist = [\n \"test/images/%s test/labels/%s_mask.png\" % (f, f[:-4])\n for f in listdir(join(base_path, \"test\", images_folder))\n if isfile(join(base_path, \"test\", images_folder, f))\n]\n\nlist_len = len(training_filelist)\nprint(training_filelist[:2])\n\n# shuffle labeled/unlabled\nfor shuffle in range(num_shuffels):\n yaml_dict = {}\n for split in splits:\n random.shuffle(training_filelist)\n # calc splitpoint\n labeled_splitpoint = int(list_len * float(eval(split)))\n print(\n f\"splitpoint for {split} in dataset with list_len {list_len} are {labeled_splitpoint}\"\n )\n unlabeled = training_filelist[labeled_splitpoint:]\n labeled = training_filelist[:labeled_splitpoint]\n skf = StratifiedKFold(n_splits=cross_val_splits)\n y = [(0 if name[0] == \"n\" else 1 if name[0] == \"m\" else 2) for name in labeled]\n count = 0\n for train_index, val_index in skf.split(labeled, y):\n unlabeled_copy = unlabeled.copy() # or elese it cant be reused\n train = [labeled[i] for i in train_index]\n val = [labeled[i] for i in val_index]\n yaml_dict[\"val_split_\" + str(count)] = dict(\n unlabeled=unlabeled_copy, labeled=train, val=val\n )\n count += 1\n\n # save to yaml\n # e.g 1/4 -> 1_4 for folder name\n zw = list(split)\n if 
len(zw) > 1:\n zw[1] = \"_\"\n split = \"\".join(zw)\n\n yaml_path = rf\"dataset/splits/{dataset}/{split}/split_{shuffle}\"\n Path(yaml_path).mkdir(parents=True, exist_ok=True)\n with open(yaml_path + \"/split.yaml\", \"w+\") as outfile:\n yaml.dump(yaml_dict, outfile, default_flow_style=False)\n# test yaml file\nyaml_dict = {}\nyaml_path = rf\"dataset/splits/{dataset}/\"\nPath(yaml_path).mkdir(parents=True, exist_ok=True)\n\nwith open(yaml_path + \"/test.yaml\", \"w+\") as outfile:\n yaml.dump(test_filelist, outfile, default_flow_style=False)\n", "id": "6813856", "language": "Python", "matching_score": 4.749601364135742, "max_stars_count": 0, "path": "split_stratified_breast_cancer_.py" }, { "content": "import pandas as pd\nimport random\nimport yaml\nfrom pathlib import Path\nfrom sklearn.model_selection import KFold\n\n# set basic params and load file list\ndataset = r\"melanoma\"\ncross_val_splits = 5\nnum_shuffels = 1\nsplits = [\"1/40\"]\ncsv_train_path = r\"ISIC-2017_Training_Part3_GroundTruth(1).csv\"\ncsv_test_path = r\"ISIC-2017_Validation_Part3_GroundTruth.csv\"\nimages_folder = \"images\"\nlabels_folder = \"labels\"\ntraining_filelist = []\nval_filelist = []\ntest_filelist = []\n\ntraining_filelist = pd.read_csv(csv_train_path)[\"image_id\"].to_list()\ntraining_filelist = [\n \"train/images/%s.jpg train/labels/%s_segmentation.png\" % (f, f)\n for f in training_filelist\n]\n\n# all iamges are in this case in the train folder\ntest_filelist = pd.read_csv(csv_test_path)[\"image_id\"].to_list()\ntest_filelist = [\n \"train/images/%s.jpg train/labels/%s_segmentation.png\" % (f, f)\n for f in test_filelist\n]\n\nlist_len = len(training_filelist)\nprint(training_filelist[:2], list_len)\n\n# %%\n# shuffle labeled/unlabled\nfor shuffle in range(num_shuffels):\n yaml_dict = {}\n for split in splits:\n random.shuffle(training_filelist)\n # calc splitpoint\n labeled_splitpoint = int(list_len * float(eval(split)))\n print(\n f\"splitpoint for {split} in dataset with list_len {list_len} are {labeled_splitpoint}\"\n )\n unlabeled = training_filelist[labeled_splitpoint:]\n labeled = training_filelist[:labeled_splitpoint]\n kf = KFold(n_splits=cross_val_splits)\n count = 0\n for train_index, val_index in kf.split(labeled):\n unlabeled_copy = unlabeled.copy() # or elese it cant be reused\n train = [labeled[i] for i in train_index]\n val = [labeled[i] for i in val_index]\n yaml_dict[\"val_split_\" + str(count)] = dict(\n unlabeled=unlabeled_copy, labeled=train, val=val\n )\n count += 1\n\n # save to yaml\n # e.g 1/4 -> 1_4 for folder name\n zw = list(split)\n if len(zw) > 1:\n zw[1] = \"_\"\n split = \"\".join(zw)\n\n yaml_path = rf\"dataset/splits/{dataset}/{split}/split_{shuffle}\"\n Path(yaml_path).mkdir(parents=True, exist_ok=True)\n with open(yaml_path + \"/split.yaml\", \"w+\") as outfile:\n yaml.dump(yaml_dict, outfile, default_flow_style=False)\n\n# test yaml file\nyaml_dict = {}\nyaml_path = rf\"dataset/splits/{dataset}/\"\nPath(yaml_path).mkdir(parents=True, exist_ok=True)\n\nwith open(yaml_path + \"/test_valset.yaml\", \"w+\") as outfile:\n yaml.dump(test_filelist, outfile, default_flow_style=False)\n", "id": "791736", "language": "Python", "matching_score": 4.413725852966309, "max_stars_count": 0, "path": "split_melanoma.py" } ]
2.931883
dv-opel
[ { "content": "from PyQt5.QtWidgets import QWidget\r\nfrom memo_card_layout import *\r\nfrom random import shuffle\r\n\r\nCARD_WIDTH = 600\r\nCARD_HEIGHT = 500\r\n\r\nfrm_question = \"Яблоко\"\r\nfrm_right = \"apple\"\r\nfrm_wrong1 = \"bbbbb\"\r\nfrm_wrong2 = \"ccccc\"\r\nfrm_wrong3 = \"ddddd\"\r\n\r\n# распределение ответов между кнопками\r\nradio_list = [rbtn1, rbtn2, rbtn3, rbtn4]\r\nshuffle(radio_list)\r\nanswer = radio_list[0]\r\nwrong_answer1 = radio_list[1]\r\nwrong_answer2 = radio_list[2]\r\nwrong_answer3 = radio_list[3]\r\n\r\ndef show_data():\r\n lb_Quation.setText(frm_question)\r\n lb_Correct.setText(frm_right)\r\n answer.setText(frm_right)\r\n wrong_answer1.setText(frm_wrong1)\r\n wrong_answer2.setText(frm_wrong2)\r\n wrong_answer3.setText(frm_wrong3)\r\n\r\ndef chek_result():\r\n if answer.isChecked():\r\n lb_Result.setText(\"Верно\")\r\n show_result()\r\n if wrong_answer1.isChecked() or wrong_answer2.isChecked() or wrong_answer3.isChecked():\r\n lb_Result.setText(\"Неверно\")\r\n show_result()\r\n\r\ndef click_ok():\r\n if btn_ok.text() == \"Ответить\":\r\n chek_result()\r\n\r\n\r\n# Создание Окна\r\nwindow = QWidget()\r\nwindow.setWindowTitle(\"Memory Card\")\r\nwindow.resize(CARD_WIDTH, CARD_HEIGHT)\r\nwindow.move(300, 200)\r\n\r\nwindow.setLayout(layout_card)\r\nshow_data()\r\nshow_questions()\r\nbtn_ok.clicked.connect(click_ok)\r\n\r\n\r\nwindow.show()\r\napp.exec_()\r\n\r\n", "id": "710142", "language": "Python", "matching_score": 3.8314435482025146, "max_stars_count": 0, "path": "memo_main.py" }, { "content": "from PyQt5.QtCore import Qt\r\nfrom PyQt5.QtWidgets import (\r\n QWidget, QLabel, QPushButton, QVBoxLayout, \r\n QHBoxLayout, QSpinBox, QGroupBox, QButtonGroup, \r\n QRadioButton, \r\n)\r\nfrom memo_app import app\r\n\r\n# СОЗДАНИЕ ВИДЖЕТОВ\r\nbtn_Menu = QPushButton(\"Меню\")\r\nbtn_Sleep = QPushButton(\"Отдохнуть\")\r\nbox_Minutes = QSpinBox()\r\nbox_Minutes.setValue(30)\r\nbtn_ok = QPushButton(\"Ответить\")\r\nlb_Quation = QLabel(\"XXX\")\r\n\r\nradioGroupBox = QGroupBox(\"Варианты ответов\")\r\nradioGroup = QButtonGroup()\r\nrbtn1 = QRadioButton()\r\nrbtn2 = QRadioButton()\r\nrbtn3 = QRadioButton()\r\nrbtn4 = QRadioButton()\r\nradioGroup.addButton(rbtn1)\r\nradioGroup.addButton(rbtn2)\r\nradioGroup.addButton(rbtn3)\r\nradioGroup.addButton(rbtn4)\r\n\r\nansGroupBox = QGroupBox(\"Результат теста\")\r\nlb_Result = QLabel(\"верно / неверно\")\r\nlb_Correct = QLabel(\"текст ответа\")\r\n\r\n# РАЗМЕЩЕНИЕ\r\n# панель вопросов\r\nlayout_ans1 = QHBoxLayout()\r\n\r\nlayout_ans2 = QVBoxLayout()\r\nlayout_ans3 = QVBoxLayout()\r\nlayout_ans2.addWidget(rbtn1)\r\nlayout_ans2.addWidget(rbtn2)\r\nlayout_ans3.addWidget(rbtn3)\r\nlayout_ans3.addWidget(rbtn4)\r\n\r\nlayout_ans1.addLayout(layout_ans2)\r\nlayout_ans1.addLayout(layout_ans3)\r\nradioGroupBox.setLayout(layout_ans1)\r\n\r\n#панель результатов\r\nlayout_res = QVBoxLayout()\r\nlayout_res.addWidget(lb_Result, alignment=(Qt.AlignLeft | Qt.AlignTop))\r\nlayout_res.addWidget(lb_Correct, alignment=Qt.AlignHCenter, stretch=2)\r\nansGroupBox.setLayout(layout_res)\r\nansGroupBox.hide()\r\n\r\n# размещение всех элементов в окне\r\nlayout_line1 = QHBoxLayout()\r\nlayout_line2 = QHBoxLayout()\r\nlayout_line3 = QHBoxLayout()\r\nlayout_line4 = QHBoxLayout()\r\n\r\nlayout_line1.addWidget(btn_Menu)\r\nlayout_line1.addStretch(1)\r\nlayout_line1.addWidget(btn_Sleep)\r\nlayout_line1.addWidget(box_Minutes)\r\nlayout_line1.addWidget(QLabel(\"минут\"))\r\n\r\nlayout_line2.addWidget(lb_Quation, 
alignment=Qt.AlignCenter)\r\n\r\nlayout_line3.addWidget(radioGroupBox)\r\nlayout_line3.addWidget(ansGroupBox)\r\n\r\nlayout_line4.addStretch(1)\r\nlayout_line4.addWidget(btn_ok, stretch=2)\r\nlayout_line4.addStretch(1)\r\n\r\n# размещение на вертикальной линии\r\nlayout_card = QVBoxLayout()\r\nlayout_card.addLayout(layout_line1, stretch=1)\r\nlayout_card.addLayout(layout_line2, stretch=2)\r\nlayout_card.addLayout(layout_line3, stretch=8)\r\nlayout_card.addStretch(1)\r\nlayout_card.addLayout(layout_line4, stretch=1)\r\nlayout_card.addStretch(1)\r\nlayout_card.setSpacing(5)\r\n\r\ndef show_result():\r\n radioGroupBox.hide()\r\n ansGroupBox.show()\r\n btn_ok.setText(\"Следующий вопрос\")\r\n\r\ndef show_questions():\r\n ansGroupBox.hide()\r\n radioGroupBox.show()\r\n btn_ok.setText(\"Ответить\")\r\n # сброс радиокнопок\r\n radioGroup.setExclusive(False)\r\n rbtn1.setChecked(False)\r\n rbtn2.setChecked(False)\r\n rbtn3.setChecked(False)\r\n rbtn4.setChecked(False)\r\n radioGroup.setExclusive(True)\r\n\r\n", "id": "4514155", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "memo_card_layout.py" }, { "content": "from PyQt5.QtWidgets import QApplication\r\n\r\napp = QApplication([])", "id": "2443340", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "memo_app.py" } ]
0
Antrovirens
[ { "content": "class ControlPoint(object):\n '''undirected unweighted graph'''\n\n def __init__(self):\n self.num = int(0)\n self.name = \"\"\n self.X = 0.0\n self.Y = 0.0\n self.Z = 0.0\n self.N = 0.0\n self.E = 0.0\n self.H = 0.0\n\n def init_controlpoint(self,line):\n line = line.rstrip('\\n')\n line = line.split(',')\n\n self.num = int(line[0])\n self.name = line[1]\n self.X = float(line[2])\n self.Y = float(line[3])\n self.Z = float(line[4])\n self.N = float(line[5])\n self.E = float(line[6])\n self.H = float(line[7])\n\n def controlpoint_inf(self):\n return [ self.num ,self.name, self.X, self.Y , self.Z]\n\n def controlpoint_match(self, point_name):\n if self.name == point_name:\n return True\n else :\n return False\n\n\n\nclass Baseline(object):\n '''undirected unweighted graph'''\n\n def __init__(self):\n self.num = int(0)\n self.origin = \"\"\n self.target = \"\"\n self.name = \"\"\n self.DX = 0.0\n self.DY = 0.0\n self.DZ = 0.0\n self.sigema_DX = 0.0\n self.sigema_DY = 0.0\n self.sigema_DZ = 0.0\n\n def init_baseline(self,line):\n self.num = int(line[0])\n self.origin = line[1]\n self.target = line[2]\n self.name = line[3]\n self.DX = float(line[4])\n self.DY = float(line[5])\n self.DZ = float(line[6])\n self.sigema_DX = float(line[7])\n self.sigema_DY = float(line[8])\n self.sigema_DZ = float(line[9])\n\n def baseline_inf(self):\n return [self.num,self.origin,self.target,self.name ,self.DX,self.DY,self.DZ,self.sigema_DX,self.sigema_DY,self.sigema_DZ,]\n \n def baseline_match(self,baseline):\n if baseline[0] == self.origin and baseline[1] == self.target:\n return True,1,self.num\n elif baseline[0] == self.target and baseline[1] == self.origin:\n return True,-1,self.num\n else:\n return False,0,-1\n\nclass GNSSNet(object):\n '''undirected unweighted graph'''\n\n def __init__(self):\n self.VertexNumber = int(0) #点\n self.EdgeNumber = int(0) #边\n self.BaselineSet = []\n self.ControlPointSet = []\n\n def insert_Baseline(self,baseline):\n self.BaselineSet.append(baseline)\n\n def insert_ControlPoint(self, controlpoint):\n self.ControlPointSet.append(controlpoint)\n\n\n def Net_baseline_match(self,baseline):\n for Baseline in self.BaselineSet:\n a,towards,number = Baseline.baseline_match(baseline)\n if a:\n return towards,self.BaselineSet[number-1].baseline_inf()\n\n def controlpoint_match(self,point_name):\n for controlpoint in self.ControlPointSet:\n if controlpoint.controlpoint_match(point_name):\n return True,controlpoint.controlpoint_inf()\n return False,None\n\n def init_P(self,P):\n for Baseline in self.BaselineSet:\n P[Baseline.num*3-3][Baseline.num*3-3] = 1.0 / (Baseline.sigema_DX * Baseline.sigema_DX)\n P[Baseline.num*3-2][Baseline.num*3-2] = 1.0 / (Baseline.sigema_DY * Baseline.sigema_DY)\n P[Baseline.num*3-1][Baseline.num*3-1] = 1.0 / (Baseline.sigema_DZ * Baseline.sigema_DZ)\n return P\n\n\n def init_L(self,L):\n for Baseline in self.BaselineSet:\n L[Baseline.num*3-3][0] = Baseline.DX \n L[Baseline.num*3-2][0] = Baseline.DY\n L[Baseline.num*3-1][0] = Baseline.DZ\n return L", "id": "12748693", "language": "Python", "matching_score": 2.4196741580963135, "max_stars_count": 7, "path": "GNSS网平差/python生成附有参数的条件平差/gnssnet.py" }, { "content": "'''\nmain.py\n'''\n\nimport pandas as pd\nimport numpy as np\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\nfrom gnssnet import *\nimport math\n\ndef Save2Excel(mats,name):\n data = pd.DataFrame(mats)\n writer = pd.ExcelWriter(\"C:\\\\Users\\\\sheld\\\\Desktop\\\\yq\\\\2\\\\\"+ name + \".xlsx\")\n data.to_excel(writer, \"page_1\", 
float_format = '%.6f')\n writer.save()\n writer.close()\n\n\n\ndef init_GNSSNet(G):\n with open('data.csv','r') as f:\n for line in f.readlines():\n b= Baseline()\n b.init_baseline(line.split(','))\n G.insert_Baseline(b)\n f.close()\n with open('ControlPoints.csv','r') as f:\n for line in f.readlines():\n p = ControlPoint()\n p.init_controlpoint(line)\n G.insert_ControlPoint(p)\n f.close()\n\ndef init_lines(lines,t):\n i = 0\n l = len(t)\n while i < l - 1:\n a = str(t[i])\n b = str(t[i + 1])\n lines.append([a,b])\n i = i+ 1\n\n\n\n\nn = 63\nt = 24\nr = 39\nu = 3\nc = 42\n\nglobal G\nG = GNSSNet()\ninit_GNSSNet(G)\n\n\n##a = [\"G02\", \"g03\"]\n##print(a)\n##a,b,c = G.BaselineSet[3].baseline_match(a)\n##print(a,b,c )\n##if a:\n## print(G.BaselineSet[c-1].baseline_inf())\n\n\nA = np.zeros([c,n], dtype = float)\nA0 = np.zeros([c,1], dtype = float)\n\nB = np.zeros([c,u], dtype = float)\nX0 = np.zeros([u,1], dtype = float)\nx = np.zeros([u,1], dtype = float)\n\nP = np.zeros([n,n], dtype = float)\nL = np.zeros([n,1], dtype = float)\n\n#写入A矩阵 \nwith open('paths.csv',encoding='utf-8') as f:\n \n for line in f.readlines():\n line = line.rstrip('\\n')\n #print(line)\n t = line.split(',')\n num = int(t[0])\n del t[0]\n \n\n if t[0] == t[len(t)-1]:\n\n circles = []\n init_lines(circles,t)\n #print(circles)\n for baseline in circles:\n c,binf = G.Net_baseline_match(baseline)\n A[num*3-3][binf[0]*3-3] = c\n A[num*3-2][binf[0]*3-2] = c\n A[num*3-1][binf[0]*3-1] = c\n\n \n\n## print(\"闭合差为0,第几个条件:\", num ,\"用的基线:\",binf,\"正向还是反向\",c ,\" 计算了一次A和W矩阵\")\n## print(A[num*3-3])\n## print(A[num*3-2])\n## print(A[num*3-1])\n else:\n a,b = G.controlpoint_match(t[0])\n c,d = G.controlpoint_match(t[len(t) - 1])\n if a and c:\n print(b,'\\n',d,'\\n')\n lines = []\n init_lines(lines , t)\n\n for baseline in lines:\n e,binf = G.Net_baseline_match(baseline)\n A[num*3-3][binf[0]*3-3] = e\n A[num*3-2][binf[0]*3-2] = e\n A[num*3-1][binf[0]*3-1] = e\n\n## print(\"已知点坐标,第几个条件:\", num ,\"用的基线:\",binf,\"正向还是反向\",e ,\" 计算了一次A和W矩阵\")\n## print(A[num*3-3])\n## print(A[num*3-2])\n## print(A[num*3-1])\n\n\n\n A0[num*3-3][0] = b[2] - d[2]\n A0[num*3-2][0] = b[3] - d[3]\n A0[num*3-1][0] = b[4] - d[4]\n \n\n\n print(A0[num*3-3])\n print(A0[num*3-2])\n print(A0[num*3-1])\n \n else:\n print(\"发现了一行错误的条件\")\n\nf.close()\n\n#写入B矩阵\n#假装我读取了文件\nstr1 = \"G10\"\nstr2 = \"G07\"\nbaseline_name = [str1 , str2] \nbaseline_towards,baseline_inf = G.Net_baseline_match(baseline_name)\n_,controlpoint_inf_G10 = G.controlpoint_match(str1)\n_,controlpoint_inf_G07 = G.controlpoint_match(str2)\n\nA[r][3*baseline_inf[0] -3] = 1\nA[r+1][3*baseline_inf[0] -2] = 1\nA[r+2][3*baseline_inf[0] -1] = 1\n\nprint(A[39])\nprint(A[40])\nprint(A[41])\n\nA0[r][0] = controlpoint_inf_G10[2]\nA0[r+1][0] = controlpoint_inf_G10[3]\nA0[r+2][0] = controlpoint_inf_G10[4]\n\n\n\nB[r][0] = -1\nB[r+1][1] = -1\nB[r+2][2] = -1\n\nX0[0][0] = controlpoint_inf_G10[2] + baseline_towards * baseline_inf[4]\nX0[1][0] = controlpoint_inf_G10[3] + baseline_towards * baseline_inf[5]\nX0[2][0] = controlpoint_inf_G10[4] + baseline_towards * baseline_inf[6]\n\n\n\n#定权\nP = G.init_P(P)\nL = G.init_L(L)\n#\nP = np.matrix(P)\nA = np.matrix(A)\nA0 = np.matrix(A0)\nB = np.matrix(B)\nL = np.matrix(L)\nX0 = np.matrix(X0)\n##print(\"A0:\\n\",A0)\n##print(\"L:\\n\",L)\n\nW = np.dot(A,L) + np.dot(B, X0) + A0\n\n##print(\"AL + A0:\\n\",W0)\n##print(\"W:\\n\",W)\n\n#化成毫米单位\nW = np.matrix(np.dot(W, 1000))\n\n#A矩阵的秩\nprint(np.linalg.matrix_rank(A, tol=None, hermitian=False))\nprint(np.linalg.matrix_rank(B, 
tol=None, hermitian=False))\n\nQ = P.I\nNaa = np.dot(np.dot(A, Q), A.T)\nNbb = np.dot(np.dot(B.T, Naa.I), B)\n\nx = - np.dot(np.dot(np.dot( Nbb.I,B.T), Naa.I),W)\n\nV = - np.dot(np.dot(np.dot(Q, A.T), Naa.I),(np.dot(B, x) + W))\n\ntime = 1\nV_total = V\nx_total = x\n\nwhile abs(V.max()) > 0.000001 and time < 100:\n\tL = L + V/1000\n\tX0 = X0 + x/1000\n\tW = np.dot(A,L) + np.dot(B, X0) + A0\n\n\tx = - np.dot(np.dot(np.dot( Nbb.I,B.T), Naa.I),W)\n\tV = - np.dot(np.dot(np.dot(Q, A.T), Naa.I),(np.dot(B, x) + W))\n\tV_total = V_total + V\n\tx_total = x_total + x\n\ttime = time + 1\n\n \n\nprint(\"time:\",time,'\\n')\nprint(V_total,'\\n')\nprint(x_total,'\\n')\n\nsigema02 = np.dot(np.dot(V_total.T,P),V_total)/r\n\nsigema0 = math.sqrt(sigema02)\n\nprint(math.sqrt(sigema02))\n\nSave2Excel(V_total,\"V\")\nSave2Excel(x_total,\"x\")\nSave2Excel(A,\"A\")\nSave2Excel(B,\"B\")\nSave2Excel(P,\"P\")\nSave2Excel(Naa,\"Naa\")\nSave2Excel(Nbb,\"Nbb\")\nSave2Excel(X0+x/1000,\"X^\")\nSave2Excel(L+V/1000,\"L^\")\n", "id": "1376521", "language": "Python", "matching_score": 5.646243095397949, "max_stars_count": 7, "path": "GNSS网平差/python生成附有参数的条件平差/main.py" }, { "content": "'''\n之前测的一个控制网,12个点,4个已知,一共7个同步环,21根基线,现进行网平差\n\n半自动化条件平差,手动输入列条件方程时用到的闭合环路线(1~10 互相独立,皆为最小环)或者坐标附和条件(11~13, 四个已知点之间的三条线)\nn = 21 * 3 =63\nt = 8 * 3 =24\nr = n - t= 63 - 25 = 39\n一共39个方程,需要39/3 = 13 个条件。 条件需要手动输入路线 如paths.csv, 如果生成的A矩阵不满秩(不是39),说明Naa不可逆,有重复用到的条件。\n\nmain.py\n'''\n\nimport pandas as pd\nimport numpy as np\nimport sys\nnp.set_printoptions(threshold=sys.maxsize)\nfrom gnssnet import *\nimport math\n\ndef Save2Excel(mats,name):\n data = pd.DataFrame(mats)\n writer = pd.ExcelWriter(\"C:\\\\Users\\\\sheld\\\\Desktop\\\\111\\\\条件平差的\"+ name + \".xlsx\")\n data.to_excel(writer, \"page_1\", float_format = '%.6f')\n writer.save()\n writer.close()\n\n\n\ndef init_GNSSNet(G):\n\t#读取基线数据\n with open('data.csv','r') as f:\n for line in f.readlines():\n b= Baseline()\n b.init_baseline(line.split(','))\n G.insert_Baseline(b)\n f.close()\n \n #读取控制点文件\n with open('ControlPoints.csv','r') as f:\n for line in f.readlines():\n p = ControlPoint()\n p.init_controlpoint(line)\n G.insert_ControlPoint(p)\n f.close()\n\ndef init_lines(lines,t):\n i = 0\n l = len(t)\n while i < l - 1:\n a = str(t[i])\n b = str(t[i + 1])\n lines.append([a,b])\n i = i+ 1\n\n\n\n\nn = 63\nt = 24\nr = 39\n\n#初始化GNSS网\nglobal G\nG = GNSSNet()\ninit_GNSSNet(G)\n\n##a = [\"G02\", \"g03\"]\n##print(a)\n##a,b,c = G.BaselineSet[3].baseline_match(a)\n##print(a,b,c )\n##if a:\n## print(G.BaselineSet[c-1].baseline_inf())\n \nA = np.zeros([39,63], dtype = int)\n\nA0 = np.zeros([39,1], dtype = float)\nW = np.zeros([39,1], dtype = float)\n\nP = np.zeros([63,63], dtype = float)\nL = np.zeros([63,1], dtype = float)\n\n#写入A、W矩阵 \nwith open('paths.csv',encoding='utf-8') as f:\n \n for line in f.readlines():\n line = line.rstrip('\\n')\n #print(line)\n t = line.split(',')\n num = int(t[0])\n del t[0]\n \n\n if t[0] == t[len(t)-1]:\n\n circles = []\n init_lines(circles,t)\n #print(circles)\n for baseline in circles:\n c,binf = G.Net_baseline_match(baseline)\n A[num*3-3][binf[0]*3-3] = c\n A[num*3-2][binf[0]*3-2] = c\n A[num*3-1][binf[0]*3-1] = c\n W[num*3-3][0] = W[num*3-3][0] + c * binf[4]\n W[num*3-2][0] = W[num*3-2][0] + c * binf[5]\n W[num*3-1][0] = W[num*3-1][0] + c * binf[6]\n## print(\"闭合差为0,第几个条件:\", num ,\"用的基线:\",binf,\"正向还是反向\",c ,\" 计算了一次A和W矩阵\")\n## print(A[num*3-3])\n## print(A[num*3-2])\n## print(A[num*3-1])\n else:\n a,b = G.controlpoint_match(t[0])\n c,d = 
G.controlpoint_match(t[len(t) - 1])\n if a and c:\n print(b,'\\n',d,'\\n')\n lines = []\n init_lines(lines , t)\n\n for baseline in lines:\n e,binf = G.Net_baseline_match(baseline)\n A[num*3-3][binf[0]*3-3] = e\n A[num*3-2][binf[0]*3-2] = e\n A[num*3-1][binf[0]*3-1] = e\n W[num*3-3][0] = W[num*3-3][0] + e * binf[4] \n W[num*3-2][0] = W[num*3-2][0] + e * binf[5] \n W[num*3-1][0] = W[num*3-1][0] + e * binf[6]\n## print(\"已知点坐标,第几个条件:\", num ,\"用的基线:\",binf,\"正向还是反向\",e ,\" 计算了一次A和W矩阵\")\n## print(A[num*3-3])\n## print(A[num*3-2])\n## print(A[num*3-1])\n\n\n W[num*3-3][0] = W[num*3-3][0] + b[2] - d[2]\n W[num*3-2][0] = W[num*3-2][0] + b[3] - d[3]\n W[num*3-1][0] = W[num*3-1][0] + b[4] - d[4]\n\n A0[num*3-3][0] = b[2] - d[2]\n A0[num*3-2][0] = b[3] - d[3]\n A0[num*3-1][0] = b[4] - d[4]\n \n\n\n print(A0[num*3-3])\n print(A0[num*3-2])\n print(A0[num*3-1])\n \n else:\n print(\"发现了一行错误的条件\")\n\nf.close()\n#定权\nP = G.init_P(P)\nL = G.init_L(L)\n#\nP = np.matrix(P)\nA = np.matrix(A)\n\nA0 = np.matrix(A0)\nL = np.matrix(L)\n\n##print(\"A0:\\n\",A0)\n##print(\"L:\\n\",L)\n\nW0 = np.dot(A,L)+A0\n\n##print(\"AL + A0:\\n\",W0)\n##print(\"W:\\n\",W)\n\n\nW = np.matrix(np.dot(W0, 1000))\n\n#A矩阵的秩\nprint(np.linalg.matrix_rank(A, tol=None, hermitian=False))\n\nQ = P.I\nNaa = np.dot(np.dot(A, Q), A.T)\nK = np.dot(np.dot(Naa.I,W),-1)\nV = np.dot(np.dot(Q,A.T),K)\n\ntime = 1\nV_total = V\n\nwhile abs(V.max()) > 0.000001 and time < 100:\n L = L + V/1000\n W0 = np.dot(A,L)+A0\n W = np.matrix(np.dot(W0, 1000))\n K = np.dot(np.dot(Naa.I,W),-1)\n V = np.dot(np.dot(Q,A.T),K)\n V_total = V_total + V\n time = time + 1\n\nsigema02 = np.dot(np.dot(V_total.T,P),V_total)/39 \n\nprint(\"time:\",time,'\\n')\nprint(V_total)\n\n\n\nsigema0 = math.sqrt(sigema02)\n\nprint(math.sqrt(sigema02))\nSave2Excel(L,\"L\")\nSave2Excel(V_total,\"V\")\nSave2Excel(A,\"A\")\nSave2Excel(P,\"P\")\nSave2Excel(Naa,\"Naa\")\n", "id": "4233485", "language": "Python", "matching_score": 2.9710803031921387, "max_stars_count": 7, "path": "GNSS网平差/python生成条件方程并平差/main.py" }, { "content": "'''\n间接平差\nmain.py\n'''\n\nimport pandas as pd\nimport numpy as np\nimport sys\nnp.set_printoptions(threshold=sys.maxsize) #print显示完整array\nfrom gnssnet import *\nimport math\n\ndef Save2Excel(mats,name):\n data = pd.DataFrame(mats)\n writer = pd.ExcelWriter(\"C:\\\\Users\\\\sheld\\\\Desktop\\\\111\\\\\"+ \"间接平差的\" +name + \".xlsx\")\n data.to_excel(writer, \"page_1\", float_format = '%.6f')#浮点数,精确到6位小数\n writer.save()\n writer.close()\n\n\n\ndef init_GNSSNet(G):\n\n#读入所有的基线坐标信息\n with open(\"data.csv\",'r') as f:\n for line in f.readlines():\n b= Baseline()\n b.init_baseline(line.split(','))\n G.insert_Baseline(b)\n\n f.close()\n\n#已经手算过近似坐标了\n with open(\"zhandian.csv\",'r',encoding='UTF-8-sig') as f:\n i = 1\n for line in f.readlines():\n \n p = station()\n p.init_station2(line)\n\n if not p.cat_match():\n p.beizhu(i)\n i = i + 1\n G.insert_Station(p)\n f.close()\n\n\nn = 63\nt = 24\nr = n - t\n\nglobal G\nG = GNSSNet()\ninit_GNSSNet(G)\n\nP = np.zeros([n,n], dtype = float)\nL = np.zeros([n,1], dtype = float)\nl = np.zeros([n,1], dtype = float)\n\n'''\nL^ = BX^ + d\nl = L - (BX0 + d)\nV = Bx^ - l\n\n'''\nB = np.zeros([n,t], dtype = float)\nd = np.zeros([n,1], dtype = float)\n\nX0 = np.zeros([t,1], dtype = float)\nx = np.zeros([t,1], dtype = float)\n\n\n\n#写入B,d矩阵\nfor baseline in G.BaselineSet:\n bl_inf = baseline.baseline_inf()\n num = bl_inf[0]\n origin = bl_inf[1]\n target = bl_inf[2]\n\n #起点\n found,ifknow,stat_inf = G.Station_Cat_Match(origin)\n if ifknow:\n 
d[3 * num - 3][0] = d[3 * num - 3][0] - stat_inf[2]\n d[3 * num - 2][0] = d[3 * num - 2][0] - stat_inf[3]\n d[3 * num - 1][0] = d[3 * num - 1][0] - stat_inf[4]\n## print(num,bl_inf,\"起点是已知点\",stat_inf)\n elif not ifknow:\n B[3 * num - 3][3 * stat_inf[6] - 3] = -1\n B[3 * num - 2][3 * stat_inf[6] - 2] = -1\n B[3 * num - 1][3 * stat_inf[6] - 1] = -1\n## print(num,bl_inf,\"起点是未知点\",stat_inf)\n \n else:\n print(\"有错误的站点名\")\n \n #末尾\n found,ifknow,stat_inf = G.Station_Cat_Match(target)\n if ifknow:\n d[3 * num - 3][0] = d[3 * num - 3][0] + stat_inf[2]\n d[3 * num - 2][0] = d[3 * num - 2][0] + stat_inf[3]\n d[3 * num - 1][0] = d[3 * num - 1][0] + stat_inf[4]\n## print(num,bl_inf,\"终点是已知点\",stat_inf)\n \n elif not ifknow:\n B[3 * num - 3][3 * stat_inf[6] - 3] = 1\n B[3 * num - 2][3 * stat_inf[6] - 2] = 1\n B[3 * num - 1][3 * stat_inf[6] - 1] = 1\n## print(num,bl_inf,\"终点是未知点\",stat_inf)\n else:\n print(\"有错误的站点名\")\n\n\n## print(B[3 * num - 3])\n## print(B[3 * num - 2])\n## print(B[3 * num - 1])\n## print(d[3 * num - 3])\n## print(d[3 * num - 2])\n## print(d[3 * num - 1])\n\n \n\n#定权\nP = G.init_P(P)\nL = G.init_L(L)\nx0 = G.init_X0(X0)\n\nP = np.matrix(P)\nL = np.matrix(L)\nX0 = np.matrix(X0)\nd = np.matrix(d)\nB = np.matrix(B)\n\nQ = P.I\n\n\nprint(\"X0:\\n\",X0)\nprint(\"L:\\n\",L)\n\nprint(\"d:\\n\",d)\nprint(\"B:\\n\",B)\n\nl = L - np.dot(B,X0) - d\nprint(\"l:\\n\",l)\n\n\n#化成毫米单位\nl = np.matrix(np.dot(l, 1000))\n\n#Nbbx^ - W = 0\nNbb = np.dot(np.dot(B.T, P), B)\nW = np.dot(np.dot(B.T, P), l)\n\n\n#矩阵的秩\nprint(np.linalg.matrix_rank(B, tol=None, hermitian=False))\nprint(np.linalg.matrix_rank(l, tol=None, hermitian=False))\n\n#x^\nx = np.dot(Nbb.I,W)\n\nV = np.dot(B,x) - l\n\nprint('x',x,'\\n')\n\nprint('V',V,'\\n')\n\ntime = 1\nV_total = V\nx_total = x\n##\n##while abs(V.max()) > 0.00001 and time < 10000:\n## L = L + V/1000\n## X0 = X0 + x/1000\n##\n## l = L - np.dot(B,X0) - d\n##\n## l = np.matrix(np.dot(l, 1000))\n## x = np.dot(Nbb.I,W)\n## V = np.dot(B,x) - l\n## \n## V_total = V_total + V\n## x_total = x_total + x\n## time = time + 1\n##\n##\n\n\n\nL = L + V/1000\nX0 = X0 + x/1000\n\nprint('L^',L)\n\nprint(\"time:\",time,'\\n')\nprint('V_total',V_total,'\\n')\nprint('x_total',x_total,'\\n')\n\n\n\n\nsigema02 = np.dot(np.dot(V_total.T,P),V_total)/r\n\nsigema0 = math.sqrt(sigema02)\n\nprint(sigema0)\n\n\nSave2Excel(B,\"B\")\nSave2Excel(d,\"d\")\nSave2Excel(L,\"L^\")\nSave2Excel(l,\"l\")\nSave2Excel(X0,\"X^\")\nSave2Excel(V_total,\"V_total\")\nSave2Excel(x_total,\"x_total\")\nSave2Excel(Nbb,\"Nbb\")\n\n", "id": "107057", "language": "Python", "matching_score": 4.712903022766113, "max_stars_count": 7, "path": "GNSS网平差/python间接平差/main.py" }, { "content": "'''\n间接平差\n已知G07-G11边长为定值\nmain.py\n'''\n\nimport pandas as pd\nimport numpy as np\nimport sys\nnp.set_printoptions(threshold=sys.maxsize) #print显示完整array\nfrom gnssnet import *\nimport math\n\ndef Save2Excel(mats,name):\n data = pd.DataFrame(mats)\n writer = pd.ExcelWriter(\"C:\\\\Users\\\\sheld\\\\Desktop\\\\111\\\\附有限制条件的间接平差的\"+ name + \".xlsx\")\n data.to_excel(writer, \"page_1\", float_format = '%.6f')#浮点数,精确到6位小数\n writer.save()\n writer.close()\n\n\n\ndef init_GNSSNet(G):\n\n#读入所有的基线坐标信息\n with open(\"data.csv\",'r') as f:\n for line in f.readlines():\n b= Baseline()\n b.init_baseline(line.split(','))\n G.insert_Baseline(b)\n\n f.close()\n\n#已经手算过近似坐标了\n with open(\"zhandian.csv\",'r',encoding='UTF-8-sig') as f:\n i = 1\n for line in f.readlines():\n \n p = station()\n p.init_station2(line)\n\n if not p.cat_match():\n p.beizhu(i)\n i 
= i + 1\n G.insert_Station(p)\n f.close()\n\n\nn = 63\nt = 24\nr = n - t\ns = 1\nu = t + s\n\nS = math.sqrt(366.6087**2 + (252.4815)**2 + 72.1291**2)\n\nglobal G\nG = GNSSNet()\ninit_GNSSNet(G)\n\nP = np.zeros([n,n], dtype = float)\nL = np.zeros([n,1], dtype = float)\nl = np.zeros([n,1], dtype = float)\n\n\n\n\n'''\nL^ = BX^ + d\nFai(X^) = 0\n\nl = L - (BX0 + d)\nV = Bx^ - l\n'''\nB = np.zeros([n,u], dtype = float)\nd = np.zeros([n,1], dtype = float)\n\nX0 = np.zeros([u,1], dtype = float)\nx = np.zeros([u,1], dtype = float)\n\n\n'''\nCx^ + Wx = 0\n'''\n\nC = np.zeros([s,u], dtype = float)\nWx = np.zeros([s,1], dtype = float)\n\n#写入B,d矩阵\nfor baseline in G.BaselineSet:\n bl_inf = baseline.baseline_inf()\n num = bl_inf[0]\n origin = bl_inf[1]\n target = bl_inf[2]\n\n #起点\n found,ifknow,stat_inf = G.Station_Cat_Match(origin)\n if ifknow:\n d[3 * num - 3][0] = d[3 * num - 3][0] - stat_inf[2]\n d[3 * num - 2][0] = d[3 * num - 2][0] - stat_inf[3]\n d[3 * num - 1][0] = d[3 * num - 1][0] - stat_inf[4]\n## print(num,bl_inf,\"起点是已知点\",stat_inf)\n elif not ifknow:\n B[3 * num - 3][3 * stat_inf[6] - 3] = -1\n B[3 * num - 2][3 * stat_inf[6] - 2] = -1\n B[3 * num - 1][3 * stat_inf[6] - 1] = -1\n## print(num,bl_inf,\"起点是未知点\",stat_inf)\n \n else:\n print(\"有错误的站点名\")\n \n #末尾\n found,ifknow,stat_inf = G.Station_Cat_Match(target)\n if ifknow:\n d[3 * num - 3][0] = d[3 * num - 3][0] + stat_inf[2]\n d[3 * num - 2][0] = d[3 * num - 2][0] + stat_inf[3]\n d[3 * num - 1][0] = d[3 * num - 1][0] + stat_inf[4]\n## print(num,bl_inf,\"终点是已知点\",stat_inf)\n \n elif not ifknow:\n B[3 * num - 3][3 * stat_inf[6] - 3] = 1\n B[3 * num - 2][3 * stat_inf[6] - 2] = 1\n B[3 * num - 1][3 * stat_inf[6] - 1] = 1\n## print(num,bl_inf,\"终点是未知点\",stat_inf)\n else:\n print(\"有错误的站点名\")\n\n\n## print(B[3 * num - 3])\n## print(B[3 * num - 2])\n## print(B[3 * num - 1])\n## print(d[3 * num - 3])\n## print(d[3 * num - 2])\n## print(d[3 * num - 1])\n\n\n#定权\nP = G.init_P(P)\nL = G.init_L(L)\nx0 = G.init_X0(X0)\n\nX0[3*8][0] = math.sqrt(366.6087**2 + (252.4815)**2 + 72.1291**2)\n\n#写入C、W矩阵 G01-G02\n\nG01_X = -2612808.8378000\nG01_Y = 4748993.2784000\nG01_Z = 3350430.6698000\n\nG02_X0 = -2613175.4394000\nG02_Y0 = 4748740.7892000\nG02_Z0 = 3350502.7893000\n\nS0 = math.sqrt(366.6087**2 + (252.4815)**2 + 72.1291**2)\n\nC[0][3] = 2* G02_X0\nC[0][4] = 2* G02_Y0\nC[0][5] = 2* G02_Z0\n\nC[0][24] = -2 * S0\n\nWx[0][0] = -2*G01_X - 2*G01_Y -2*G01_Z\n\n\n\nP = np.matrix(P)\nL = np.matrix(L)\nX0 = np.matrix(X0)\nd = np.matrix(d)\nB = np.matrix(B)\n\nC = np.matrix(C)\nWx = np.matrix(Wx)\n\n#矩阵的秩\nprint(np.linalg.matrix_rank(B, tol=None, hermitian=False))\nprint(np.linalg.matrix_rank(Wx, tol=None, hermitian=False))\n\n\nprint(\"C:\\n\",C)\nSave2Excel(C,\"C\")\nprint(\"Wx:\\n\",Wx)\n\nQ = P.I\n\n\n##print(\"X0:\\n\",X0)\n##print(\"L:\\n\",L)\n##\n##print(\"d:\\n\",d)\n\nprint(\"B:\\n\",B)\nSave2Excel(B,\"B\")\nl = L - np.dot(B,X0) - d\n##print(\"l:\\n\",l)\n\n\n###化成毫米单位\n##l = np.matrix(np.dot(l, 1000))\n\n#Nbbx^ - W = 0\nNbb = np.dot(np.dot(B.T, P), B)\nSave2Excel(Nbb,\"Nbb\")\n\nprint(\"Nbb:\\n\",Nbb,Nbb.shape)\nW = np.dot(np.dot(B.T, P), l)\n\nNcc = np.dot(np.dot(C, Nbb.I), C.T)\n\n\n#矩阵的秩\nprint(np.linalg.matrix_rank(B, tol=None, hermitian=False))\nprint(np.linalg.matrix_rank(P, tol=None, hermitian=False))\n\n#x^\nx = np.dot((Nbb.I - np.dot(np.dot(np.dot(np.dot(Nbb.I, C.T ), Ncc.I ) , C ) , Nbb.I)) , W ) - np.dot( np.dot( np.dot( Nbb.I , C.T ) , Ncc.I ) , Wx )\n\nV = np.dot(B,x) - l\n\n\n\n\nprint('x',x,'\\n')\n\nprint('V',V,'\\n')\n\ntime = 
1\nV_total = V\nx_total = x\n##\n##while abs(V.max()) > 0.00001 and time < 10000:\n## L = L + V/1000\n## X0 = X0 + x/1000\n##\n## l = L - np.dot(B,X0) - d\n##\n## l = np.matrix(np.dot(l, 1000))\n## x = np.dot(Nbb.I,W)\n## V = np.dot(B,x) - l\n## \n## V_total = V_total + V\n## x_total = x_total + x\n## time = time + 1\n##\n##\n\nL = L + V\nX0 = X0 + x\n\n\n##L = L + V/1000\n##X0 = X0 + x/1000\n\nprint('L^',L)\n\nprint(\"time:\",time,'\\n')\nprint('V_total',V_total,'\\n')\nprint('x_total',x_total,'\\n')\n\n\n\n\nsigema02 = np.dot(np.dot(V_total.T,P),V_total)/r\n\nsigema0 = math.sqrt(sigema02)\n\nprint(sigema0)\n\n\nSave2Excel(B,\"B\")\nSave2Excel(d,\"d\")\nSave2Excel(L,\"L^\")\nSave2Excel(l,\"l\")\nSave2Excel(X0,\"X^\")\nSave2Excel(V_total,\"V_total\")\nSave2Excel(x_total,\"x_total\")\n\n\n", "id": "7489899", "language": "Python", "matching_score": 0.6407499313354492, "max_stars_count": 7, "path": "GNSS网平差/python附有限制条件的间接平差/main.py" }, { "content": "\"\"\"defgraph.py\"\"\"\n\n'''\nclass Vertex(object):\n def __init__(self,name):\n self.name = name\n self.adjs = []\n self.adjsname = []\n self.isdestn = 0\n \n def look_adjs(self):\n L = []\n for v in self.adjs:\n L.append(v.name)\n print(L)\n\nclass Edge(object):\n def __init__(self,v,u):\n self.relevances = {v.name:v.adjs, u.name:u.adjs}\n\n def look_relevances(self):\n L = []\n for vname in self.relevances.keys():\n L.append(vname)\n print(L)\n\n'''\nclass Graph(object):\n '''undirected unweighted graph'''\n\n def __init__(self):\n self.VertexNumber = int(0) #点\n self.EdgeNumber = int(0) #边\n self.VertexSet = {}\n self.EdgeSet = []\n self.origin = None\n self.destn = None\n self.O2DPathNum = int(0) #路径数目\n \n def insert_vertex(self,vertex):\n self.VertexSet.setdefault(vertex,[])\n\n def insert_edge(self,v,u):\n edge = set([v, u])\n if not (edge in self.EdgeSet):\n self.EdgeSet.append(edge)\n\n '''establish adjacency relationship'''\n if not (u in self.VertexSet[v]):\n self.VertexSet[v].append(u)\n if not (v in self.VertexSet[u]):\n self.VertexSet[u].append(v)\n\n def get_adjs(self,vertex):\n if vertex in self.VertexSet.keys():\n return self.VertexSet[vertex]\n else:\n print('{} is not in VertexSet'.format(vertex)) \n\n def look_VertexSet(self):\n L = []\n for v in self.VertexSet.keys():\n L.append(v) \n print(L)\n \n def look_EdgeSet(self):\n print(self.EdgeSet)\n", "id": "4715357", "language": "Python", "matching_score": 4.255878448486328, "max_stars_count": 7, "path": "GNSS网平差/python两点之间路径/defgraph.py" }, { "content": "'''All paths of two vertexs.py'''\n\nimport string\nfrom defgraph import *\n\ndef initialize_graph(G):\n with open('test2.txt','r') as f:\n G.EdgeNumber = int(f.readline().strip())\n G.VertexNumber = int(f.readline().strip())\n \n '''\n create the EdgeSet and the VertexSet\n notes:enumerate can get the loop count'\n '''\n for i,line in enumerate(f.readlines()):\n if i == G.EdgeNumber:\n G.origin = line.strip()\n elif i == G.EdgeNumber + 1:\n G.destn = line.strip()\n else:\n u,v = line.strip().split()\n \n G.insert_vertex(v)\n G.insert_vertex(u)\n\n G.insert_edge(v,u)\n\n\ndef search_path():\n global path\n path = []\n path.append(G.origin)\n visit(G.origin)\n print(\"Path Number:{}\".format(G.O2DPathNum))\n\ndef visit(vertex):\n v_adjs = G.get_adjs(vertex) \n\n '''whether vertex has adjacences'''\n if v_adjs:\n for u in v_adjs:\n if u == G.destn:\n print(''.join([v for v in path]) + u) \n G.O2DPathNum += 1\n elif not (u in path):\n path.append(u)\n visit(u)\n '''loop end means that node 'u' has been explored'''\n 
path.pop()\n\n\ndef main():\n global G\n G = Graph()\n initialize_graph(G)\n '''\n G.look_VertexSet()\n G.look_EdgeSet()\n print(G.get_adjs('A')) \n '''\n search_path()\n \n\nif __name__ == \"__main__\":\n main()\n", "id": "12134473", "language": "Python", "matching_score": 0.033283781260252, "max_stars_count": 7, "path": "GNSS网平差/python两点之间路径/main.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'C:\\GitHub\\learn-surveying-software-designing\\高斯投影坐标计算\\MainWindow.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.0\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(524, 384)\n self.centralWidget = QtWidgets.QWidget(MainWindow)\n self.centralWidget.setObjectName(\"centralWidget\")\n self.label = QtWidgets.QLabel(self.centralWidget)\n self.label.setGeometry(QtCore.QRect(70, 40, 91, 21))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralWidget)\n self.label_2.setGeometry(QtCore.QRect(300, 40, 91, 21))\n self.label_2.setObjectName(\"label_2\")\n self.pushButton = QtWidgets.QPushButton(self.centralWidget)\n self.pushButton.setGeometry(QtCore.QRect(90, 250, 75, 23))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(self.centralWidget)\n self.pushButton_2.setGeometry(QtCore.QRect(320, 250, 75, 23))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralWidget)\n self.plainTextEdit.setGeometry(QtCore.QRect(50, 90, 161, 131))\n self.plainTextEdit.setObjectName(\"plainTextEdit\")\n self.plainTextEdit_2 = QtWidgets.QPlainTextEdit(self.centralWidget)\n self.plainTextEdit_2.setGeometry(QtCore.QRect(270, 90, 161, 131))\n self.plainTextEdit_2.setObjectName(\"plainTextEdit_2\")\n MainWindow.setCentralWidget(self.centralWidget)\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 524, 23))\n self.menuBar.setObjectName(\"menuBar\")\n self.menu = QtWidgets.QMenu(self.menuBar)\n self.menu.setObjectName(\"menu\")\n self.menu_2 = QtWidgets.QMenu(self.menuBar)\n self.menu_2.setObjectName(\"menu_2\")\n MainWindow.setMenuBar(self.menuBar)\n self.action_Forward = QtWidgets.QAction(MainWindow)\n self.action_Forward.setObjectName(\"action_Forward\")\n self.action_Backward = QtWidgets.QAction(MainWindow)\n self.action_Backward.setObjectName(\"action_Backward\")\n self.action_Open = QtWidgets.QAction(MainWindow)\n self.action_Open.setObjectName(\"action_Open\")\n self.action_Close = QtWidgets.QAction(MainWindow)\n self.action_Close.setObjectName(\"action_Close\")\n self.action_Save = QtWidgets.QAction(MainWindow)\n self.action_Save.setObjectName(\"action_Save\")\n self.action_Quit = QtWidgets.QAction(MainWindow)\n self.action_Quit.setObjectName(\"action_Quit\")\n self.menu.addAction(self.action_Open)\n self.menu.addAction(self.action_Close)\n self.menu.addAction(self.action_Save)\n self.menu.addAction(self.action_Quit)\n self.menu_2.addAction(self.action_Forward)\n self.menu_2.addAction(self.action_Backward)\n self.menuBar.addAction(self.menu.menuAction())\n self.menuBar.addAction(self.menu_2.menuAction())\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n 
MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"B,L\"))\n self.label_2.setText(_translate(\"MainWindow\", \"X,Y\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"正算\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"反算\"))\n self.menu.setTitle(_translate(\"MainWindow\", \"文件\"))\n self.menu_2.setTitle(_translate(\"MainWindow\", \"计算\"))\n self.action_Forward.setText(_translate(\"MainWindow\", \"正算\"))\n self.action_Backward.setText(_translate(\"MainWindow\", \"反算\"))\n self.action_Open.setText(_translate(\"MainWindow\", \"打开\"))\n self.action_Close.setText(_translate(\"MainWindow\", \"&关闭\"))\n self.action_Save.setText(_translate(\"MainWindow\", \"&保存\"))\n self.action_Quit.setText(_translate(\"MainWindow\", \"&退出\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n", "id": "6484255", "language": "Python", "matching_score": 6.739138126373291, "max_stars_count": 7, "path": "高斯投影坐标计算/Ui_MainWindow.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'C:\\GitHub\\learn-surveying-software-designing\\读取rinex文件\\MainWindow.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.0\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(567, 415)\n self.centralWidget = QtWidgets.QWidget(MainWindow)\n self.centralWidget.setObjectName(\"centralWidget\")\n self.label = QtWidgets.QLabel(self.centralWidget)\n self.label.setGeometry(QtCore.QRect(30, 50, 54, 20))\n self.label.setObjectName(\"label\")\n self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralWidget)\n self.plainTextEdit.setGeometry(QtCore.QRect(10, 80, 541, 301))\n self.plainTextEdit.setObjectName(\"plainTextEdit\")\n self.lineEdit = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit.setGeometry(QtCore.QRect(400, 20, 113, 20))\n self.lineEdit.setObjectName(\"lineEdit\")\n self.label_path = QtWidgets.QLabel(self.centralWidget)\n self.label_path.setGeometry(QtCore.QRect(100, 50, 54, 12))\n self.label_path.setObjectName(\"label_path\")\n MainWindow.setCentralWidget(self.centralWidget)\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 567, 23))\n self.menuBar.setObjectName(\"menuBar\")\n self.menu = QtWidgets.QMenu(self.menuBar)\n self.menu.setObjectName(\"menu\")\n MainWindow.setMenuBar(self.menuBar)\n self.action_Open = QtWidgets.QAction(MainWindow)\n self.action_Open.setObjectName(\"action_Open\")\n self.action_Close = QtWidgets.QAction(MainWindow)\n self.action_Close.setObjectName(\"action_Close\")\n self.action_Save = QtWidgets.QAction(MainWindow)\n self.action_Save.setObjectName(\"action_Save\")\n self.action_Quit = QtWidgets.QAction(MainWindow)\n self.action_Quit.setObjectName(\"action_Quit\")\n self.menu.addAction(self.action_Open)\n self.menu.addAction(self.action_Close)\n self.menu.addAction(self.action_Save)\n self.menu.addAction(self.action_Quit)\n self.menuBar.addAction(self.menu.menuAction())\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n 
MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"读取结果\"))\n self.label_path.setText(_translate(\"MainWindow\", \"filepath\"))\n self.menu.setTitle(_translate(\"MainWindow\", \"文件\"))\n self.action_Open.setText(_translate(\"MainWindow\", \"&Open\"))\n self.action_Close.setText(_translate(\"MainWindow\", \"&Close\"))\n self.action_Save.setText(_translate(\"MainWindow\", \"&Save\"))\n self.action_Quit.setText(_translate(\"MainWindow\", \"&Quit\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n", "id": "3500343", "language": "Python", "matching_score": 3.7616963386535645, "max_stars_count": 7, "path": "读取rinex文件/Ui_MainWindow.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'C:\\GitHub\\learn-surveying-software-designing\\坐标换算\\mainForm.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.0\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(\"Dialog\")\n Dialog.setEnabled(True)\n Dialog.resize(574, 771)\n Dialog.setSizeGripEnabled(True)\n self.label = QtWidgets.QLabel(Dialog)\n self.label.setGeometry(QtCore.QRect(200, 10, 131, 31))\n font = QtGui.QFont()\n font.setFamily(\"新宋体\")\n font.setPointSize(14)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.pushButton = QtWidgets.QPushButton(Dialog)\n self.pushButton.setGeometry(QtCore.QRect(250, 360, 111, 23))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton_2 = QtWidgets.QPushButton(Dialog)\n self.pushButton_2.setGeometry(QtCore.QRect(380, 360, 141, 23))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.label_2 = QtWidgets.QLabel(Dialog)\n self.label_2.setGeometry(QtCore.QRect(50, 160, 54, 12))\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(Dialog)\n self.label_3.setGeometry(QtCore.QRect(330, 160, 54, 12))\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(Dialog)\n self.label_4.setGeometry(QtCore.QRect(290, 50, 81, 16))\n self.label_4.setObjectName(\"label_4\")\n self.pushButton_Open = QtWidgets.QPushButton(Dialog)\n self.pushButton_Open.setGeometry(QtCore.QRect(50, 360, 75, 23))\n self.pushButton_Open.setObjectName(\"pushButton_Open\")\n self.pushButton_Save = QtWidgets.QPushButton(Dialog)\n self.pushButton_Save.setGeometry(QtCore.QRect(150, 360, 75, 23))\n self.pushButton_Save.setObjectName(\"pushButton_Save\")\n self.pushButton_3 = QtWidgets.QPushButton(Dialog)\n self.pushButton_3.setGeometry(QtCore.QRect(430, 480, 111, 23))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.label_5 = QtWidgets.QLabel(Dialog)\n self.label_5.setGeometry(QtCore.QRect(60, 450, 54, 12))\n self.label_5.setObjectName(\"label_5\")\n self.label_6 = QtWidgets.QLabel(Dialog)\n self.label_6.setGeometry(QtCore.QRect(200, 450, 81, 16))\n self.label_6.setObjectName(\"label_6\")\n self.label_7 = QtWidgets.QLabel(Dialog)\n self.label_7.setGeometry(QtCore.QRect(50, 50, 54, 12))\n self.label_7.setObjectName(\"label_7\")\n self.pushButton_Close = QtWidgets.QPushButton(Dialog)\n self.pushButton_Close.setGeometry(QtCore.QRect(480, 660, 75, 23))\n 
self.pushButton_Close.setObjectName(\"pushButton_Close\")\n self.plainTextEdit_Input = QtWidgets.QPlainTextEdit(Dialog)\n self.plainTextEdit_Input.setGeometry(QtCore.QRect(40, 180, 231, 151))\n self.plainTextEdit_Input.setObjectName(\"plainTextEdit_Input\")\n self.plainTextEdit_Out = QtWidgets.QPlainTextEdit(Dialog)\n self.plainTextEdit_Out.setGeometry(QtCore.QRect(320, 180, 221, 151))\n self.plainTextEdit_Out.setObjectName(\"plainTextEdit_Out\")\n self.lineEdit_l = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_l.setGeometry(QtCore.QRect(50, 480, 113, 20))\n self.lineEdit_l.setObjectName(\"lineEdit_l\")\n self.lineEdit_s = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_s.setGeometry(QtCore.QRect(180, 480, 201, 21))\n self.lineEdit_s.setObjectName(\"lineEdit_s\")\n self.lineEdit_a = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_a.setGeometry(QtCore.QRect(280, 80, 101, 20))\n self.lineEdit_a.setObjectName(\"lineEdit_a\")\n self.comboBox = QtWidgets.QComboBox(Dialog)\n self.comboBox.setGeometry(QtCore.QRect(50, 80, 191, 22))\n self.comboBox.setObjectName(\"comboBox\")\n self.comboBox.addItem(\"\")\n self.comboBox.setItemText(0, \"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.comboBox.addItem(\"\")\n self.label_8 = QtWidgets.QLabel(Dialog)\n self.label_8.setGeometry(QtCore.QRect(410, 50, 54, 12))\n self.label_8.setObjectName(\"label_8\")\n self.lineEdit_alpha = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_alpha.setGeometry(QtCore.QRect(410, 80, 131, 20))\n self.lineEdit_alpha.setObjectName(\"lineEdit_alpha\")\n self.label_9 = QtWidgets.QLabel(Dialog)\n self.label_9.setGeometry(QtCore.QRect(60, 540, 54, 12))\n self.label_9.setObjectName(\"label_9\")\n self.lineEdit_Xangle = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_Xangle.setGeometry(QtCore.QRect(90, 590, 113, 20))\n self.lineEdit_Xangle.setObjectName(\"lineEdit_Xangle\")\n self.lineEdit_Yangle = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_Yangle.setGeometry(QtCore.QRect(90, 620, 113, 20))\n self.lineEdit_Yangle.setObjectName(\"lineEdit_Yangle\")\n self.lineEdit_Zangle = QtWidgets.QLineEdit(Dialog)\n self.lineEdit_Zangle.setGeometry(QtCore.QRect(90, 650, 113, 20))\n self.lineEdit_Zangle.setObjectName(\"lineEdit_Zangle\")\n self.plainTextEdit = QtWidgets.QPlainTextEdit(Dialog)\n self.plainTextEdit.setGeometry(QtCore.QRect(240, 570, 211, 131))\n self.plainTextEdit.setObjectName(\"plainTextEdit\")\n self.label_10 = QtWidgets.QLabel(Dialog)\n self.label_10.setGeometry(QtCore.QRect(250, 540, 54, 12))\n self.label_10.setObjectName(\"label_10\")\n self.label_11 = QtWidgets.QLabel(Dialog)\n self.label_11.setGeometry(QtCore.QRect(50, 590, 31, 16))\n self.label_11.setObjectName(\"label_11\")\n self.label_12 = QtWidgets.QLabel(Dialog)\n self.label_12.setGeometry(QtCore.QRect(50, 620, 31, 16))\n self.label_12.setObjectName(\"label_12\")\n self.label_13 = QtWidgets.QLabel(Dialog)\n self.label_13.setGeometry(QtCore.QRect(50, 650, 31, 16))\n self.label_13.setObjectName(\"label_13\")\n self.pushButton_4 = QtWidgets.QPushButton(Dialog)\n self.pushButton_4.setGeometry(QtCore.QRect(480, 620, 75, 23))\n self.pushButton_4.setObjectName(\"pushButton_4\")\n self.label_2.setBuddy(self.plainTextEdit_Input)\n self.label_3.setBuddy(self.plainTextEdit_Out)\n\n self.retranslateUi(Dialog)\n self.pushButton_Close.clicked.connect(Dialog.close)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n Dialog.setTabOrder(self.comboBox, self.lineEdit_a)\n Dialog.setTabOrder(self.lineEdit_a, self.lineEdit_alpha)\n Dialog.setTabOrder(self.lineEdit_alpha, 
self.plainTextEdit_Input)\n Dialog.setTabOrder(self.plainTextEdit_Input, self.plainTextEdit_Out)\n Dialog.setTabOrder(self.plainTextEdit_Out, self.pushButton_Open)\n Dialog.setTabOrder(self.pushButton_Open, self.pushButton_Save)\n Dialog.setTabOrder(self.pushButton_Save, self.pushButton)\n Dialog.setTabOrder(self.pushButton, self.pushButton_2)\n Dialog.setTabOrder(self.pushButton_2, self.lineEdit_l)\n Dialog.setTabOrder(self.lineEdit_l, self.lineEdit_s)\n Dialog.setTabOrder(self.lineEdit_s, self.pushButton_3)\n Dialog.setTabOrder(self.pushButton_3, self.pushButton_Close)\n\n def retranslateUi(self, Dialog):\n _translate = QtCore.QCoreApplication.translate\n Dialog.setWindowTitle(_translate(\"Dialog\", \"坐标转换\"))\n self.label.setText(_translate(\"Dialog\", \"坐标转换工具\"))\n self.pushButton.setText(_translate(\"Dialog\", \"B,L,H 转 X,Y,Z\"))\n self.pushButton_2.setText(_translate(\"Dialog\", \"X,Y,Z 转 B,L,H\"))\n self.label_2.setText(_translate(\"Dialog\", \"输入(&I)\"))\n self.label_3.setText(_translate(\"Dialog\", \"输出(&O)\"))\n self.label_4.setText(_translate(\"Dialog\", \"椭球半径a\"))\n self.pushButton_Open.setText(_translate(\"Dialog\", \"从文件打开\"))\n self.pushButton_Save.setText(_translate(\"Dialog\", \"保存到文件\"))\n self.pushButton_3.setText(_translate(\"Dialog\", \"计算平行圈弧长\"))\n self.label_5.setText(_translate(\"Dialog\", \"l\\'\\'\"))\n self.label_6.setText(_translate(\"Dialog\", \"平行圈弧长\"))\n self.label_7.setText(_translate(\"Dialog\", \"选择椭球\"))\n self.pushButton_Close.setText(_translate(\"Dialog\", \"关闭程序\"))\n self.comboBox.setItemText(1, _translate(\"Dialog\", \"WGS-84椭球体\"))\n self.comboBox.setItemText(2, _translate(\"Dialog\", \"2000中国大地坐标系\"))\n self.comboBox.setItemText(3, _translate(\"Dialog\", \"1975年国际椭球体\"))\n self.label_8.setText(_translate(\"Dialog\", \"扁率α\"))\n self.label_9.setText(_translate(\"Dialog\", \"欧勒角\"))\n self.label_10.setText(_translate(\"Dialog\", \"变换结果\"))\n self.label_11.setText(_translate(\"Dialog\", \"εx\"))\n self.label_12.setText(_translate(\"Dialog\", \"εy\"))\n self.label_13.setText(_translate(\"Dialog\", \"εz\"))\n self.pushButton_4.setText(_translate(\"Dialog\", \"旋转变换\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Dialog = QtWidgets.QDialog()\n ui = Ui_Dialog()\n ui.setupUi(Dialog)\n Dialog.show()\n sys.exit(app.exec_())\n", "id": "11485264", "language": "Python", "matching_score": 4.688874244689941, "max_stars_count": 7, "path": "坐标换算/Ui_mainForm.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'C:\\GitHub\\learn-surveying-software-designing\\0313作业\\MainWindow.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.0\n#\n# WARNING! 
All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(554, 577)\n self.centralWidget = QtWidgets.QWidget(MainWindow)\n self.centralWidget.setObjectName(\"centralWidget\")\n self.label = QtWidgets.QLabel(self.centralWidget)\n self.label.setGeometry(QtCore.QRect(130, 10, 12, 12))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralWidget)\n self.label_2.setGeometry(QtCore.QRect(220, 10, 12, 12))\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(self.centralWidget)\n self.label_3.setGeometry(QtCore.QRect(70, 30, 20, 12))\n self.label_3.setObjectName(\"label_3\")\n self.label_4 = QtWidgets.QLabel(self.centralWidget)\n self.label_4.setGeometry(QtCore.QRect(70, 70, 20, 12))\n self.label_4.setObjectName(\"label_4\")\n self.graphicsView = QtWidgets.QGraphicsView(self.centralWidget)\n self.graphicsView.setGeometry(QtCore.QRect(30, 190, 501, 351))\n self.graphicsView.setObjectName(\"graphicsView\")\n self.lineEdit_XA = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_XA.setGeometry(QtCore.QRect(100, 30, 71, 20))\n self.lineEdit_XA.setObjectName(\"lineEdit_XA\")\n self.lineEdit_XB = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_XB.setGeometry(QtCore.QRect(100, 70, 71, 20))\n self.lineEdit_XB.setObjectName(\"lineEdit_XB\")\n self.lineEdit_YA = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_YA.setGeometry(QtCore.QRect(190, 30, 71, 20))\n self.lineEdit_YA.setObjectName(\"lineEdit_YA\")\n self.lineEdit_YB = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_YB.setGeometry(QtCore.QRect(190, 70, 71, 20))\n self.lineEdit_YB.setObjectName(\"lineEdit_YB\")\n self.lineEdit_Sab = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_Sab.setGeometry(QtCore.QRect(360, 30, 113, 20))\n self.lineEdit_Sab.setObjectName(\"lineEdit_Sab\")\n self.label_5 = QtWidgets.QLabel(self.centralWidget)\n self.label_5.setGeometry(QtCore.QRect(320, 30, 25, 12))\n self.label_5.setObjectName(\"label_5\")\n self.label_6 = QtWidgets.QLabel(self.centralWidget)\n self.label_6.setGeometry(QtCore.QRect(320, 60, 25, 12))\n self.label_6.setObjectName(\"label_6\")\n self.lineEdit_tab = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_tab.setGeometry(QtCore.QRect(360, 60, 113, 20))\n self.lineEdit_tab.setObjectName(\"lineEdit_tab\")\n self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralWidget)\n self.plainTextEdit.setGeometry(QtCore.QRect(30, 110, 501, 71))\n self.plainTextEdit.setReadOnly(True)\n self.plainTextEdit.setObjectName(\"plainTextEdit\")\n MainWindow.setCentralWidget(self.centralWidget)\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 554, 23))\n self.menuBar.setObjectName(\"menuBar\")\n self.menu = QtWidgets.QMenu(self.menuBar)\n self.menu.setObjectName(\"menu\")\n self.menu_2 = QtWidgets.QMenu(self.menuBar)\n self.menu_2.setObjectName(\"menu_2\")\n MainWindow.setMenuBar(self.menuBar)\n self.action_Open = QtWidgets.QAction(MainWindow)\n self.action_Open.setObjectName(\"action_Open\")\n self.action_Save = QtWidgets.QAction(MainWindow)\n self.action_Save.setObjectName(\"action_Save\")\n self.action_Close = QtWidgets.QAction(MainWindow)\n self.action_Close.setObjectName(\"action_Close\")\n self.action_Calculate = QtWidgets.QAction(MainWindow)\n self.action_Calculate.setObjectName(\"action_Calculate\")\n self.action_Quit = 
QtWidgets.QAction(MainWindow)\n self.action_Quit.setObjectName(\"action_Quit\")\n self.menu.addAction(self.action_Open)\n self.menu.addAction(self.action_Save)\n self.menu.addAction(self.action_Close)\n self.menu.addAction(self.action_Quit)\n self.menu_2.addAction(self.action_Calculate)\n self.menuBar.addAction(self.menu.menuAction())\n self.menuBar.addAction(self.menu_2.menuAction())\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"X\"))\n self.label_2.setText(_translate(\"MainWindow\", \"Y\"))\n self.label_3.setText(_translate(\"MainWindow\", \"A\"))\n self.label_4.setText(_translate(\"MainWindow\", \"B\"))\n self.label_5.setText(_translate(\"MainWindow\", \"Sab\"))\n self.label_6.setText(_translate(\"MainWindow\", \"tab\"))\n self.menu.setTitle(_translate(\"MainWindow\", \"文件\"))\n self.menu_2.setTitle(_translate(\"MainWindow\", \"计算\"))\n self.action_Open.setText(_translate(\"MainWindow\", \"&Open\"))\n self.action_Save.setText(_translate(\"MainWindow\", \"&Save\"))\n self.action_Close.setText(_translate(\"MainWindow\", \"&Close\"))\n self.action_Calculate.setText(_translate(\"MainWindow\", \"&Calculate\"))\n self.action_Quit.setText(_translate(\"MainWindow\", \"&Quit\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n", "id": "5438337", "language": "Python", "matching_score": 5.822349548339844, "max_stars_count": 7, "path": "计算距离、方位角/Ui_MainWindow.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'C:\\GitHub\\learn-surveying-software-designing\\大地主题解算\\MainWindow.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.0\n#\n# WARNING! 
All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(390, 358)\n self.centralWidget = QtWidgets.QWidget(MainWindow)\n self.centralWidget.setObjectName(\"centralWidget\")\n self.lineEdit_B1 = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_B1.setGeometry(QtCore.QRect(60, 70, 113, 20))\n self.lineEdit_B1.setObjectName(\"lineEdit_B1\")\n self.lineEdit_L1 = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_L1.setGeometry(QtCore.QRect(60, 120, 113, 20))\n self.lineEdit_L1.setObjectName(\"lineEdit_L1\")\n self.label = QtWidgets.QLabel(self.centralWidget)\n self.label.setGeometry(QtCore.QRect(30, 70, 16, 16))\n self.label.setObjectName(\"label\")\n self.label_2 = QtWidgets.QLabel(self.centralWidget)\n self.label_2.setGeometry(QtCore.QRect(30, 120, 16, 16))\n self.label_2.setObjectName(\"label_2\")\n self.lineEdit_L2 = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_L2.setGeometry(QtCore.QRect(220, 120, 113, 20))\n self.lineEdit_L2.setObjectName(\"lineEdit_L2\")\n self.lineEdit_B2 = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_B2.setGeometry(QtCore.QRect(220, 70, 113, 20))\n self.lineEdit_B2.setObjectName(\"lineEdit_B2\")\n self.label_4 = QtWidgets.QLabel(self.centralWidget)\n self.label_4.setGeometry(QtCore.QRect(70, 40, 54, 12))\n self.label_4.setObjectName(\"label_4\")\n self.label_5 = QtWidgets.QLabel(self.centralWidget)\n self.label_5.setGeometry(QtCore.QRect(250, 40, 54, 12))\n self.label_5.setObjectName(\"label_5\")\n self.lineEdit_S = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_S.setGeometry(QtCore.QRect(140, 190, 191, 21))\n self.lineEdit_S.setObjectName(\"lineEdit_S\")\n self.label_6 = QtWidgets.QLabel(self.centralWidget)\n self.label_6.setGeometry(QtCore.QRect(40, 190, 101, 16))\n self.label_6.setObjectName(\"label_6\")\n self.lineEdit_A12 = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_A12.setGeometry(QtCore.QRect(140, 230, 191, 21))\n self.lineEdit_A12.setObjectName(\"lineEdit_A12\")\n self.lineEdit_A21 = QtWidgets.QLineEdit(self.centralWidget)\n self.lineEdit_A21.setGeometry(QtCore.QRect(140, 270, 191, 21))\n self.lineEdit_A21.setObjectName(\"lineEdit_A21\")\n self.label_7 = QtWidgets.QLabel(self.centralWidget)\n self.label_7.setGeometry(QtCore.QRect(40, 230, 81, 16))\n self.label_7.setObjectName(\"label_7\")\n self.label_8 = QtWidgets.QLabel(self.centralWidget)\n self.label_8.setGeometry(QtCore.QRect(40, 270, 71, 16))\n self.label_8.setObjectName(\"label_8\")\n MainWindow.setCentralWidget(self.centralWidget)\n self.menuBar = QtWidgets.QMenuBar(MainWindow)\n self.menuBar.setGeometry(QtCore.QRect(0, 0, 390, 23))\n self.menuBar.setObjectName(\"menuBar\")\n self.menu = QtWidgets.QMenu(self.menuBar)\n self.menu.setObjectName(\"menu\")\n self.menu_2 = QtWidgets.QMenu(self.menuBar)\n self.menu_2.setObjectName(\"menu_2\")\n MainWindow.setMenuBar(self.menuBar)\n self.actionopen = QtWidgets.QAction(MainWindow)\n self.actionopen.setObjectName(\"actionopen\")\n self.actionsave = QtWidgets.QAction(MainWindow)\n self.actionsave.setObjectName(\"actionsave\")\n self.action_1 = QtWidgets.QAction(MainWindow)\n self.action_1.setObjectName(\"action_1\")\n self.action_2 = QtWidgets.QAction(MainWindow)\n self.action_2.setObjectName(\"action_2\")\n self.actionclose = QtWidgets.QAction(MainWindow)\n self.actionclose.setObjectName(\"actionclose\")\n self.menu.addAction(self.actionopen)\n 
self.menu.addAction(self.actionsave)\n self.menu.addAction(self.actionclose)\n self.menu_2.addAction(self.action_1)\n self.menu_2.addAction(self.action_2)\n self.menuBar.addAction(self.menu.menuAction())\n self.menuBar.addAction(self.menu_2.menuAction())\n\n self.retranslateUi(MainWindow)\n self.actionclose.triggered.connect(MainWindow.close)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"B\"))\n self.label_2.setText(_translate(\"MainWindow\", \"L\"))\n self.label_4.setText(_translate(\"MainWindow\", \"P1\"))\n self.label_5.setText(_translate(\"MainWindow\", \"P2\"))\n self.label_6.setText(_translate(\"MainWindow\", \"大地线长度S\"))\n self.label_7.setText(_translate(\"MainWindow\", \"正方位角A12\"))\n self.label_8.setText(_translate(\"MainWindow\", \"反方位角A21\"))\n self.menu.setTitle(_translate(\"MainWindow\", \"文件\"))\n self.menu_2.setTitle(_translate(\"MainWindow\", \"大地主题解算\"))\n self.actionopen.setText(_translate(\"MainWindow\", \"打开\"))\n self.actionsave.setText(_translate(\"MainWindow\", \"保存\"))\n self.action_1.setText(_translate(\"MainWindow\", \"正算\"))\n self.action_2.setText(_translate(\"MainWindow\", \"反算\"))\n self.actionclose.setText(_translate(\"MainWindow\", \"关闭\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n", "id": "10271591", "language": "Python", "matching_score": 4.750330448150635, "max_stars_count": 7, "path": "大地主题解算/Ui_MainWindow.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing MainWindow.\n\"\"\"\nimport sys\nimport math\nfrom PyQt5.QtCore import pyqtSlot\n#from PyQt5.QtWidgets import QMainWindow\nfrom PyQt5.QtWidgets import *\nfrom Ui_MainWindow import Ui_MainWindow\n\n[B1, L1, H1] = [0, 0, 0]\n[B2, L2, H2] = [0, 0, 0]\nS=0\nA12=A21=0\nrho = 206265 #常数\n#WGS-84\na = 6378137\nb = 6356752.3142\nc = 6399593.6258\nalpha = 1/298.257223562\ne2 = 0.00669437999013 #e^2\ne12 = 0.00673949674227 #e'^2\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n @param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n @pyqtSlot()\n def on_action_1_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n #raise NotImplementedError\n B1 = float((self.lineEdit_B1.text()))\n L1 = float((self.lineEdit_L1.text()))\n S = float((self.lineEdit_S.text()))\n A12 = float((self.lineEdit_A12.text()))\n \n B1 = DmsToDeg(B1)\n L1 = DmsToDeg(L1)\n A12 = DmsToDeg(A12)\n \n #赋初值\n B2 = B1\n L2 = L1\n A21 = A12\n t = -100 #判定条件\n \n #迭代:\n while abs(t - B2) > 0.000001:\n \n t = B2\n Bm = (B1 + B2)/2\n Lm = (L1 + L2)/2\n Am = (A12 + A21)/2\n \n \n Mm = a * (1 - e2) * math.pow((1 - e2 * math.pow(math.sin(Bm), 2)), -1.5)\n Nm = a * math.pow(1 - e2 * math.pow(math.sin(Bm), 2), -0.5)\n #秒为单位\n dB0 = rho/Mm*S*math.cos(Am)\n dL0 = rho/Nm*S*math.sin(Am)/math.cos(Bm)\n dA0 =dL0 * math.sin(Bm)\n \n dL = rho / Mm * S * math.sin(Am) / math.cos(Bm)*(1 + dA0**2/(24*rho**2) - dB0**2 / (24 * rho**2))\n dB = rho/Mm*S*math.cos(Am)*(1+dL0**2/(12*rho**2)+dA0**2/(24*rho**2))\n dA = 
rho/Nm*S*math.sin(Am)*math.tan(Bm)*(1+dB0**2/(12*rho**2)+dL0**2*math.cos(Bm)**2/(12*rho**2)+dA0**2/(24*rho**2))\n #度数为单位\n B2 = B1 + dB/3600\n L2 = L1 + dL/3600\n \n A21 = A12 + dA/3600 +180\n \n if A21 > 360:\n A21 = A21 - 360\n \n print(\"已经迭代了一次\")\n \n \n #输出\n self.lineEdit_B2.setText(str(DegToDms(B2)))\n self.lineEdit_L2.setText(str(DegToDms(L2)))\n self.lineEdit_A21.setText(str(DegToDms(A21)))\n \n @pyqtSlot()\n def on_action_2_triggered(self):\n \"\"\"\n 高斯平均引数反算公式\n \"\"\"\n \n # TODO: not implemented yet\n #raise NotImplementedError\n B1 = float((self.lineEdit_B1.text()))\n L1 = float((self.lineEdit_L1.text()))\n B2 = float((self.lineEdit_B2.text()))\n L2 = float((self.lineEdit_L2.text()))\n \n B1 = DmsToDeg(B1)\n L1 = DmsToDeg(L1)\n B2 = DmsToDeg(B2)\n L2 = DmsToDeg(L2)\n \n Bm = (B1 + B2)/2\n dB = B2 - B1\n dL = L2 - L1\n \n #度分秒化为秒 \n \n \n Nm = a * math.pow(1 - e2 * math.pow(math.sin(Bm), 2), -0.5)\n Vm = math.sqrt(1+e12*math.cos(Bm)**2)\n \n itam2 = e12 * math.cos(Bm)**2 # M点ita的平方值\n tm = math.tan(Bm)\n \n #计算系数\n r01 = Nm * math.cos(Bm) / rho\n r21 = math.cos(Bm) * Nm * (2 + 7*itam2 - 9 * itam2 * tm**2 * itam2**2)/(24 * rho**3 * Vm**4)\n r03 = - Nm * math.cos(Bm)**3 * tm**2 / (24 * rho**3)\n S10 = Nm/(rho * Vm**2)\n S12 = Nm * math.cos(Bm)**2 * (2 + 3 * tm**2 + 2*itam2)/(24*rho**3*Vm**2)\n S30 = Nm*(itam2 - tm**2 * itam2)\n #中间变量\n SsinAm = r01* dL + r21 * dB**2 * dL + r03 * dL**3\n ScosAm = S10 * dB + S12 * dB * dL**2 + S30 * dB**3\n \n #计算系数2\n t01 = tm * math.cos(Bm) \n t21 = math.cos(Bm) * tm * (2 + 7 * itam2 + 9 * itam2 * tm**2 + 5* itam2**2) / (24 * rho**2 * Vm**4)\n t03 = (math.cos(Bm)) **3 * tm * (2 + tm**2 + 2 * itam2) / (24 * rho**2)\n \n #dA''\n \n dA = t01 * dL + t21 * dB**2 * dL + t03 * dL**3\n \n # Am\n \n tanAm = (SsinAm) / (ScosAm)\n \n Am = math.atan(tanAm)\n \n #判断Am的象限\n \n c = abs(ScosAm/SsinAm)\n \n b = B2 - B1\n l = L2 - L1\n \n if abs(b) > abs(l):\n T = math.atan(abs(SsinAm/ScosAm))\n else:\n T = 45 + math.atan(abs((1-c)/(1+c)))\n \n if b > 0 and l >= 0:\n Am = T\n \n if b < 0 and l >= 0:\n Am = 180 - T\n \n if b <=0 and l < 0:\n Am = 180 + T\n \n if b > 0 and l < 0:\n Am = 360 - T\n \n if b == 0 and l > 0:\n Am = 90\n \n # S A12 A21\n \n S = SsinAm / math.sin(Am)\n \n A12 = Am - 0.5 * dA \n A21 = Am + 0.5 * dA + 180 \n if A21 > 360 :\n A21 = A21 - 360\n \n #判断象限\n \n self.lineEdit_S.setText(str(S))\n self.lineEdit_A12.setText(str(DegToDms(A12)))\n self.lineEdit_A21.setText(str(DegToDms(A21)))\n\n# TODO: 数据格式化\ndef DmsToDeg(dms):\n d = int(dms)\n t = (dms - d) * 100\n m = int(t)\n t = (t - int(t)) * 100\n s = t\n return (d + m / 60 + s / 3600)\n\ndef DegToDms(deg):\n d = int(deg)\n t = (deg - int(deg))*60\n m = int(t)\n t = (t - int(t)) * 60\n s = t\n return (d + m/100 + s / 10000)\n \ndef PrintDms(dms):\n d = int(dms)\n t = (dms - d) * 100\n m = int(t)\n t = (t - int(t)) * 100\n s = t\n print(\"deg = \" + d + '\\t' + \"min = \"+ m + '\\t' + \"sec = \" + s)\n return 0\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dlg = MainWindow()\n dlg.show()\n sys.exit(app.exec_())\n", "id": "8158404", "language": "Python", "matching_score": 3.8476784229278564, "max_stars_count": 7, "path": "大地主题解算/MainWindow.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing MainWindow.\n\"\"\"\n\n\nimport sys\nimport numpy as np\nimport math\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtWidgets import *\nfrom Ui_mainForm import Ui_MainWindow\n \n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n \"\"\"\n Class documentation goes 
here.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n @param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n @pyqtSlot()\n def on_pushButton_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_pushButton_2_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Forward_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Backward_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Open_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n filename,_ = QFileDialog.getOpenFileName(self, '输入坐标数据', './', 'All Files (*);;Text Files (*.txt)');\n text=open(filename,'r').read()\n print('文件打开成功')\n self.plainTextEdit_Input.setPlainText(text)\n \n @pyqtSlot()\n def on_action_Close_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Save_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Quit_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dlg = Dialog()\n dlg.show()\n sys.exit(app.exec_())\n", "id": "4124845", "language": "Python", "matching_score": 5.09454870223999, "max_stars_count": 7, "path": "高斯投影坐标计算/MainWindow.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing Dialog.\n\"\"\"\nimport sys\nimport numpy as np\nimport math\nfrom PyQt5.QtCore import pyqtSlot\n#from PyQt5.QtWidgets import QDialog, QApplication\nfrom PyQt5.QtWidgets import *\nfrom Ui_mainForm import Ui_Dialog\n \n\nclass Dialog(QDialog, Ui_Dialog):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n @param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(Dialog, self).__init__(parent)\n self.setupUi(self)\n \n @pyqtSlot()\n def on_pushButton_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_pushButton_2_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_pushButton_Open_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: 打开文件并显示\n # raise NotImplementedError\n filename,_ = QFileDialog.getOpenFileName(self, '输入坐标数据', './', 'All Files (*);;Text Files (*.txt)');\n text=open(filename,'r').read()\n print('文件打开成功')\n self.plainTextEdit_Input.setPlainText(text)\n \n @pyqtSlot()\n def on_pushButton_Save_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_pushButton_3_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot(int)\n def 
on_comboBox_currentIndexChanged(self, index):\n \"\"\"\n Slot documentation goes here.\n \n @param index DESCRIPTION\n @type int\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_pushButton_4_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n #raise NotImplementedError\n Xangle = Decimal(self.lineEdit_Xangle.text())\n Yangle = Decimal(self.lineEdit_Yangle.text())\n Zangle = Decimal(self.lineEdit_Zangle.text())\n R1 = np.array([[1, 0, 0], [0, math.cos(Xangle), math.sin(Xangle)], [0, -math.sin(Xangle), math.cos(Xangle)]], dtype = float)\n R2 = np.array([[ math.cos(Yangle), 0, -math.sin(Yangle)], [0, 1, 0], [math.sin(Yangle), 0, math.cos(Yangle)]], dtype = float)\n R3 = np.array([[math.cos(Zangle), math.sin(Zangle), 0], [-math.sin(Zangle), math.cos(Zangle), 0], [0, 0, 1]], dtype = float)\n \n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dlg = Dialog()\n dlg.show()\n sys.exit(app.exec_())\n \n\n", "id": "12127331", "language": "Python", "matching_score": 2.530000925064087, "max_stars_count": 7, "path": "坐标换算/mainForm.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing MainWindow.\n\"\"\"\n\nimport sys\n#import numpy as np\nfrom math import pi, atan, sqrt\n#import matplotlib.pyplot as plt\nfrom datetime import datetime\n\nimport matplotlib\nmatplotlib.use(\"Qt5Agg\") # 声明使用QT5\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\n\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom Ui_MainWindow import Ui_MainWindow\n\n\nx = [0.0, 0.0]\ny = [0.0, 0.0]\nSab = 0.0\nTab = 0.0\n\n#计算方位角\ndef Azimuth():\n dx = x[1] - x[0]\n dy = y[1] - y[0]\n \n if dx ==0:\n if dy >=0 :\n a = pi/2\n else:\n a = pi * 3 / 2\n elif dy ==0:\n if dx >= 0:\n a=0\n else:\n a = pi\n else:\n a = atan(dy / dx)\n if dx <= 0:\n a = a + pi\n elif dy <= 0:\n a = a + 2 * pi\n return a\n\nclass Figure_Canvas(FigureCanvas): # 通过继承FigureCanvas类,使得该类既是一个PyQt5的Qwidget,又是一个matplotlib的FigureCanvas,这是连接pyqt5与matplot lib的关键\n\n def __init__(self, parent=None, width=5.1, height=4, dpi=10):\n fig = Figure(figsize=(width, height), dpi=80) # 创建一个Figure,注意:该Figure为matplotlib下的figure,不是matplotlib.pyplot下面的figure\n\n FigureCanvas.__init__(self, fig) # 初始化父类\n self.setParent(parent)\n\n self.axes = fig.add_subplot(111) # 调用figure下面的add_subplot方法,类似于matplotlib.pyplot下面的subplot方法\n\n def StartPlot(self):\n self.axes.set_xlabel('Y')\n self.axes.set_ylabel('X')\n\n self.axes.scatter(y[0], x[0], c= 'red', marker='o')\n self.axes.scatter(y[1], x[1], c= 'yellow')\n self.axes.legend(('A', 'B'), loc='best')\n \n self.axes.set_title('Calculation Results',color = 'blue')\n \n self.axes.plot(y, x, c= 'blue', lw=0.5)\n \n self.axes.annotate('(' + str(x[0]) + ',' + str(y[0]) + ')', xy=(y[0], x[0]), xytext=(-40, 6), textcoords='offset points', weight='heavy')\n self.axes.annotate('(' + str(x[1]) + ',' + str(y[1]) + ')', xy=(y[1], x[1]), xytext=(-40, 6), textcoords='offset points', weight='heavy')\n \n t1 = (y[0]+y[1])/2\n t2 = (x[0]+x[1])/2\n \n self.axes.annotate('Sab = '+ str(Sab) + '; Tab = ' + str(Tab), xy=(t1, t2), xytext=(-80, 80), textcoords='offset points', color = 'blue', arrowprops = dict( arrowstyle = '->', connectionstyle = 'arc3', color = 'b'))\n \n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n 
@param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n self.plainTextEdit.setPlainText('[' + str(datetime.now()) + ']' + '输入数据或从文件打开来开始计算')\n \n @pyqtSlot()\n def on_action_Open_triggered(self):\n filename,_ = QFileDialog.getOpenFileName(self, '输入坐标数据', './', 'All Files (*);;Text Files (*.txt)');\n if filename == '':\n self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '打开失败 返回值为空')\n return 0\n f=open(filename,'r', encoding='utf-8')\n dic = []\n for line in f.readlines():\n line=line.strip('\\n') #去掉换行符\\n\n b=line.split(',') #将每一行以,为分隔符转换成列表\n dic.append(b)\n \n \n self.lineEdit_XA.setText(dic[0][0])\n self.lineEdit_YA.setText(dic[0][1])\n self.lineEdit_XB.setText(dic[1][0])\n self.lineEdit_YB.setText(dic[1][1])\n \n self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '打开文件:' + str(filename))\n f.close()\n \n \n @pyqtSlot()\n def on_action_Save_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: 保存结果 \n with open('输出结果.txt','a') as f:\n f.write('[' + str(datetime.now()) + ']' + '\\n')\n f.write('A:'+str([x[0], y[0]]) + ';B:' + str([x[1],y[1]]) + '\\n')\n f.write('Sab = '+ str(Sab) + '; Tab = ' + str(Tab) + '\\n')\n f.write('\\n')\n \n self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '保存成功')\n \n @pyqtSlot()\n def on_action_Close_triggered(self):\n self.close()\n \n @pyqtSlot()\n def on_action_Calculate_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: 检查是否缺失条件, 进行计算, 绘制图形\n \n if self.lineEdit_XA.text() == '' or self.lineEdit_XB.text() == '' or self.lineEdit_YA.text() == '' or self.lineEdit_YB.text() == '': #空的情况下,内容为‘’ (空白);不是None\n self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '中断:参数为空')\n return 0\n \n XA = float(self.lineEdit_XA.text())\n XB = float(self.lineEdit_XB.text())\n YA = float(self.lineEdit_YA.text())\n YB = float(self.lineEdit_YB.text())\n \n if XA ==XB and YA == YB:\n self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '中断:两点重合')\n return 0\n \n global x, y, Sab, Tab # 给全局变量赋值\n \n x = [XA, XB]\n y = [YA, YB]\n \n Sab = sqrt((XA - XB) * (XA - XB) + (YA - YB) * (YA - YB) )\n Tab = Azimuth()\n \n self.lineEdit_Sab.setText(str(Sab))\n self.lineEdit_tab.setText(str(Tab))\n \n self.plainTextEdit.appendPlainText('[' + str(datetime.now()) + ']' + '计算完成:' + 'Sab = '+ str(Sab) + '; Tab = ' + str(Tab))\n \n ins = Figure_Canvas() #实例化一个FigureCanvas\n ins.StartPlot() # 画图\n graphicscene = QGraphicsScene() #创建一个QGraphicsScene,因为加载的图形(FigureCanvas)不能直接放到graphicview控件中,必须先放到graphicScene,然后再把graphicscene放到graphicview中\n graphicscene.addWidget(ins) # 把图形放到QGraphicsScene中,注意:图形是作为一个QWidget放到QGraphicsScene中的\n \n# graphicscene=graphicscene.scaled(self.graphicsView.width()-10,self.graphicsView.height()-10)\n# 咋调大小暂时还没搞清楚\n\n self.graphicsView.setScene(graphicscene) # 把QGraphicsScene放入QGraphicsView\n self.graphicsView.show() # 调用show方法呈现图形\n \n @pyqtSlot()\n def on_action_Quit_triggered(self):\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dlg = MainWindow()\n dlg.show()\n sys.exit(app.exec_())\n", "id": "8864634", "language": "Python", "matching_score": 6.884726524353027, "max_stars_count": 7, "path": "计算距离、方位角/MainWindow.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing MainWindow.\n\"\"\"\nimport sys\nimport numpy as np\n#from math import pi, atan, sqrt\n#from datetime import datetime\n\n#import 
matplotlib\n#matplotlib.use(\"Qt5Agg\") # 声明使用QT5\n#from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n#from matplotlib.figure import Figure\n\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom Ui_MainWindow import Ui_MainWindow\n\nnp.set_printoptions(suppress=True)\nephemeris = []\n\nlines = []\n\nclass Ephemeris:\n def __init__(self):\n e = []\n \n def print(self):\n str = str(e)\n return str\n\n\nendofhead = 'END OF HEADER'\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n @param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(MainWindow, self).__init__(parent)\n self.setupUi(self)\n \n @pyqtSlot()\n def on_action_Open_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # \n global ephemeris\n \n filename,_ = QFileDialog.getOpenFileName(self, '输入星历文件', './', 'All Files (*);;2020 RINEX N Files (*.20n)');\n if filename == '':\n return 0\n print(filename)\n f=open(filename,'r', encoding='utf-8')\n global lines \n for line in f.readlines():\n line=line.strip('\\n') #去掉换行符\\n\n lines.append(line)\n f.close()\n t = 0\n for line in lines:\n if t > 0:\n if line[1] != ' ':\n \n self.plainTextEdit.appendPlainText(line)\n elif line[1] == ' ' and line[4] != 0:\n \n self.plainTextEdit.appendPlainText(line)\n \n if line[60:73] == 'END OF HEADER':\n t = 1 \n \n \n \n @pyqtSlot()\n def on_action_Close_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Save_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n # TODO: not implemented yet\n raise NotImplementedError\n \n @pyqtSlot()\n def on_action_Quit_triggered(self):\n self.close()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n dlg = MainWindow()\n dlg.show()\n sys.exit(app.exec_())\n", "id": "1989241", "language": "Python", "matching_score": 0.33027589321136475, "max_stars_count": 7, "path": "读取rinex文件/MainWindow.py" }, { "content": "import warnings # 忽略警告\nwarnings.filterwarnings('ignore')\n\n\nimport pandas as pd #表格和数据操作\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom random import randrange\nfrom statsmodels.tsa.stattools import adfuller as ADF\nfrom statsmodels.stats.diagnostic import acorr_ljungbox #白噪声检验\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.api import tsa\nimport statsmodels.api as sm\nfrom statsmodels.graphics.api import qqplot\nfrom itertools import product\n\n#数据第一行是表头,不是数据\ndta=pd.read_csv('data.CSV',header=0 ,index_col=0)\nprint(\"-------------------------------------------------------------\")\n\ndta = dta.dropna() #去除不完整项数据\nprint(dta)\n\nprint(\"-------------------------------------------------------------\")\nprint(dta.describe()) #数据统计\nprint(\"-------------------------------------------------------------\")\n#用单位根检验(ADF)进行平稳性检验\n\ndta1= dta\ndiff=0\nadf=ADF(dta1)\nif adf[1]>0.05:\n print (u'原始序列经检验不平稳,p值为:%s'%(adf[1]))\nelse:\n print (u'原始序列经检验平稳,p值为:%s'%(adf[1]))\n\nprint(\"-------------------------------------------------------------\")\n\nwhile adf[1]>=0.05:#adf[1]为p值,p值小于0.05认为是平稳的\n diff=diff+1\n adf=ADF(dta1.diff(diff).dropna())\nprint 
(u'原始序列经过%s阶差分后归于平稳,p值为%s'%(diff,adf[1]))\n\n\nprint(\"-------------------------------------------------------------\")\nfor i in range(1,4):\n dta1 = dta1.diff(i).dropna()\n adf = ADF(dta1)\n print(u'原始序列经过%s阶差分,p值为%s'%(i,adf[1]))\n\nprint(\"-------------------------------------------------------------\")\n\n#采用LB统计量的方法进行白噪声检验\n\ndta2 = dta\n[[lb],[p]]=acorr_ljungbox(dta2,lags=1)\nif p<0.05:\n print (u'原始序列为非白噪声序列,p值为:%s'%p)\nelse:\n print (u'原始序列为白噪声序列,p值为:%s'%p)\n[[lb],[p]]=acorr_ljungbox(dta2.diff(1).dropna(),lags=1)\nif p<0.05:\n print (u'一阶差分序列为非白噪声序列,p值为:%s'%p)\nelse:\n print (u'一阶差分序列为白噪声序列,p值为:%s'%p)\n\nprint(\"-------------------------------------------------------------\")\n\ndta.plot()\nplt.show()\n\n\n\n#绘制自相关和偏向关图\nfig = plt.figure(figsize=(8,7))\nax1= fig.add_subplot(211)\nax2= fig.add_subplot(212)\nfig = plot_acf(dta,ax=ax1)\nfig = plot_pacf(dta,ax=ax2)\nfig.show()\n\n\n#模型识别\n#确定最佳p,d,q值\n\n#定阶\npmax = int(len(dta)/10) #一般阶数不超过length/10\nqmax = int(len(dta)/10) #一般阶数不超过length/10\nbic_matrix = [] #bic矩阵\nfor p in range(pmax+1):\n tmp = []\n for q in range(qmax+1):\n try: #存在部分报错,所以用try来跳过报错。\n tmp.append(ARIMA(dta, (p,1,q)).fit().bic)\n except:\n tmp.append(None)\n bic_matrix.append(tmp)\n\nbic_matrix = pd.DataFrame(bic_matrix) #从中可以找出最小值\n\np,q = bic_matrix.stack().astype('float64').idxmin() #先用stack展平,然后用idxmin找出最小值位置。\nprint (u'BIC最小的p值和q值为:%s、%s' %(p,q))\n\n#一阶插分的p值最小,效果最好\n#建立ARIMA(p,1,q)模型\narima = ARIMA(dta.dropna(), (p,1,q)).fit()\nprint(\"-------------------------------------------------------------\")\nprint(\"最优模型\", arima.summary())\n\n#模型检验:模型确立后,检验其残差序列是否为白噪声\n\ndta3 = dta.drop(axis = 0, index = '2021-01-01')#删除首项,对应差分缺失\n\ndta_pred = arima.predict(typ = 'levels') #按模型预测\n\nprint(\"-------------------------------------------------------------\")\nprint(dta_pred) #手动操作。\n\n#绘拟合图\ndta.plot()\ndta_pred.plot()\nplt.show()\n\n###修正残差序列格式 error是原始数据减去pred的结果,直接算我算不出来,用excel手算的\n\npred_error = pd.read_csv('data_dta_pred_error.CSV',header=0 ,index_col=0).dropna()#计算残差\n\nlb,p_l= acorr_ljungbox(pred_error, lags = 1)#LB检验\nprint(p_l)\nh = (p_l < 0.05).sum() #p值小于0.05,认为是非白噪声。\nif h > 0:\n print (u'模型ARIMA(%s,1,%s)不符合白噪声检验'%(p,q))\nelse:\n print (u'模型ARIMA(%s,1,%s)符合白噪声检验' %(p,q))\n\n\n#残差的自相关图\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = plot_acf(pred_error,ax=ax1)\nax2 = fig.add_subplot(212)\nfig = plot_pacf(pred_error, ax=ax2)\nfig.show()\n\n#D-W检验\nprint(sm.stats.durbin_watson(pred_error))\n\n#绘制qq图\nfig = plt.figure(figsize=(12,8))\nfig = qqplot(pred_error, line='q', fit=True)\nfig.show()\n\n###不同差分次数的精度\n##print(ARIMA(dta, (p,0,q)).fit().bic)\n##print(ARIMA(dta, (p,1,q)).fit().bic)\n##\n##print(ARIMA(dta, (p,0,q)).fit().aic)\n##print(ARIMA(dta, (p,1,q)).fit().aic)\n###print(ARIMA(dta, (p,2,q)).fit().aic) #MA系数不可逆\n##\n###差分比较\n##fig1 = plt.figure(figsize=(8,7))\n##ax1= fig1.add_subplot(211)\n##diff1 = dta.diff(1)\n##diff1.plot(ax=ax1)\n##ax2= fig1.add_subplot(212)\n##diff2 = dta.diff(2)\n##diff2.plot(ax=ax2)\n##fig1.show()\n##\n###差分后的自相关图\n##dta= dta.diff(1)#1阶差分\n##dta = dta.dropna()\n##\n##fig2 = plt.figure(figsize=(8,7))\n##ax1=fig2.add_subplot(211)\n##fig2 = plot_acf(dta,ax=ax1)\n##ax2 = fig2.add_subplot(212)\n##fig2 = plot_pacf(dta,ax=ax2)\n##fig2.show()\n", "id": "975591", "language": "Python", "matching_score": 4.493766784667969, "max_stars_count": 7, "path": "变形监测记录表/时间序列分析/arima.py" }, { "content": "import warnings # 忽略警告\nwarnings.filterwarnings('ignore')\n\nimport pandas as pd #表格和数据操作\nimport numpy as np\nimport 
matplotlib.pyplot as plt\n\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom statsmodels.api import tsa\nimport statsmodels.api as sm\n\nfrom itertools import product\n\n\ndta0=pd.read_csv('data.CSV',header=0)\ndta0.index = pd.to_datetime(dta0['date'])\n\n##data = pd.DataFrame()\n##data['date'] = ['2008/1/11','2008/2/6','2008/3/17','2008/4/13','2008/5/17','2008/6/15','2008/7/1','2008/7/12','2008/8/10','2008/9/14','2008/10/12','2008/11/16','2008/12/13','2009/1/19','2009/2/16','2009/3/13','2009/4/18','2009/5/16','2009/6/20','2009/7/11','2009/8/15','2009/9/19','2009/10/16','2009/11/14','2009/12/11','2010/1/15','2010/2/20','2010/3/13','2010/4/17','2010/5/15','2010/6/12','2010/7/16','2010/8/14','2010/9/18','2010/10/16','2010/11/19','2010/12/24','2011/1/21','2011/2/18','2011/3/19','2011/4/17','2011/5/15','2011/6/18','2011/7/16','2011/8/20','2011/9/24','2011/10/22','2011/11/19','2011/12/24','2012/1/14']\n##data['dy'] = [0.62,1.01,1.78,1.29,0.11,-0.35,-0.44,-0.3,-1.11,-1.78,-1.39,-0.94,-0.36,1.47,1.75,2.04,1.03,0.02,-0.59,-1.35,-2.14,-1.96,-1.46,-0.56,0.04,0.96,1.58,1.43,0.95,0.14,-0.3,-1.35,-1.6,-1.98,-1.58,-0.98,0.56,1.14,1.19,1.18,0.61,0.76,-0.66,-1.14,-1.35,-1.85,-0.95,-0.65,0.44,1.09]\n##data.index = pd.to_datetime(data['date'])\n\n\np=0\nd=1\nq=1\n\narima = ARIMA(dta0['H'].dropna(), (p,d,q)).fit()\n\nprint(arima.summary())\n\ndta_pred = arima.predict(typ = 'levels') #预测\n\n###拟合\n##fig1 = plt.figure(figsize=(12,8))\n##plt.plot(dta0['dy'], color='green')\n##plt.plot(dta_pred, color='yellow')\n##fig1.show()\n\n#模型预测\nforecast_ts = arima.forecast(10)\nfore = pd.DataFrame()\nfore['date'] = ['2021-01-24','2021-01-25','2021-01-26']\nfore['result'] = pd.DataFrame(forecast_ts[0])\nfore.index = pd.to_datetime(fore['date'])\n\nprint(fore['result'])\n\n#绘制成果表\ndta0['H'].plot(color='blue', label='Original',figsize=(12,8))\ndta_pred.plot(color='red', label='Predict',figsize=(12,8))\ndta0.H.plot(color='green', label='Original',figsize=(12,8))\nfore.result.plot(color='yellow', label='Forecast',figsize=(12,8))\n\nplt.show()\n\n", "id": "2774047", "language": "Python", "matching_score": 2.5051379203796387, "max_stars_count": 7, "path": "变形监测记录表/时间序列分析/模型预测.py" } ]
4.255878
datalions
[ { "content": "import set_parameters\nfrom tweepy import API\nfrom tweepy import OAuthHandler\n\ndef get_twitter_auth():\n \"\"\"Setup Twitter connection\n return: API object\"\"\"\n\n parameters = set_parameters.take_auth_data()\n\n twitter_access_token = parameters['twitter_access_token']\n twitter_secret_token = parameters['twitter_secret_token']\n twitter_api_key = parameters['twitter_api_key']\n twitter_secret_key = parameters['twitter_secret_key']\n\n auth = OAuthHandler(twitter_api_key, twitter_secret_key)\n auth.set_access_token(twitter_access_token, twitter_secret_token)\n return auth\n\ndef get_twitter_client():\n auth = get_twitter_auth()\n client = API(auth)\n return client\n\nif __name__ == '__main__':\n a = get_twitter_client()\n print(a)", "id": "2323971", "language": "Python", "matching_score": 1.6778039932250977, "max_stars_count": 0, "path": "scraper/twitter_connection.py" }, { "content": "from tweepy import Cursor, TweepError\nfrom time import sleep\nfrom datetime import date, datetime\nfrom twitter_connection import get_twitter_client\nfrom set_parameters import take_search_words\nimport json\nfrom pathlib import Path\n\napi = get_twitter_client()\nd = date.today()\nhome = str(Path.home())\nfile_folder = home + '/Documents/twitter/data/'\n\n\ndef log_to_file(log_text):\n with open(home + '/Documents/twitter/' + str(d) + '_log.txt', 'a+') as l_file:\n l_file.write(log_text)\n l_file.write('\\n')\n\n\ndef save_a_tweet(tweets, word_abb):\n tweets = json.dumps({str(d): tweets})\n filename = file_folder + word_abb + '_' + str(d) + '.json'\n with open(filename, 'a+') as f:\n f.write(tweets + '\\n')\n\n\nfor word in take_search_words(home + '/Documents/twitter/topics/'):\n c_api = Cursor(api.search, q=word, count=100).items()\n\n while True:\n try:\n tweet = c_api.next()\n save_a_tweet(tweet._json, word[:3].lower())\n log_correct_tweet = 'Date: {}, Keyword: {}, 1'.format(\n datetime.now(), word\n )\n log_to_file(log_correct_tweet)\n except TweepError:\n with open(home + '/Documents/twitter/log.txt', 'a+') as log_file:\n log_tweep_error = 'Date: {}, Keyword: {}, TweepError - function will start work after 15 min., 2'.format(\n datetime.now(), word\n )\n log_to_file(log_tweep_error)\n sleep(60 * 15)\n continue\n except StopIteration:\n log_stop_iteration = 'Date: {}, Keyword: {}, 0'.format(\n datetime.now(), word\n )\n log_to_file(log_stop_iteration)\n break\n", "id": "8370121", "language": "Python", "matching_score": 2.644150972366333, "max_stars_count": 0, "path": "scraper/twitter_mining_basics.py" }, { "content": "from pathlib import Path\nfrom os import listdir\n\n\ndef take_auth_data():\n \"\"\"\n Works properly if all files with access tokens are placed inside Documents/twitter/keys folder\n :return: Dictionary with pairs {'Token type': 'Token value'}\n \"\"\"\n home = str(Path.home())\n path_to_keys = '/Documents/twitter/keys/'\n\n files = [f for f in listdir(home+path_to_keys) if '.DS' not in f]\n\n tokens = []\n for f in files:\n with open(home+path_to_keys+f, 'r') as lines:\n ln = lines.readline().replace(\" \", \"\")\n tokens.append(ln)\n\n auth_data = dict(zip(files, tokens))\n return auth_data\n\n\ndef take_search_words(folder):\n \"\"\"\n Works properly if all files with words to search are placed inside Documents/twitter/topics folder\n :param folder: folder with files with words\n :return: Array of words for searching process\n \"\"\"\n words = []\n\n list_of_files = [f for f in listdir(folder) if f[-4:] == '.txt']\n\n for textfile in list_of_files:\n with 
open(folder + textfile, 'r') as my_file:\n for line in my_file:\n ln = line.strip()\n words.append(ln)\n\n return words\n\n\nif __name__ == '__main__':\n testcase = take_auth_data()\n testcase2 = take_search_words('test')\n", "id": "4521982", "language": "Python", "matching_score": 2.4901676177978516, "max_stars_count": 0, "path": "scraper/set_parameters.py" } ]
2.490168
Adam-Sawyer
[ { "content": "# Move: Contains all details about a Piece's move in the game.\n\n# DO NOT MODIFY THIS FILE\n# Never try to directly create an instance of this class, or modify its member variables.\n# Instead, you should only be reading its variables and calling its functions.\n\nfrom games.chess.game_object import GameObject\n\n# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n# you can add additional import(s) here\n# <<-- /Creer-Merge: imports -->>\n\nclass Move(GameObject):\n \"\"\"The class representing the Move in the Chess game.\n\n Contains all details about a Piece's move in the game.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes a Move with basic logic as provided by the Creer code generator.\"\"\"\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._captured = None\n self._from_file = \"\"\n self._from_rank = 0\n self._piece = None\n self._promotion = \"\"\n self._san = \"\"\n self._to_file = \"\"\n self._to_rank = 0\n\n @property\n def captured(self):\n \"\"\"The Piece captured by this Move, None if no capture.\n\n :rtype: games.chess.piece.Piece\n \"\"\"\n return self._captured\n\n @property\n def from_file(self):\n \"\"\"The file the Piece moved from.\n\n :rtype: str\n \"\"\"\n return self._from_file\n\n @property\n def from_rank(self):\n \"\"\"The rank the Piece moved from.\n\n :rtype: int\n \"\"\"\n return self._from_rank\n\n @property\n def piece(self):\n \"\"\"The Piece that was moved.\n\n :rtype: games.chess.piece.Piece\n \"\"\"\n return self._piece\n\n @property\n def promotion(self):\n \"\"\"The Piece type this Move's Piece was promoted to from a Pawn, empty string if no promotion occurred.\n\n :rtype: str\n \"\"\"\n return self._promotion\n\n @property\n def san(self):\n \"\"\"The standard algebraic notation (SAN) representation of the move.\n\n :rtype: str\n \"\"\"\n return self._san\n\n @property\n def to_file(self):\n \"\"\"The file the Piece moved to.\n\n :rtype: str\n \"\"\"\n return self._to_file\n\n @property\n def to_rank(self):\n \"\"\"The rank the Piece moved to.\n\n :rtype: int\n \"\"\"\n return self._to_rank\n\n # <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n # if you want to add any client side logic (such as state checking functions) this is where you can add them\n # <<-- /Creer-Merge: functions -->>\n", "id": "3216383", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "games/chess/move.py" }, { "content": "# Player: A player in this game. Every AI controls one player.\n\n# DO NOT MODIFY THIS FILE\n# Never try to directly create an instance of this class, or modify its member variables.\n# Instead, you should only be reading its variables and calling its functions.\n\nfrom games.chess.game_object import GameObject\n\n# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n# you can add additional import(s) here\n# <<-- /Creer-Merge: imports -->>\n\nclass Player(GameObject):\n \"\"\"The class representing the Player in the Chess game.\n\n A player in this game. 
Every AI controls one player.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes a Player with basic logic as provided by the Creer code generator.\"\"\"\n GameObject.__init__(self)\n\n # private attributes to hold the properties so they appear read only\n self._client_type = \"\"\n self._color = \"\"\n self._in_check = False\n self._lost = False\n self._made_move = False\n self._name = \"Anonymous\"\n self._opponent = None\n self._pieces = []\n self._rank_direction = 0\n self._reason_lost = \"\"\n self._reason_won = \"\"\n self._time_remaining = 0\n self._won = False\n\n @property\n def client_type(self):\n \"\"\"What type of client this is, e.g. 'Python', 'JavaScript', or some other language. For potential data mining purposes.\n\n :rtype: str\n \"\"\"\n return self._client_type\n\n @property\n def color(self):\n \"\"\"The color (side) of this player. Either 'White' or 'Black', with the 'White' player having the first move.\n\n :rtype: str\n \"\"\"\n return self._color\n\n @property\n def in_check(self):\n \"\"\"True if this player is currently in check, and must move out of check, False otherwise.\n\n :rtype: bool\n \"\"\"\n return self._in_check\n\n @property\n def lost(self):\n \"\"\"If the player lost the game or not.\n\n :rtype: bool\n \"\"\"\n return self._lost\n\n @property\n def made_move(self):\n \"\"\"If the Player has made their move for the turn. True means they can no longer move a Piece this turn.\n\n :rtype: bool\n \"\"\"\n return self._made_move\n\n @property\n def name(self):\n \"\"\"The name of the player.\n\n :rtype: str\n \"\"\"\n return self._name\n\n @property\n def opponent(self):\n \"\"\"This player's opponent in the game.\n\n :rtype: games.chess.player.Player\n \"\"\"\n return self._opponent\n\n @property\n def pieces(self):\n \"\"\"All the uncaptured chess Pieces owned by this player.\n\n :rtype: list[games.chess.piece.Piece]\n \"\"\"\n return self._pieces\n\n @property\n def rank_direction(self):\n \"\"\"The direction your Pieces must go along the rank axis until they reach the other side. Will be +1 if the Player is 'White', or -1 if the Player is 'Black'.\n\n :rtype: int\n \"\"\"\n return self._rank_direction\n\n @property\n def reason_lost(self):\n \"\"\"The reason why the player lost the game.\n\n :rtype: str\n \"\"\"\n return self._reason_lost\n\n @property\n def reason_won(self):\n \"\"\"The reason why the player won the game.\n\n :rtype: str\n \"\"\"\n return self._reason_won\n\n @property\n def time_remaining(self):\n \"\"\"The amount of time (in ns) remaining for this AI to send commands.\n\n :rtype: float\n \"\"\"\n return self._time_remaining\n\n @property\n def won(self):\n \"\"\"If the player won the game or not.\n\n :rtype: bool\n \"\"\"\n return self._won\n\n # <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n # if you want to add any client side logic (such as state checking functions) this is where you can add them\n # <<-- /Creer-Merge: functions -->>\n", "id": "905630", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "games/chess/player.py" } ]
0
aahmetmeliksah
[ { "content": "\"\"\" \r\n1- Write a function that flattens a list. Its elements may be multi-layered lists (such as [[3],2]) or other non-scalar data. For example:\r\ninput: [[1,'a',['cat'],2],[[[3]],'dog'],4,5]\r\n\r\noutput: [1,'a','cat',2,3,'dog',4,5]\r\n\"\"\"\r\ndef flattenListGenerator(lists_of_lists):\r\n if type(lists_of_lists) is list:\r\n for i in lists_of_lists:\r\n yield from flattenListGenerator(i)\r\n else:\r\n yield lists_of_lists\r\n\r\ndef flatten(lists_of_lists):\r\n return list(flattenListGenerator(lists_of_lists))\r\n\r\n# print(flatten([[1,'a',['cat'],2],[[[3]],'dog'],4,5]))\r\n\r\n\"\"\"\r\n2- Write a function that reverses the elements of a given list. If the elements of the list themselves contain lists, reverse their elements as well. For example:\r\n\r\ninput: [[1, 2], [3, 4], [5, 6, 7]]\r\n\r\noutput: [[7, 6, 5], [4, 3], [2, 1]]\r\n\"\"\"\r\ndef reverseLists(lists):\r\n return [(reverseLists(x) if isinstance(x, list) else x)\r\n for x in reversed(lists)]\r\n\r\n\r\narr1 = reverseLists([[1, 2], [3, 4], [5, 6, 7]])\r\nprint(arr1)", "id": "9317351", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "index.py" } ]
0
varlociraptor
[ { "content": "from pysam import VariantFile\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom itertools import islice\nimport pandas as pd\n\n\nsns.set_style(\"ticks\")\n\n\ncalls = VariantFile(snakemake.input[0])\n\n\ndef freq(sample):\n ref, alt = sample.get(\"AD\")\n if alt == 0:\n return 0\n return alt / (ref + alt)\n\n\nnormal_freqs = []\ntumor_freqs = []\nvartypes = []\n\nprint(\"collecting calls\")\nfor record in calls:\n if not record.info.get(\"SHARED\"):\n continue\n normal_freqs.append(freq(record.samples.get(\"normal\")))\n tumor_freqs.append(freq(record.samples.get(\"tumor\")))\n vartypes.append(record.info.get(\"TYPE\"))\n\nd = pd.DataFrame({\"normal\": normal_freqs, \"tumor\": tumor_freqs, \"type\": vartypes})\n\nprint(\"subsampling\")\n# sample d to not get overwhelmed\nd = d.sample(10000, random_state=245746)\n\nprint(\"plotting and density estimation\")\ng = sns.FacetGrid(col=\"type\", data=d)\ng.map(sns.kdeplot, \"normal\", \"tumor\", shade=True, clip=(0, 1), shade_lowest=False)\ng.map(sns.scatterplot, \"normal\", \"tumor\", size=1, alpha=0.5, marker=\".\")\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "9483274", "language": "Python", "matching_score": 2.351327419281006, "max_stars_count": 2, "path": "scripts/plot-freqdist.py" }, { "content": "#!/usr/bin/env python\n\nimport sys\nimport numpy as np\nimport pandas as pd\nimport pysam\nimport matplotlib\nmatplotlib.use(\"agg\")\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom functools import partial\n\ntumor = pysam.AlignmentFile(snakemake.input[0], \"rb\")\nnormal = pysam.AlignmentFile(snakemake.input[1], \"rb\")\n\nsoftclips = []\n\nfor i, rec in enumerate(normal):\n if rec.is_supplementary or rec.is_unmapped:\n continue\n is_first_read = rec.pos < rec.mpos\n get_clip = lambda c: c[1] if c[0] == 4 else None\n clip_left = get_clip(rec.cigartuples[0])\n if clip_left is not None:\n softclips.append([clip_left, True, is_first_read])\n clip_right = get_clip(rec.cigartuples[-1])\n if clip_right is not None:\n softclips.append([clip_right, False, is_first_read])\n if i == 10000000:\n break\n\nsoftclips = pd.DataFrame(softclips, columns=[\"len\", \"left\", \"first_in_pair\"])\n\ndef plot(*args, **kwargs):\n softclips = args[0]\n plt.hist(softclips, normed=True)\n q95 = np.percentile(softclips, 99)\n plt.plot([q95, q95], [0, 1.0], \"--k\")\n m = max(softclips)\n plt.plot([m, m], [0, 1.0], \":k\")\n plt.text(m, 1, \"max={}\".format(m), horizontalalignment=\"right\", verticalalignment=\"top\")\n\n\ng = sns.FacetGrid(softclips, col=\"left\", row=\"first_in_pair\")\ng = g.map(plot, \"len\")\n\nplt.savefig(snakemake.output[0])\n", "id": "631", "language": "Python", "matching_score": 0.7524398565292358, "max_stars_count": 2, "path": "scripts/bam-stats.py" }, { "content": "from cyvcf2 import VCF, Writer\nimport numpy as np\n\n\ndef subclone_vaf(gt):\n \"\"\"Calculate subclone allele frequency\"\"\"\n if np.all(gt[:2] == [1, 1]):\n return 1.0\n elif (np.all(gt[:2] == [0, 1]) or np.all(gt[:2] == [1, 0]) or\n np.all(gt[:2] == [-1, 1]) or np.all(gt[:2] == [1, -1])):\n return 0.5\n else:\n return 0.0\n\n\n# Reader\nvcf_in = VCF(snakemake.input[0])\n\n# Setup subclone information\nsubclones = [\"Som{}\".format(i) for i in range(1, 5)]\nfractions = [1/3, 1/3, 1/4, 1/12]\n\n\n# Prepare writer\nvcf_in.add_info_to_header({\"ID\": \"TAF\",\n \"Number\": \"1\",\n \"Description\": \"True tumor allele frequency\",\n \"Type\": \"Float\"})\nvcf_in.add_info_to_header({\"ID\": \"NAF\",\n \"Number\": 
\"1\",\n \"Description\": \"True normal allele frequency\",\n \"Type\": \"Float\"})\nbcf_out = Writer(snakemake.output[0], vcf_in)\n\nfor rec in vcf_in:\n if len(rec.ALT) > 1:\n raise ValueError(\"multiallelic sites are not supported at the moment\")\n\n try:\n # get VAFs from VCF\n tumor_vaf = rec.INFO[\"TAF\"]\n normal_vaf = rec.INFO[\"NAF\"]\n except KeyError:\n # calculate VAFs\n subclone_idx = [vcf_in.samples.index(s) for s in subclones]\n control_idx = vcf_in.samples.index(\"Control\")\n\n tumor_vaf = sum(fraction * subclone_vaf(rec.genotypes[idx])\n for idx, fraction in zip(subclone_idx, fractions))\n normal_vaf = subclone_vaf(rec.genotypes[control_idx])\n\n rec.INFO[\"TAF\"] = tumor_vaf\n rec.INFO[\"NAF\"] = normal_vaf\n \n # only keep somatic variants\n if normal_vaf == 0.0 and tumor_vaf > 0.0:\n bcf_out.write_record(rec)\n\nbcf_out.close()\n", "id": "11919205", "language": "Python", "matching_score": 2.669079542160034, "max_stars_count": 2, "path": "scripts/annotate-truth.py" }, { "content": "from cyvcf2 import VCF, Writer\nimport numpy as np\n\ndef get_sample_name(tissue):\n ds = snakemake.config[\"runs\"][snakemake.wildcards.run][\"dataset\"]\n return snakemake.config[\"datasets\"][ds][tissue][\"name\"]\n\n\ntumor, normal = map(get_sample_name, [\"tumor\", \"normal\"])\n\n\nbcf_in = VCF(snakemake.input.vcf)\nbcf_out = Writer(snakemake.output[0], bcf_in)\n\n\nfor rec in bcf_in:\n if rec.FILTER:\n continue\n gt = rec.genotypes\n tumor_gt = gt[0][:2]\n normal_gt = gt[1][:2]\n if (np.any(tumor_gt) and\n not np.any(normal_gt) and\n not np.any(np.isnan(normal_gt))):\n # somatic variant\n bcf_out.write_record(rec)\n\nbcf_out.close()\n", "id": "11709214", "language": "Python", "matching_score": 1.1725128889083862, "max_stars_count": 2, "path": "scripts/adhoc-calling.py" }, { "content": "from jinja2 import Template\nimport yaml\nimport subprocess as sp\nfrom itertools import product\n\nmeta = yaml.load(open(snakemake.input.meta, \"r\"), Loader=yaml.FullLoader)\n\nplot_prior = meta.get(\"plot-prior\", False)\nprint(plot_prior)\nprior_plots = {}\nif plot_prior:\n for sample, contig in product(plot_prior[\"samples\"], plot_prior[\"contigs\"]):\n cmd = [\"varlociraptor\", \"plot\", \"variant-calling-prior\", \"--sample\", str(sample), \"--contig\", str(contig), \"--scenario\", snakemake.input.scenario]\n prior_plots[(sample, contig)] = sp.run(cmd, stdout=sp.PIPE, check=True, text=True).stdout\n\nprint(prior_plots)\nwith open(snakemake.input.template, \"r\") as template, open(snakemake.input.scenario, \"r\") as scenario, open(snakemake.output[0], \"w\") as out:\n print(Template(template.read()).render(scenario=scenario.read(), prior_plots=prior_plots, name=meta[\"name\"], desc=meta[\"desc\"]), file=out)", "id": "12686731", "language": "Python", "matching_score": 1.0915687084197998, "max_stars_count": 0, "path": "content/scenarios/workflow/scripts/render.py" }, { "content": "from svgutils.compose import *\n\nplots = snakemake.input\n\nFigure(\"22cm\", \"6cm\",\n SVG(plots[0]),\n SVG(plots[1]).move(100, 0),\n SVG(plots[2]).move(200, 0)\n).save(snakemake.output[0])\n", "id": "1657668", "language": "Python", "matching_score": 0.02584407851099968, "max_stars_count": 2, "path": "scripts/fig-fdr.py" }, { "content": "from common import load_variants\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\n\nvartype = snakemake.wildcards.vartype\n\nindex_cols = [\"CHROM\", \"POS\", \"SVLEN\"] if vartype == \"INS\" or vartype == \"DEL\" else [\"CHROM\", \"POS\", \"ALT\"]\n\nall_variants = 
[load_variants(f, vartype=vartype) for f in snakemake.input.calls]\n\nG = nx.Graph()\nfor calls, (i, j) in zip(all_variants, snakemake.params.dataset_combinations):\n calls[\"component\"] = None\n for call in calls.itertuples():\n a = (i, call.Index)\n G.add_node(a)\n if call.MATCHING >= 0:\n b = (j, call.MATCHING)\n G.add_node(b)\n G.add_edge(a, b)\n\n# get a set of calls for each dataset (we don't need all pairwise comparisons for that)\nrepresentatives = {snakemake.params.dataset_combinations[i][0]: calls for i, calls in enumerate(all_variants)}\n\nif snakemake.wildcards.mode != \"varlociraptor\":\n varlociraptor_variants = [load_variants(f, vartype=vartype) for f in snakemake.input.varlociraptor_calls]\n for calls in varlociraptor_variants:\n calls.set_index(index_cols, inplace=True)\n varlociraptor_representatives = {snakemake.params.dataset_combinations[i][0]: calls for i, calls in enumerate(varlociraptor_variants)}\n\n# annotate calls with their component, i.e. their equivalence class\nfor component_id, component in enumerate(nx.connected_components(G)):\n for i, k in component:\n representatives[i].loc[k, \"component\"] = component_id\nfor calls in representatives.values():\n calls[\"component\"] = calls[\"component\"].astype(np.float32)\n calls.set_index(\"component\", inplace=True)\n\n# join calls based on their equivalence class\naggregated = None\nsuffix = \"_{}\".format\ndataset_name = lambda i: snakemake.params.datasets[i]\nis_varlociraptor = False\nfor dataset_id, calls in representatives.items():\n cols = list(index_cols)\n if \"CASE_AF\" in calls.columns:\n cols.extend([\"CASE_AF\", \"PROB_SOMATIC_TUMOR\"])\n is_varlociraptor = True\n calls = calls[cols]\n if snakemake.wildcards.mode != \"varlociraptor\":\n idx_calls = calls.set_index(cols, drop=False)\n caseaf = idx_calls.join(varlociraptor_representatives[dataset_id][[\"CASE_AF\"]], how=\"left\")[\"CASE_AF\"]\n caseaf = caseaf[~caseaf.index.duplicated()]\n calls = calls[~idx_calls.index.duplicated()]\n calls[\"CASE_AF\"] = caseaf.values\n\n calls.columns = [c + suffix(dataset_name(dataset_id)) for c in calls.columns]\n if aggregated is None:\n aggregated = calls\n else:\n aggregated = aggregated.join(calls, how=\"outer\", lsuffix=\"\", rsuffix=\"\")\n\n# Forget the component id. Otherwise, we might run into errors with duplicate elements\n# in the index below. 
These can occur if there are multiple ambiguous calls.\naggregated.reset_index(inplace=True, drop=True)\n\npos_cols = aggregated.columns[aggregated.columns.str.startswith(\"POS_\")]\nis_called = (~aggregated[pos_cols].isnull()).astype(int)\nis_called.columns = pos_cols.str.replace(\"POS_\", \"\")\naggregated = aggregated.join(is_called, lsuffix=\"\", rsuffix=\"\")\n\naggregated.insert(len(aggregated.columns), \"concordance_count\", is_called.sum(axis=1))\n\naggregated[\"max_case_af\"] = aggregated[aggregated.columns[aggregated.columns.str.startswith(\"CASE_AF\")]].max(axis=1)\nif is_varlociraptor:\n aggregated[\"max_prob_somatic_tumor\"] = aggregated[aggregated.columns[aggregated.columns.str.startswith(\"PROB_SOMATIC\")]].min(axis=1)\n\naggregated.to_csv(snakemake.output[0], sep=\"\\t\", index=False)\n", "id": "9661357", "language": "Python", "matching_score": 2.5847561359405518, "max_stars_count": 2, "path": "scripts/aggregate-concordance.py" }, { "content": "import matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport math\n\n\ndef plot_ranges(ranges, plot_range, xlabel, ylabel, row_offset=0, nrow_offset=0, row_span=1, fig=None, gs=None, legend=True, fig_height=None, legend_outside=False):\n ncols = 4 if len(ranges) == 4 else min(3, len(ranges))\n nrows = int(math.ceil(len(ranges) / ncols)) + nrow_offset\n if fig is None:\n if fig_height is None:\n fig_height = 4 * nrows\n fig_width = 4 * ncols\n if legend_outside:\n fig_width += 2.5\n fig = plt.figure(figsize=(fig_width, fig_height))\n gs = gridspec.GridSpec(nrows, ncols, figure=fig)\n axes = []\n all_handles = []\n seen = set()\n for i, (lower, upper) in enumerate(ranges):\n row = i // ncols + row_offset\n col = i % ncols\n fig.add_subplot(gs[row:row+row_span, col]) \n ax, handles = plot_range(lower, upper)\n\n if col == 0:\n plt.ylabel(ylabel)\n else:\n plt.ylabel(\"\")\n if row + row_span == nrows:\n plt.xlabel(xlabel)\n else:\n plt.xlabel(\"\")\n\n if row_offset == 0 and len(ranges) > 1:\n if lower == upper:\n if isinstance(lower, float):\n lower = \"{:.3g}\".format(lower)\n plt.title(lower)\n else:\n plt.title(\"{} - {}\".format(lower, upper))\n\n axes.append(ax)\n for handle in handles:\n label = handle.get_label()\n if label not in seen:\n seen.add(label)\n all_handles.append(handle)\n\n if legend:\n if legend_outside:\n axes[-1].legend(handles=all_handles, loc=\"upper left\", bbox_to_anchor=(1.01, 1.0))\n else:\n axes[0].legend(handles=all_handles, loc=\"best\")\n plt.tight_layout()\n return fig, gs\n\n\ndef load_variants(path,\n minlen=None,\n maxlen=None,\n vartype=None,\n constrain=None,\n min_af=None,\n max_af=None):\n variants = pd.read_table(path, header=[0, 1])\n\n # store tumor AF estimate in CASE_AF column\n try:\n case_af = variants.loc[:, (\"tumor\", \"AF\")]\n variants.loc[:, (\"VARIANT\", \"CASE_AF\")] = case_af\n except KeyError:\n # ignore if no AF estimate for tumor is present\n pass\n try:\n dp = variants.loc[:, (\"tumor\", \"DP\")]\n variants.loc[:, (\"VARIANT\", \"TUMOR_DP\")] = dp\n except KeyError:\n # ignore if not present\n pass\n\n variants = variants[\"VARIANT\"]\n variants[\"CHROM\"] = variants[\"CHROM\"].astype(str)\n\n variants.index = np.arange(variants.shape[0])\n\n # constrain type\n if vartype == \"DEL\":\n is_allele_del = (variants[\"REF\"].str.len() > 1) & (variants[\"ALT\"].str.len() == 1)\n is_sv_del = variants[\"ALT\"] == \"<DEL>\"\n isdel = is_allele_del | 
is_sv_del\n\n if \"SVTYPE\" in variants.columns:\n variants = variants[(variants[\"SVTYPE\"].astype(str) == \"DEL\")\n | (isdel & variants[\"SVTYPE\"].isnull())]\n else:\n variants = variants[isdel]\n elif vartype == \"INS\":\n isins = (variants[\"REF\"].str.len() == 1) & (variants[\"ALT\"].str.len() >\n 1)\n if \"SVTYPE\" in variants.columns:\n variants = variants[(variants[\"SVTYPE\"].astype(str) == \"INS\")\n | (isins & variants[\"SVTYPE\"].isnull())]\n else:\n variants = variants[isins]\n else:\n assert False, \"Unsupported variant type\"\n\n # constrain length\n if \"SVLEN\" not in variants.columns or variants[\"SVLEN\"].isnull().any():\n if not (variants.columns == \"END\").any() or variants[\"END\"].isnull(\n ).any():\n variants[\"SVLEN\"] = (\n variants[\"ALT\"].str.len() - variants[\"REF\"].str.len()).abs()\n print(\"REF ALT comp\")\n else:\n print(\"use END\")\n variants[\"SVLEN\"] = variants[\"END\"] - (variants[\"POS\"] + 1)\n # convert to positive value\n variants.loc[:, \"SVLEN\"] = variants[\"SVLEN\"].abs()\n if minlen is not None and maxlen is not None:\n variants = variants[(variants[\"SVLEN\"] >= minlen)\n & (variants[\"SVLEN\"] < maxlen)]\n\n # only autosomes\n variants = variants[variants[\"CHROM\"].str.match(r\"(chr)?[0-9]+\")]\n\n if constrain is not None:\n valid = (variants[\"MATCHING\"] < 0) | (variants[\"MATCHING\"].isin(\n constrain.index))\n variants = variants[valid]\n\n if min_af is not None and max_af is not None:\n valid = (variants[\"AF\"] <= max_af) & (variants[\"AF\"] >= min_af)\n variants = variants[valid]\n\n print(\"total variants\", variants.shape[0])\n if \"MATCHING\" in variants.columns:\n print(\"matching variants\", (variants[\"MATCHING\"] >= 0).sum())\n\n return variants\n\n\ndef precision(calls):\n p = calls.shape[0]\n if p == 0:\n return 1.0\n tp = np.count_nonzero(calls.is_tp)\n precision = tp / p\n return precision\n\n\ndef recall(calls, truth):\n p = calls.shape[0]\n if p == 0:\n return 0.0\n matches = calls.loc[calls.MATCHING.isin(truth.index), \"MATCHING\"]\n #tp = calls[calls.is_tp].MATCHING.unique().size\n tp = matches.unique().size\n t = truth.shape[0]\n recall = tp / t\n return recall\n\n\ndef get_colors(config):\n callers = [caller for caller in config[\"caller\"] if caller != \"varlociraptor\"]\n palette = sns.color_palette(\"colorblind\", n_colors=len(callers))\n palette = sns.color_palette(\"tab10\", n_colors=len(callers))\n return {caller: c for caller, c in zip(callers, palette)}\n\n\ndef phred_scale(prob):\n return -10 * math.log10(prob)\n", "id": "348108", "language": "Python", "matching_score": 3.426149606704712, "max_stars_count": 2, "path": "scripts/common.py" }, { "content": "from itertools import product\nfrom functools import partial\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport common\nimport numpy as np\nimport math\nfrom matplotlib.lines import Line2D\n\nvartype = snakemake.wildcards.vartype\ncolors = common.get_colors(snakemake.config)\n\n\ndef props(callers):\n return product(callers, snakemake.params.len_ranges)\n\n\ndef plot_len_range(minlen, maxlen, min_precision=0.0):\n\n truth = common.load_variants(\n snakemake.input.truth, minlen, maxlen, vartype=vartype)\n\n def plot(calls,\n label,\n color,\n line=True,\n style=\"-\",\n invert=False,\n markersize=4,\n endmarker=False):\n calls = pd.read_table(calls, index_col=0)\n if len(calls) < 10:\n return\n if line:\n thresholds = calls.score.quantile(np.linspace(0.0, 1.0, 50))\n 
precision = []\n recall = []\n for t in thresholds:\n if invert:\n c = calls[calls.score >= t]\n else:\n c = calls[calls.score <= t]\n p = common.precision(c)\n r = common.recall(c, truth)\n print(label, t, c.shape[0], p, r)\n if len(c) < 10:\n print(\"skipping threshold: too few calls\", c)\n continue\n precision.append(p)\n recall.append(r)\n if len(precision) <= 2:\n print(\"skipping curve because we have too few values\")\n return\n else:\n precision = [common.precision(calls)]\n recall = [common.recall(calls, truth)]\n style = \".\"\n print(label, calls.shape[0], precision, recall)\n\n plt.plot(\n recall,\n precision,\n style,\n color=color,\n label=label,\n markersize=markersize\n )\n if endmarker:\n plt.plot(recall[-1], precision[-1], \"s\", color=color, markersize=markersize)\n\n handles = []\n for calls, (caller,\n len_range) in zip(snakemake.input.varlociraptor_calls,\n props(snakemake.params.varlociraptor_callers)):\n if len_range[0] != minlen and len_range[1] != maxlen:\n continue\n label = \"varlociraptor+{}\".format(caller)\n plot(calls, label, colors[caller], endmarker=True)\n handles.append(Line2D([0], [0], color=colors[caller], label=label))\n\n for calls, (caller,\n len_range) in zip(snakemake.input.default_calls,\n props(snakemake.params.default_callers)):\n if len_range[0] != minlen and len_range[1] != maxlen:\n continue\n color = colors[caller]\n plot(\n calls,\n caller,\n color,\n style=\":\",\n invert=snakemake.config[\"caller\"][caller].get(\"invert\", False))\n if caller in snakemake.params.adhoc_callers:\n handles.append(Line2D([0], [0], markersize=10, markerfacecolor=color, markeredgecolor=color, color=color, label=caller, marker=\".\", linestyle=\":\"))\n else:\n handles.append(Line2D([0], [0], color=color, label=caller, linestyle=\":\"))\n\n for calls, (caller, len_range) in zip(snakemake.input.adhoc_calls,\n props(snakemake.params.adhoc_callers)):\n if len_range[0] != minlen and len_range[1] != maxlen:\n continue\n color = colors[caller]\n plot(calls, caller, color, markersize=10, line=False)\n if caller not in snakemake.params.default_callers:\n handles.append(Line2D([0], [0], markersize=10, markerfacecolor=color, markeredgecolor=color, label=caller, marker=\".\", lw=0))\n\n sns.despine()\n ax = plt.gca()\n plt.ylim((min_precision, 1.01 if min_precision == 0.0 else 1.001))\n return ax, handles\n\n\nplot = plot_len_range\nfig_height = None\nlegend_outside = snakemake.params.legend_outside\nif snakemake.wildcards.zoom == \"zoom\":\n plot = partial(plot_len_range, min_precision=0.99 if vartype == \"INS\" else 0.95)\n fig_height = 3\n legend_outside = True\n\n\ncommon.plot_ranges(\n snakemake.params.len_ranges,\n plot,\n xlabel=\"recall\",\n ylabel=\"precision\",\n fig_height=fig_height,\n legend_outside=legend_outside,\n)\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "8344350", "language": "Python", "matching_score": 4.3077712059021, "max_stars_count": 2, "path": "scripts/plot-precision-recall.py" }, { "content": "from itertools import product\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport common\nimport numpy as np\nimport math\nfrom matplotlib.lines import Line2D\n\nMIN_CALLS = 10\n\nvartype = snakemake.wildcards.vartype\ncolors = common.get_colors(snakemake.config)\n\n\ndef props(callers):\n return product(callers, snakemake.params.len_ranges)\n\n\ndef plot_len_range(minlen, maxlen):\n\n truth = common.load_variants(\n snakemake.input.truth, minlen, 
maxlen, vartype=vartype)\n\n afs = pd.Series(truth.TAF.unique()).sort_values()\n\n def plot(calls,\n label,\n color,\n varlociraptor=True,\n style=\"-.\",\n markersize=4):\n calls = pd.read_table(calls, index_col=0)\n if len(calls) < 10:\n return\n if varlociraptor:\n phred = lambda p: -10 * math.log10(p)\n def calc_recall(p):\n c = calls[calls.score <= phred(p)]\n return [common.recall(c, truth[truth.TAF >= af]) for af in afs]\n\n return plt.fill_between(\n afs,\n calc_recall(0.98 if maxlen > 30 else 0.99),\n calc_recall(0.9),\n color=color,\n label=label,\n alpha=0.6)\n else:\n recall = [common.recall(calls, truth[truth.TAF >= af]) for af in afs]\n # plot a white background first to increase visibility\n plt.plot(afs, recall, \"-\", color=\"white\", alpha=0.8)\n return plt.plot(\n afs,\n recall,\n style,\n color=color,\n label=label)[0]\n\n handles = []\n def register_handle(handle):\n if handle is not None:\n handles.append(handle)\n for calls, (caller,\n len_range) in zip(snakemake.input.varlociraptor_calls,\n props(snakemake.params.varlociraptor_callers)):\n if len_range[0] != minlen and len_range[1] != maxlen:\n continue\n label = \"varlociraptor+{}\".format(caller)\n handle = plot(calls, label, colors[caller], varlociraptor=True)\n register_handle(handle)\n #handles.append(Line2D([0], [0], color=colors[caller], label=label))\n\n for calls, (caller, len_range) in zip(snakemake.input.adhoc_calls,\n props(snakemake.params.adhoc_callers)):\n if len_range[0] != minlen and len_range[1] != maxlen:\n continue\n color = colors[caller]\n handle = plot(calls, caller, color, style=\":\", varlociraptor=False)\n register_handle(handle)\n #handles.append(Line2D([0], [0], linestyle=\":\", color=color, label=caller))\n\n sns.despine()\n ax = plt.gca()\n return ax, handles\n\n\ncommon.plot_ranges(\n snakemake.params.len_ranges,\n plot_len_range,\n xlabel=\"allele frequency\",\n ylabel=\"recall\")\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "11253689", "language": "Python", "matching_score": 0.5035237073898315, "max_stars_count": 2, "path": "scripts/plot-allelefreq-recall.py" }, { "content": "import pandas as pd\nimport numpy as np\n\n\nmin_depth = None\nfor f in snakemake.input:\n d = pd.read_table(f, header=None, names=[\"chrom\", \"pos\", \"depth\"], index_col=[0, 1], engine=\"c\", dtype={\"chrom\": str, \"pos\": np.int32, \"depth\": np.int16})\n if min_depth is None:\n min_depth = d\n else:\n c = pd.concat([min_depth, d], axis=1)\n c.fillna(0, inplace=True)\n min_depth = c.min(axis=1)\n min_depth = min_depth[min_depth > 1]\nmin_depth.to_csv(snakemake.output[0], sep=\"\\t\")\n", "id": "7041927", "language": "Python", "matching_score": 0.9485445618629456, "max_stars_count": 2, "path": "scripts/min-depths.py" }, { "content": "import math\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport common\nimport numpy as np\n\nMIN_COUNT = 20\nMAX_DEPTH = 60\n\nvartype = snakemake.wildcards.vartype\ncolors = common.get_colors(snakemake.config)\n\ntruth = common.load_variants(snakemake.input.truth, vartype=vartype)\n\nall_calls = []\nfor caller, calls in zip(snakemake.params.callers, snakemake.input.calls):\n calls = pd.read_table(calls)\n calls.loc[:, \"caller\"] = caller\n all_calls.append(calls)\nall_calls = pd.concat(all_calls)\n\ndef plot(af, _):\n constrain_lower = lambda error: np.maximum(error, -af)\n constrain_upper = lambda error: np.minimum(error, 1.0 - af)\n\n dp = all_calls[\"TUMOR_DP\"]\n 
calls = all_calls[all_calls.is_tp]\n true_af = truth.loc[calls.MATCHING].reset_index().TAF\n calls = calls.reset_index()\n calls[\"true_af\"] = true_af\n calls = calls[calls[\"true_af\"] == af]\n calls[\"error\"] = calls.CASE_AF - true_af\n\n sns.kdeplot(calls[\"TUMOR_DP\"], calls[\"error\"], cmap=\"Blues\", n_levels=50, shade=True, alpha=0.7, shade_lowest=False) #alpha=0.5, clip=((0.0, 1.0), (0.0, af)))\n plt.plot(calls[\"TUMOR_DP\"], calls[\"error\"], \",\", color=\"k\", lw=0, alpha=1.0, rasterized=True)\n by_depth = calls.groupby(\"TUMOR_DP\")[\"error\"].describe().reset_index()\n by_depth[\"-std\"] = constrain_lower(-by_depth[\"std\"])\n by_depth[\"std\"] = constrain_upper(by_depth[\"std\"])\n by_depth = by_depth[by_depth[\"count\"] >= MIN_COUNT]\n plt.plot(by_depth.TUMOR_DP, by_depth[\"std\"], \"--\", color=\"k\")\n plt.plot(by_depth.TUMOR_DP, by_depth[\"-std\"], \"--\", color=\"k\")\n plt.plot(by_depth.TUMOR_DP, by_depth[\"mean\"], \"-\", color=\"k\")\n\n depths = np.arange(0, MAX_DEPTH)\n # standard deviation when sampling in binomial process from allele freq\n # this is the expected sampling error within the correctly mapped fragments\n sd = np.array([1.0 / depth * math.sqrt(depth * af * (1.0 - af)) for depth in depths])\n plt.fill_between(depths, constrain_lower(-sd), constrain_upper(sd), color=\"grey\", alpha=0.5)\n \n sns.despine()\n plt.xticks(rotation=\"vertical\")\n ax = plt.gca()\n ax.legend().remove()\n handles, labels = ax.get_legend_handles_labels()\n plt.ylim((-1.0, 1.0))\n plt.xlim((0, MAX_DEPTH))\n\n return ax, []\n\nafs = [(af, af) for af in truth.TAF.sort_values().unique()]\n\ncommon.plot_ranges(\n afs,\n plot,\n \"depth\",\n \"predicted - truth\")\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "4765456", "language": "Python", "matching_score": 4.544338703155518, "max_stars_count": 2, "path": "scripts/plot-allelefreq-scatter.py" }, { "content": "from itertools import product\nimport math\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport common\nimport numpy as np\n\n\nMIN_CALLS = 10\n\nvartype = snakemake.wildcards.vartype\ncolors = common.get_colors(snakemake.config)\n\ntruth = common.load_variants(snakemake.input.truth, vartype=vartype)\n\ndef props(callers):\n return product(callers, snakemake.params.len_ranges)\n\ndef plot_len_range(minlen, maxlen):\n def plot(calls, colors):\n calls = calls[calls.is_tp]\n true_af = truth.loc[calls.MATCHING].reset_index().TAF\n calls = calls.reset_index()\n calls[\"error\"] = calls.CASE_AF - true_af\n\n if calls.empty:\n return\n\n calls[\"true_af\"] = true_af\n true_af = pd.Series(calls[\"true_af\"].unique()).sort_values()\n # standard deviation when sampling in binomial process from allele freq\n # this is the expected sampling error within the correctly mapped fragments\n # sd = true_af.apply(lambda af: 1 / 40 * math.sqrt(40 * af * (1 - af)))\n # x = np.arange(len(true_af))\n # offsets = [-0.5, 0.5]\n # y_upper = np.array([v for v in sd for o in offsets])\n # y_lower = np.maximum(-y_upper, [-f for f in true_af for o in offsets])\n # plt.fill_between([v + o for v in x for o in offsets], y_lower, y_upper, color=\"#EEEEEE\", zorder=-5)\n\n calls[\"true_af\"] = calls[\"true_af\"].apply(\"{:.3f}\".format)\n\n size = 1 if maxlen == 30 else 2\n sns.stripplot(\"true_af\", \"error\", hue=\"caller\", data=calls, palette=colors, dodge=True, jitter=True, alpha=0.5, size=size, rasterized=True)\n sns.boxplot(\"true_af\", \"error\", 
hue=\"caller\", data=calls, color=\"white\", fliersize=0, linewidth=1)\n\n handles, labels = plt.gca().get_legend_handles_labels()\n n = len(calls.caller.unique())\n\n plt.ylim((-1,1))\n plt.grid(axis=\"y\", linestyle=\":\", color=\"grey\")\n sns.despine()\n plt.xticks(rotation=\"vertical\")\n ax = plt.gca()\n ax.legend().remove()\n\n return ax, handles[n:]\n\n all_calls, all_colors = load_calls(minlen, maxlen)\n return plot(all_calls, all_colors)\n\ndef load_calls(minlen, maxlen):\n all_calls = []\n all_colors = []\n for calls, (caller, len_range) in zip(snakemake.input.varlociraptor_calls, props(snakemake.params.varlociraptor_callers)):\n if len_range[0] != minlen and len_range[1] != maxlen:\n continue\n label = \"varlociraptor+{}\".format(caller)\n calls = pd.read_table(calls)\n calls[\"caller\"] = label\n if not calls.empty:\n all_calls.append(calls)\n all_colors.append(colors[caller])\n\n all_calls = pd.concat(all_calls)\n return all_calls, all_colors\n\ncommon.plot_ranges(\n snakemake.params.len_ranges,\n plot_len_range,\n xlabel=\"true allele frequency\",\n ylabel=\"predicted - truth\")\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "12111051", "language": "Python", "matching_score": 3.400045156478882, "max_stars_count": 2, "path": "scripts/plot-allelefreq-estimation.py" }, { "content": "from itertools import product\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport common\nimport numpy as np\nimport math\nfrom matplotlib.lines import Line2D\nfrom matplotlib.colors import to_rgba\n\n\nclass NotEnoughObservationsException(Exception):\n pass\n\n\nMIN_CALLS = 20\nMAX_LEN = 1000\n\nvartype = snakemake.wildcards.vartype\ncolors = common.get_colors(snakemake.config)\n\n\n\nvarlociraptor_calls_low = [pd.read_table(f) for f in snakemake.input.varlociraptor_calls_low]\nvarlociraptor_calls_high = [pd.read_table(f) for f in snakemake.input.varlociraptor_calls_high]\nadhoc_calls = [pd.read_table(f) for f in snakemake.input.adhoc_calls]\n\n\ndef expected_count(af, effective_mutation_rate):\n \"\"\"Calculate the expected number of somatic variants\n greater than a given allele frequency given an effective mutation\n rate, according to the model of Williams et al. 
Nature \n Genetics 2016\"\"\"\n return effective_mutation_rate * (1.0 / af - 1.0)\n\n\ndef expected_counts(afs, effective_mutation_rate):\n return [expected_count(af, effective_mutation_rate) for af in afs]\n\n\ndef calc_concordance(calls):\n n = len(calls)\n return (calls[\"concordance_count\"] > 1).sum() / n\n\n\ndef plot_len_range(minlen, maxlen, yfunc=None, yscale=None, upper_bound=None):\n handles_varlociraptor = []\n handles_adhoc = []\n for i, caller in enumerate(snakemake.params.callers):\n def plot_calls(calls, label, color, style, calls_lower=None):\n def get_xy(calls, caseafs=None):\n svlen = calls.loc[:, calls.columns.str.startswith(\"SVLEN\")].abs()\n # at least one of the calls has a valid svlen\n valid = ((svlen >= minlen) & (svlen <= maxlen)).sum(axis=1) >= 1\n calls = calls[valid]\n if caseafs is None:\n caseafs = calls[\"max_case_af\"].dropna().unique()\n y = []\n _caseafs = []\n for caseaf in sorted(caseafs):\n _calls = calls[calls[\"max_case_af\"] >= caseaf]\n if upper_bound is not None:\n _calls = _calls[_calls[\"max_case_af\"] <= caseaf + upper_bound]\n if len(_calls) < MIN_CALLS:\n continue\n _caseafs.append(caseaf)\n y.append(yfunc(_calls))\n return _caseafs, y\n\n x, y = get_xy(calls)\n if not x:\n raise NotEnoughObservationsException()\n if calls_lower is not None:\n _, y2 = get_xy(calls_lower, caseafs=x)\n return plt.fill_between(x, y, y2, label=label, edgecolor=color, facecolor=to_rgba(color, alpha=0.2))\n else:\n if style != \"-\":\n plt.plot(x, y, \"-\", color=\"white\", alpha=0.8)\n return plt.plot(x, y, style, label=label, color=color)[0]\n\n color = colors[snakemake.params.callers[i]]\n try:\n handles_varlociraptor.append(\n plot_calls(\n varlociraptor_calls_high[i], \n \"varlociraptor+{}\".format(caller), \n color=color, style=\"-\", \n calls_lower=varlociraptor_calls_low[i]))\n except NotEnoughObservationsException:\n # skip plot\n pass\n try:\n handles_adhoc.append(plot_calls(adhoc_calls[i], caller, color=color, style=\":\"))\n except NotEnoughObservationsException:\n # skip plot\n pass\n\n handles = handles_varlociraptor + handles_adhoc\n sns.despine()\n ax = plt.gca()\n if yscale is not None:\n ax.set_yscale(yscale)\n return ax, handles\n\nplt.figure(figsize=(10, 4))\nplt.subplot(121)\nplot_len_range(1, MAX_LEN, yfunc=calc_concordance)\nplt.xlabel(\"$\\geq$ tumor allele frequency\")\nplt.ylabel(\"concordance\")\n\nplt.subplot(122)\nfor effective_mutation_rate in 10 ** np.linspace(1, 5, 7):\n afs = np.linspace(0.0, 1.0, 100, endpoint=False)\n plt.semilogy(afs, expected_counts(afs, effective_mutation_rate), \"-\", color=\"grey\", alpha=0.4)\n\nax, handles = plot_len_range(1, MAX_LEN, yfunc=lambda calls: len(calls), yscale=\"log\")\n\nplt.xlabel(\"$\\geq$ tumor allele frequency\")\nplt.ylabel(\"# of calls\")\n\nax.legend(handles=handles, loc=\"upper left\", bbox_to_anchor=(1.0, 1.0))\n\nplt.tight_layout()\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "2667466", "language": "Python", "matching_score": 3.6055750846862793, "max_stars_count": 2, "path": "scripts/plot-concordance.py" }, { "content": "from itertools import product\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport common\nimport numpy as np\n\n\nMIN_CALLS = 100\n\ncolors = common.get_colors(snakemake.config)\n\nprops = product(snakemake.params.callers,\n snakemake.params.len_ranges, snakemake.params.fdrs)\n\ncalls = []\n\nfor _calls, (caller, len_range, fdr) in 
zip(snakemake.input.varlociraptor_calls, props):\n calls.append({\"caller\": caller, \"len_range\": len_range, \"fdr\": float(fdr), \"calls\": _calls})\n\ncalls = pd.DataFrame(calls)\ncalls = calls.set_index(\"caller\", drop=False)\n\n\ndef plot_len_range(minlen, maxlen):\n\n def plot(caller):\n color = colors[caller]\n label = \"varlociraptor+{}\".format(caller)\n fdrs = []\n alphas = []\n calls_ = calls.loc[caller]\n calls_ = calls_[calls_[\"len_range\"].map(lambda r: r == [minlen, maxlen])]\n calls_ = calls_.sort_values(\"fdr\")\n for e in calls_.itertuples():\n c = pd.read_table(e.calls)\n n = c.shape[0]\n if n < MIN_CALLS:\n continue\n true_fdr = 1.0 - common.precision(c)\n if fdrs and fdrs[-1] == true_fdr:\n continue\n fdrs.append(true_fdr)\n alphas.append(e.fdr)\n plt.plot(alphas, fdrs, \".-\", color=color, label=label)\n\n\n for caller in calls.index.unique():\n plot(caller)\n\n plt.plot([0, 1], [0, 1], \":\", color=\"grey\")\n\n sns.despine()\n ax = plt.gca()\n handles, _ = ax.get_legend_handles_labels()\n return ax, handles\n\ncommon.plot_ranges(\n snakemake.params.len_ranges,\n plot_len_range,\n xlabel=\"FDR threshold\",\n ylabel=\"true FDR\")\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "10676449", "language": "Python", "matching_score": 3.4715988636016846, "max_stars_count": 2, "path": "scripts/plot-fdr-control.py" }, { "content": "from itertools import product\nimport matplotlib\nmatplotlib.use(\"agg\")\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport common\nimport numpy as np\nimport math\n\nvartype = snakemake.wildcards.vartype\ncolors = common.get_colors(snakemake.config)\n\ndef props(callers):\n return product(callers, snakemake.params.len_ranges)\n\nphred_to_log_factor = -0.23025850929940456\nlog_to_phred_factor = -4.3429448190325175\n\ndef plot_len_range(minlen, maxlen):\n for calls, (caller, len_range) in zip(snakemake.input.varlociraptor_calls, props(snakemake.params.varlociraptor_callers)):\n if len_range[0] != minlen and len_range[1] != maxlen:\n continue\n label = \"varlociraptor+{}\".format(caller)\n calls = pd.read_table(calls)\n calls[\"caller\"] = label\n if not calls.empty:\n color = colors[caller]\n sns.kdeplot(calls[calls.is_tp].PROB_SOMATIC_TUMOR.map(np.log), color=color, label=label)\n sns.kdeplot(calls[~calls.is_tp].PROB_SOMATIC_TUMOR.map(np.log), color=color, linestyle=\":\", label=\"\")\n\n ax = plt.gca()\n fmt_ticks = lambda ticks: [\"{:.1g}\".format(np.exp(t)) for t in ticks]\n ax.set_xticklabels(fmt_ticks(plt.xticks()[0]))\n ax.legend().remove()\n handles, _ = ax.get_legend_handles_labels()\n sns.despine()\n\n return ax, handles\n\ncommon.plot_ranges(\n snakemake.params.len_ranges,\n plot_len_range,\n xlabel=r\"$-10 \\log_{10}$ Pr(somatic)\",\n ylabel=\"density\")\n\nplt.savefig(snakemake.output[0], bbox_inches=\"tight\")\n", "id": "5242950", "language": "Python", "matching_score": 1.8988873958587646, "max_stars_count": 2, "path": "scripts/plot-score-dist.py" }, { "content": "import pandas as pd\nimport numpy as np\nfrom common import load_variants\n\n\nminlen = int(snakemake.wildcards.minlen)\nmaxlen = int(snakemake.wildcards.maxlen)\nvartype = snakemake.wildcards.vartype\n\nif snakemake.wildcards.mode == \"varlociraptor\":\n score = snakemake.config[\"caller\"][\"varlociraptor\"][\"score\"]\n # calls are already filtered by FDR control step\n minlen = None\n maxlen = None\nelif snakemake.wildcards.mode == \"default\":\n score = 
snakemake.config[\"caller\"][snakemake.wildcards.caller][\"score\"]\nelse:\n score = None\n\ncalls = load_variants(snakemake.input.calls, vartype=vartype, minlen=minlen, maxlen=maxlen)\n\ncalls[\"is_tp\"] = calls[\"MATCHING\"] >= 0\n\ncalls[\"score\"] = calls[score] if score else np.nan\n\ncalls.to_csv(snakemake.output[0], sep=\"\\t\")\n", "id": "11029884", "language": "Python", "matching_score": 1, "max_stars_count": 2, "path": "scripts/obtain-tp-fp.py" }, { "content": "from pybedtools import BedTool\n\n\nassert len(snakemake.input.bams) == 4\n\nMINCOV = 10\n\nexons = BedTool(snakemake.input.exons)\n\ncoverage = exons.multi_bam_coverage(bams=snakemake.input.bams)\n\n\ndef filter(exon):\n if not len(exon):\n return False\n return all(int(cov) / len(exon) >= MINCOV for cov in exon[-4:])\n \n\ncoverage.filter(filter).saveas(snakemake.output.bed)\n", "id": "11020868", "language": "Python", "matching_score": 0.10703213512897491, "max_stars_count": 2, "path": "scripts/exoncov.py" } ]
2.125107
sirnfs
[ { "content": "import enum\nfrom riskManagement import riskManagement\nfrom optionPrimitives import optionPrimitive\n\nclass StrangleManagementStrategyTypes(enum.Enum):\n HOLD_TO_EXPIRATION = 0\n CLOSE_AT_50_PERCENT = 1\n\nclass StrangleRiskManagement(riskManagement.RiskManagement):\n \"\"\"This class handles risk management strategies for strangles.\"\"\"\n\n def __init__(self, managementType: StrangleManagementStrategyTypes) -> None:\n self.__managementType = managementType\n\n def managePosition(self, currentPosition:optionPrimitive) -> bool:\n \"\"\"Manages the current position in the portfolio.\n Managing the position means indicating whether the position should be removed from the portfolio. In addition, we\n could create another signalEvent here if we want to do something like roll the strategy to the next month.\n :param currentPosition: Current position in the portfolio.\n \"\"\"\n if self.__managementType == StrangleManagementStrategyTypes.HOLD_TO_EXPIRATION:\n if currentPosition.getNumberOfDaysLeft() == 0:\n # Indicates that the options are expiring on this date.\n return True\n elif self.__managementType == StrangleManagementStrategyTypes.CLOSE_AT_50_PERCENT:\n # TODO(msantoro): Add supporting code here.\n return False\n else:\n raise NotImplementedError('No management strategy was specified or has not yet been implemented.')\n return False\n", "id": "9631145", "language": "Python", "matching_score": 4.05245304107666, "max_stars_count": 82, "path": "riskManagement/strangleRiskManagement.py" }, { "content": "import abc\nfrom optionPrimitives import optionPrimitive\n\nclass RiskManagement(abc.ABC):\n \"\"\"This class is a generic type for handling risk management strategies.\"\"\"\n\n @abc.abstractmethod\n def managePosition(self, currentPosition:optionPrimitive) -> bool:\n \"\"\"Manages the current position in the portfolio.\n Managing the position means indicating whether the position should be removed from the portfolio. In addition, we\n could create another signalEvent here if we want to do something like roll the strategy to the next month.\n :param currentPosition: Current position in the portfolio.\n \"\"\"\n pass", "id": "9955728", "language": "Python", "matching_score": 1.324770450592041, "max_stars_count": 82, "path": "riskManagement/riskManagement.py" }, { "content": "import abc\n\nclass DataHandler(abc.ABC):\n \"\"\"This class is a generic type for handling incoming data. Incoming data sources could be historical data in the\n form of a CSV or a database, or it could be live tick data coming from an exchange.\"\"\"\n\n @abc.abstractmethod\n def getNextTick(self) -> bool:\n \"\"\"Used to get the next available piece of data from the data source. For the CSV example, this would likely be the\n next row of the CSV.\n :return True / False indicating if data is available.\n \"\"\"\n pass", "id": "12731382", "language": "Python", "matching_score": 2.086595058441162, "max_stars_count": 82, "path": "dataHandler/dataHandler.py" }, { "content": "import abc\nimport enum\n\nclass EventTypes(enum.Enum):\n TICK = 0\n SIGNAL = 1\n\nclass EventHandler(abc.ABC):\n \"\"\"This class is a generic type for handling all events for the backtester and for live trading.\"\"\"\n\n @abc.abstractmethod\n def createEvent(self, data) -> None:\n \"\"\"Create an event which will be used for later processing e.g., create a data tick event for an option chain read\n from the CSV data handler.\n Attributes:\n data: input data for the event. 
e.g., option chain.\n \"\"\"\n pass", "id": "721234", "language": "Python", "matching_score": 1.890201210975647, "max_stars_count": 82, "path": "events/event.py" }, { "content": "from events import event\nfrom typing import Any, Iterable\n\nclass SignalEvent(event.EventHandler):\n \"\"\"This class handles the events for signals to carry out on tick data.\"\"\"\n\n def __init__(self) -> None:\n self.__data = None\n self.type = event.EventTypes.SIGNAL\n\n def getData(self) -> Iterable[Any]:\n return self.__data\n\n def createEvent(self, data: Iterable[Any]) -> None:\n \"\"\"Create a signal event.\n Attributes:\n data: input data for the event.\n \"\"\"\n self.__data = data\n", "id": "10688688", "language": "Python", "matching_score": 3.480700731277466, "max_stars_count": 82, "path": "events/signalEvent.py" }, { "content": "from events import event\nfrom typing import Any, Iterable\n\nclass TickEvent(event.EventHandler):\n \"\"\"This class handles the events for new incoming data whether it be from historical data or from live trading.\"\"\"\n\n def __init__(self) -> None:\n self.__data = None\n self.type = event.EventTypes.TICK\n\n def getData(self) -> Iterable[Any]:\n return self.__data\n\n def createEvent(self, data: Iterable[Any]) -> None:\n \"\"\"Create a tick event.\n Attributes:\n data: input data for the event. e.g., row of CSV data.\n \"\"\"\n self.__data = data", "id": "6343564", "language": "Python", "matching_score": 1.002360463142395, "max_stars_count": 82, "path": "events/tickEvent.py" }, { "content": "import csv\nimport datetime\nimport decimal\nimport json\nimport pandas as pd\nimport queue\nfrom dataHandler import dataHandler\nfrom base import call\nfrom base import put\nfrom base import option\nfrom events import tickEvent\nfrom typing import Iterable, Mapping, Text\n\nclass CsvData(dataHandler.DataHandler):\n \"\"\"This class handles data from CSV files which will be used for backtesting sessions.\"\"\"\n\n def __init__(self, csvPath: Text, dataProvider: Text, eventQueue: queue.Queue) -> None:\n \"\"\"Initializes CSV data parameters for file reading.\n\n Attributes:\n csvPath: path to CSV file used in backtesting.\n dataProvider: historical data provider (e.g, provider of CSV).\n eventQueue: location to place new data tick event.\n \"\"\"\n self.__csvPath = csvPath\n self.__curTimeDate = None\n self.__dataConfig = None\n self.__csvReader = None\n self.__csvColumnNames = None\n self.__dateColumnIndex = None\n self.__nextTimeDateRow = None\n self.__dataProvider = dataProvider\n self.__eventQueue = eventQueue\n\n # Open data source. Raises exception if failure.\n self.__dataConfig = self.__openDataSource()\n\n def __openDataSource(self) -> Mapping[Text, int]:\n \"\"\"Used to connect to the data source for the first time. In the case of a CSV, this means opening the file.\n The directory used is determined during initialization.\n :return dictionary from dataProvider.json file.\n :raises FileNotFoundError: Cannot find a CSV at specified location.\n :raises ValueError: Cannot load data as a JSON file.\n :raises ValueError: Requested data provider not found in JSON file.\n :raises ValueError: Number of CSV columns not provided in JSON file.\n :raises ValueError: Number of columns read from CSV does not match number of columns in JSON file.\n \"\"\"\n try:\n fileHandle = open(self.__csvPath, 'r')\n except OSError as e:\n raise OSError('Unable to open CSV at location: %s.' 
% self.__csvPath) from e\n\n # Load data provider information from dataProviders.json file.\n try:\n with open('./dataHandler/dataProviders.json') as dataProvider:\n dataConfig = json.load(dataProvider)\n except (FileNotFoundError, json.decoder.JSONDecodeError) as e:\n raise ValueError('Failure when trying to open / load data from JSON file: %s.' % (\n 'dataHandler/dataProviders.json')) from e\n\n # Check that data provider in JSON file matches the provided string in self._dataProvider\n if not self.__dataProvider in dataConfig:\n raise ValueError('The requested data provider: %s was not found in dataProviders.json' % self.__dataProvider)\n\n # Check that the number of columns in the CSV matches the number specified by the config file.\n self.__csvReader = csv.reader(fileHandle)\n self.__csvColumnNames = next(self.__csvReader)\n numberCsvColumns = len(self.__csvColumnNames)\n if 'number_columns' not in dataConfig[self.__dataProvider]:\n raise ValueError('number_columns not provided in dataProviders.json file')\n if not numberCsvColumns == dataConfig[self.__dataProvider]['number_columns']:\n raise ValueError('Number of columns read from CSV did not match the number of columns in dataProviders.json')\n return dataConfig\n\n def __getOptionChain(self) -> pd.DataFrame:\n \"\"\"Used to get the option chain data for the underlying. The option chain consists of all of the puts and calls\n at all strikes currently listed for the underlying.\n :return Pandas dataframe with option chain data.\n \"\"\"\n # Get the first date if self.__curTimeDate is None.\n dateColumnName = self.__dataConfig[self.__dataProvider]['column_names']['dateTime']\n if self.__curTimeDate is None:\n # Find the index of the date column in the header row of the CSV.\n for index, column in enumerate(self.__csvColumnNames):\n if column == dateColumnName:\n self.__dateColumnIndex = index\n if self.__dateColumnIndex is None:\n raise TypeError('The dateColumnName was not found in the CSV.')\n\n rowList = []\n # Get the next row of the CSV and convert the date column to a datetime object.\n row = next(self.__csvReader)\n rowList.append(row)\n self.__curTimeDate = datetime.datetime.strptime(row[self.__dateColumnIndex],\n self.__dataConfig[self.__dataProvider]['date_time_format'])\n\n # Get the rest of the rows that match the curTimeDate.\n for row in self.__csvReader:\n if datetime.datetime.strptime(row[self.__dateColumnIndex],\n self.__dataConfig[self.__dataProvider]['date_time_format']) == self.__curTimeDate:\n rowList.append(row)\n else:\n # Need to save the last row that doesn't match the curTimeDate so we can use it again.\n self.__nextTimeDateRow = row\n break\n\n # Create a Pandas dataframe from the list of lists.\n return pd.DataFrame(rowList, columns=self.__csvColumnNames)\n\n else:\n if self.__nextTimeDateRow is None:\n return pd.DataFrame()\n # Get the date / time from the previously stored row.\n self.__curTimeDate = datetime.datetime.strptime(self.__nextTimeDateRow[self.__dateColumnIndex],\n self.__dataConfig[self.__dataProvider]['date_time_format'])\n\n # Get all of the CSV rows for the curTimeDate.\n rowList = []\n rowList.append(self.__nextTimeDateRow)\n for row in self.__csvReader:\n if datetime.datetime.strptime(row[self.__dateColumnIndex],\n self.__dataConfig[self.__dataProvider]['date_time_format']) == self.__curTimeDate:\n rowList.append(row)\n else:\n # Need to save the last row that doesn't match the curTimeDate so we can use it again.\n self.__nextTimeDateRow = row\n break\n\n # If no rows were added above, it 
means that there's no more data to read from the CSV.\n if len(rowList) == 1:\n self.__nextTimeDateRow = None\n return pd.DataFrame()\n # Create a Pandas dataframe from the list of lists.\n return pd.DataFrame(rowList, columns=self.__csvColumnNames)\n\n def __createBaseType(self, optionChain: pd.DataFrame) -> Iterable[option.Option]:\n \"\"\"\n Convert an option chain held in a dataframe to base option types (calls or puts).\n\n Attributes:\n optionChain: Pandas dataframe with optionChain data as rows.\n\n :raises ValueError: Symbol for put/call in JSON not found in dataframe column.\n :return: List of Option base type objects (puts or calls).\n \"\"\"\n optionObjects = []\n # Create a dictionary for the fields that we will read from each row of the dataframe. The fields should also be\n # specified in the dataProviders.json file.\n # Instead of manually specifying the fields below, we could read them from the Option class.\n optionFieldDict = {'underlyingTicker': None, 'strikePrice': None, 'delta': None, 'expirationDateTime': None,\n 'underlyingPrice': None, 'optionSymbol': None, 'bidPrice': None, 'askPrice': None,\n 'tradePrice': None, 'openInterest': None, 'volume': None, 'dateTime': None, 'theta': None,\n 'gamma': None, 'rho': None, 'vega': None, 'impliedVol': None, 'exchangeCode': None,\n 'exercisePrice': None, 'assignPrice': None, 'openCost': None, 'closeCost': None,\n }\n dataProviderConfig = self.__dataConfig[self.__dataProvider]\n for _, row in optionChain.iterrows():\n # Defaults to PUT (True).\n putOrCall = True\n for option_column_name, dataframe_column_name in dataProviderConfig['column_names'].items():\n # Check that we need to look up the field.\n if not dataframe_column_name:\n continue\n if option_column_name == 'optionType':\n optionType = row[dataframe_column_name]\n # Convert any lowercase symbols to uppercase.\n optionType = str(optionType).upper()\n if optionType == dataProviderConfig['call_symbol_abbreviation']:\n putOrCall = False\n elif optionType == dataProviderConfig['put_symbol_abbreviation']:\n putOrCall = True\n else:\n raise ValueError('Symbol for put / call in dataProviders.json not found in optionType dataframe column.')\n else:\n optionFieldDict[option_column_name] = row[dataframe_column_name]\n\n if optionFieldDict['bidPrice'] is not None and optionFieldDict['askPrice'] is not None:\n optionFieldDict['tradePrice'] = (decimal.Decimal(optionFieldDict['bidPrice']) + decimal.Decimal(\n optionFieldDict['askPrice'])) / decimal.Decimal(2.0)\n\n argsDict = {'underlyingTicker': optionFieldDict['underlyingTicker'],\n 'strikePrice': decimal.Decimal(optionFieldDict['strikePrice']),\n 'delta': float(optionFieldDict['delta']), 'expirationDateTime': datetime.datetime.strptime(\n optionFieldDict['expirationDateTime'], dataProviderConfig['date_time_format']),\n 'underlyingPrice': decimal.Decimal(optionFieldDict['underlyingPrice']),\n 'optionSymbol': optionFieldDict['optionSymbol'],\n 'bidPrice': decimal.Decimal(optionFieldDict['bidPrice']),\n 'askPrice': decimal.Decimal(optionFieldDict['askPrice']),\n 'tradePrice': decimal.Decimal(optionFieldDict['tradePrice']),\n 'openInterest': int(optionFieldDict['openInterest']), 'volume': int(optionFieldDict['volume']),\n 'dateTime': datetime.datetime.strptime(optionFieldDict['dateTime'],\n dataProviderConfig['date_time_format']),\n 'theta': float(optionFieldDict['theta']),\n 'gamma': float(optionFieldDict['gamma']), 'rho': float(optionFieldDict['rho']),\n 'vega': float(optionFieldDict['vega']), 'impliedVol': 
float(optionFieldDict['impliedVol']),\n 'exchangeCode': optionFieldDict['exchangeCode'],\n 'exercisePrice': decimal.Decimal(optionFieldDict['exercisePrice']) if\n optionFieldDict['exercisePrice'] else None,\n 'assignPrice': decimal.Decimal(optionFieldDict['assignPrice']) if optionFieldDict[\n 'assignPrice'] else None,\n 'openCost': decimal.Decimal(optionFieldDict['openCost']) if optionFieldDict[\n 'openCost'] else None,\n 'closeCost': decimal.Decimal(optionFieldDict['closeCost']) if optionFieldDict[\n 'closeCost'] else None,\n }\n if not putOrCall:\n optionObjects.append(call.Call(**argsDict))\n else:\n optionObjects.append(put.Put(**argsDict))\n\n # Reset all the dictionary values back to None. This is probably overkill since we can just rewrite them.\n optionFieldDict = optionFieldDict.fromkeys(optionFieldDict, None)\n return optionObjects\n\n def getNextTick(self) -> bool:\n \"\"\"Used to get the next available piece of data from the data source. For the CSV example, this would likely be the\n next row for a stock or group of rows for an option chain.\n :return True / False indicating if there is data available.\n \"\"\"\n if self.__dataConfig[self.__dataProvider]['data_source_type'] == 'options':\n # Get optionChain as a dataframe.\n optionChain = self.__getOptionChain()\n if len(optionChain.index) == 0:\n # No more data available.\n return False\n # Convert optionChain from a dataframe to Option class objects.\n optionChainObjs = self.__createBaseType(optionChain)\n # Create tick event with option chain objects.\n event = tickEvent.TickEvent()\n event.createEvent(optionChainObjs)\n self.__eventQueue.put(event)\n return True\n elif self.__dataConfig[self.__dataProvider]['data_source_type'] == 'stocks':\n pass\n", "id": "11044923", "language": "Python", "matching_score": 5.008325576782227, "max_stars_count": 82, "path": "dataHandler/csvData.py" }, { "content": "import unittest\nimport decimal\nfrom dataHandler import csvData\nimport queue\n\nclass TestCSVHandler(unittest.TestCase):\n\n def setUp(self):\n # Create CsvData class object.\n self._dataProvider = 'iVolatility'\n self._filename = '/Users/msantoro/PycharmProjects/Backtester/sampleData/aapl_sample_ivolatility.csv'\n self._eventQueue = queue.Queue()\n self._csvObj = csvData.CsvData(csvPath=self._filename, dataProvider=self._dataProvider,\n eventQueue=self._eventQueue)\n\n def testOpenDataSourceNoCSVFound(self):\n \"\"\"Tests that an exception is raised when no CSV is found.\"\"\"\n with self.assertRaisesRegex(OSError, 'Unable to open CSV at location: bad_path_name.'):\n csvData.CsvData(csvPath='bad_path_name', dataProvider=self._dataProvider,\n eventQueue=self._eventQueue)\n\n def testOpenDataSourceInvalidDataProvider(self):\n \"\"\"Tests that an exception is rasied if the requested data provider isn't in the config file.\"\"\"\n with self.assertRaisesRegex(ValueError, ('The requested data provider: unknown_data_provider was not found in '\n 'dataProviders.json')):\n csvData.CsvData(csvPath=self._filename, dataProvider='unknown_data_provider',\n eventQueue=self._eventQueue)\n\n def testGetOptionChain(self):\n \"\"\"Tests that an option chain is successfully read from CSV file.\"\"\"\n # The first and second calls to getNextTick should load one option chain into the queue and return True,\n # and the third call should return False\n self.assertTrue(self._csvObj.getNextTick())\n self.assertTrue(self._eventQueue.qsize(), 1)\n self.assertTrue(self._csvObj.getNextTick())\n self.assertTrue(self._eventQueue.qsize(), 2)\n 
self.assertFalse(self._csvObj.getNextTick())\n self.assertTrue(self._eventQueue.qsize(), 2)\n\n # Check number of option objects in the first and second queue positions.\n desiredNumObjects = 1822\n self.assertEqual(len(self._eventQueue.get().getData()), desiredNumObjects)\n self.assertEqual(len(self._eventQueue.get().getData()), desiredNumObjects)\n self.assertEqual(self._eventQueue.qsize(), 0)\n\n def testGetOptionChainBadColumnName(self):\n \"\"\"Tests that an exception is raised if column name in the CSV doesn't match the one in dataProviders.json.\"\"\"\n # Create CsvData class object.\n dataProvider = 'iVolatility'\n filename = '/Users/msantoro/PycharmProjects/Backtester/sampleData/bad_column_name.csv'\n eventQueue = queue.Queue()\n csvObj = csvData.CsvData(csvPath=filename, dataProvider=dataProvider, eventQueue=eventQueue)\n\n with self.assertRaisesRegex(TypeError, ('The dateColumnName was not found in the CSV')):\n csvObj.getNextTick()\n\n def testCreateBaseType(self):\n \"\"\"Tests that Put and Call objects are created successfully.\"\"\"\n # First row in the sample data is a call, and second row is a put.\n eventQueue = queue.Queue()\n csvObj = csvData.CsvData(csvPath=self._filename, dataProvider=self._dataProvider, eventQueue=eventQueue)\n csvObj.getNextTick()\n optionChainObjs = eventQueue.get().getData()\n desiredCallAskPrice = decimal.Decimal(40.45)\n desiredPutAskPrice = decimal.Decimal(0.01)\n desiredStrikePrice = 55\n desiredUnderlyingTicker = 'AAPL'\n self.assertEqual(optionChainObjs[0].underlyingTicker, desiredUnderlyingTicker)\n self.assertEqual(optionChainObjs[0].strikePrice, desiredStrikePrice)\n self.assertEqual(optionChainObjs[1].underlyingTicker, desiredUnderlyingTicker)\n self.assertEqual(optionChainObjs[1].strikePrice, desiredStrikePrice)\n self.assertAlmostEqual(optionChainObjs[0].askPrice, desiredCallAskPrice)\n self.assertAlmostEqual(optionChainObjs[1].askPrice, desiredPutAskPrice)\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "6027420", "language": "Python", "matching_score": 3.4363980293273926, "max_stars_count": 82, "path": "dataHandler/csvDataTest.py" }, { "content": "import unittest\nfrom events import event\nfrom events import tickEvent\n\nclass TestTickEvent(unittest.TestCase):\n\n def testCreateTickEvent(self):\n \"\"\"Tests that a signal event is successfully created.\"\"\"\n tickObj = tickEvent.TickEvent()\n # Check that the data reference attribute is set to None since there has been no data passed.\n self.assertEqual(tickObj.getData(), None)\n self.assertEqual(tickObj.type, event.EventTypes.TICK)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "12318734", "language": "Python", "matching_score": 4.02628755569458, "max_stars_count": 82, "path": "events/tickEventTest.py" }, { "content": "import unittest\nfrom events import event\nfrom events import signalEvent\n\nclass TestSignalEvent(unittest.TestCase):\n\n def testCreateSignalEvent(self):\n \"\"\"Tests that a signal event is successfully created.\"\"\"\n signalObj = signalEvent.SignalEvent()\n # Check that the data reference attribute is set to None since there has been no data passed.\n self.assertEqual(signalObj.getData(), None)\n self.assertEqual(signalObj.type, event.EventTypes.SIGNAL)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "12415881", "language": "Python", "matching_score": 0.324407160282135, "max_stars_count": 82, "path": "events/signalEventTest.py" }, { "content": "import dataclasses\nimport decimal\nimport logging\nimport typing\nfrom events import 
signalEvent, tickEvent\nfrom optionPrimitives import optionPrimitive\n\[email protected]()\nclass Portfolio(object):\n \"\"\"This class creates a portfolio to hold all open positions.\n At the moment, the portfolio runs live, but in the future we should migrate the portfolio to be stored in a\n database.\n\n Attributes:\n startingCapital -- How much capital we have when starting.\n maxCapitalToUse -- Max percent of portfolio to use (decimal between 0 and 1).\n maxCapitalToUsePerTrade -- Max percent of portfolio to use on one trade (same underlying), 0 to 1.\n\n Portfolio intrinsics:\n realizedCapital: Updated when positions are actually closed.\n netLiquidity: Net liquidity of total portfolio (ideally includes commissions, fees, etc.).\n totalBuyingPower: Total buying power being used in portfolio.\n openProfitLoss: Current value of open positions in dollars (positive or negative).\n dayProfitLoss: Amount of money gained / lost for the current day in dollars (positive or negative).\n openProfitLossPercent: Same as PLopen, but expressed as a percent of total capital being used.\n dayProfitLossPercent: Same as PLday, but expressed as a percentage of total capital being used.\n totalDelta: Sum of deltas for all positions (positive or negative).\n totalVega: Sum of vegas for all positions (positive or negative).\n totalTheta: Sum of thetas for all positions (positive or negative).\n totalGamma: Sum of gammas for all positions (positive or negative).\n \"\"\"\n\n startingCapital: decimal.Decimal\n maxCapitalToUse: float\n maxCapitalToUsePerTrade: float\n realizedCapital: typing.ClassVar[decimal.Decimal]\n netLiquidity: typing.ClassVar[decimal.Decimal]\n totalBuyingPower: typing.ClassVar[decimal.Decimal] = decimal.Decimal(0.0)\n openProfitLoss: typing.ClassVar[decimal.Decimal] = decimal.Decimal(0.0)\n dayProfitLoss: typing.ClassVar[decimal.Decimal] = decimal.Decimal(0.0)\n openProfitLossPercent: typing.ClassVar[float] = 0.0\n dayProfitLossPercent: typing.ClassVar[float] = 0.0\n totalDelta: typing.ClassVar[float] = 0.0\n totalVega: typing.ClassVar[float] = 0.0\n totalTheta: typing.ClassVar[float] = 0.0\n totalGamma: typing.ClassVar[float] = 0.0\n activePositions: typing.ClassVar[list] = []\n\n def __post_init__(self):\n self.realizedCapital = self.startingCapital\n self.netLiquidity = self.startingCapital\n self.activePositions = []\n\n def onSignal(self, event: signalEvent) -> None:\n \"\"\"Handle a new signal event; indicates that a new position should be added to the portfolio if portfolio risk\n management conditions are satisfied.\n\n :param event: Event to be handled by portfolio; a signal event in this case.\n \"\"\"\n # Get the data from the tick event\n eventData = event.getData()\n\n # Return if there's no data\n if not eventData:\n return\n\n positionData = eventData[0]\n\n # Determine if the eventData meets the portfolio criteria for adding a position.\n tradeCapitalRequirement = positionData.getBuyingPower()\n\n # Amount of buying power that would be used with this strategy.\n tentativeBuyingPower = self.totalBuyingPower + tradeCapitalRequirement\n\n # If we have not used too much total buying power in the portfolio, and the current trade is using less\n # than the maximum allowed per trade, we add the position to the portfolio.\n if ((tentativeBuyingPower < self.netLiquidity*decimal.Decimal(self.maxCapitalToUse)) and\n (tradeCapitalRequirement < self.netLiquidity*decimal.Decimal(self.maxCapitalToUsePerTrade))):\n self.activePositions.append(eventData)\n self.totalBuyingPower += 
tentativeBuyingPower\n logging.info('Buying power updated.')\n\n # Update delta, vega, theta and gamma for portfolio.\n self.totalDelta += positionData.getDelta()\n self.totalGamma += positionData.getGamma()\n self.totalTheta += positionData.getTheta()\n self.totalVega += positionData.getVega()\n else:\n if tentativeBuyingPower >= self.netLiquidity * decimal.Decimal(self.maxCapitalToUse):\n logging.info(\"Not enough buying power available based on maxCapitalToUse threshold.\")\n else:\n logging.info(\"Trade uses too much buying power based on maxCapitalToUsePerTrade threshold.\")\n\n def updatePortfolio(self, event: tickEvent) -> None:\n \"\"\" Updates the intrinsics of the portfolio by updating the values of the options used in the different\n optionPrimitives.\n :param event: Tick event with the option chain which will be be used to update the portfolio.\n \"\"\"\n # Get the data from the tick event.\n tickData = event.getData()\n\n # If we did not get any tick data or there are no positions in the portfolio, return.\n if not tickData or not self.activePositions:\n return\n\n # Go through the positions currently in the portfolio and update the prices.\n # We first reset the entire portfolio and recalculate the values.\n self.totalDelta = 0\n self.totalGamma = 0\n self.totalVega = 0\n self.totalTheta = 0\n self.totalBuyingPower = 0\n self.netLiquidity = 0\n self.openProfitLoss = 0\n self.dayProfitLoss = 0\n self.openProfitLossPercent = 0\n self.dayProfitLossPercent = 0\n\n # Array / list used to keep track of which positions we should remove.\n idxsToDelete = []\n\n # Go through all positions in portfolio and update the values.\n for idx, curPosition in enumerate(self.activePositions):\n positionData = curPosition[0]\n riskMangementStrategy = curPosition[1]\n\n # Update the option intrinsic values.\n # TODO(msantoro): Can just 'continue' here if the position doesn't need to be updated.\n positionData.updateValues(tickData)\n\n # Called even if position is removed to update netLiquidity in the portfolio.\n self.netLiquidity += positionData.calcProfitLoss()\n\n if riskMangementStrategy.managePosition(positionData):\n idxsToDelete.append(idx)\n else:\n # Update greeks and total buying power.\n self.__calcPortfolioValues(positionData)\n\n # Add the realized capital to the profit / loss of all open positions to get final net liq.\n self.netLiquidity += self.realizedCapital\n logging.info(\"Net liquidity: %f.\", self.netLiquidity)\n\n # Go through and delete any positions which were added to the idxsToDelete array.\n for idx in reversed(idxsToDelete):\n logging.info('The %s position was closed.', self.activePositions[idx][0].getUnderlyingTicker())\n del(self.activePositions[idx])\n\n def __calcPortfolioValues(self, curPosition: optionPrimitive.OptionPrimitive) -> None:\n \"\"\"Updates portfolio values for current position.\n\n :param curPosition: Current position in portfolio being processed.\n \"\"\"\n self.totalDelta += curPosition.getDelta()\n self.totalGamma += curPosition.getGamma()\n self.totalTheta += curPosition.getTheta()\n self.totalVega += curPosition.getVega()\n self.totalBuyingPower += curPosition.getBuyingPower()\n\n # TODO: Add self.openProfitLoss,self.dayProfitLoss,self.openProfitLossPercent, and self.dayProfitLossPercent.", "id": "4055576", "language": "Python", "matching_score": 4.854714393615723, "max_stars_count": 82, "path": "portfolioManager/portfolio.py" }, { "content": "import unittest\nimport datetime\nimport decimal\nfrom portfolioManager import portfolio\nfrom 
optionPrimitives import optionPrimitive, strangle\nfrom base import put\nfrom base import call\nfrom events import signalEvent, tickEvent\nfrom riskManagement import strangleRiskManagement\n\nclass TestPortfolio(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Create portfolio object to be shared among tests.\"\"\"\n startingCapital = decimal.Decimal(1000000)\n maxCapitalToUse = 0.5\n maxCapitalToUsePerTrade = 0.5\n self.portfolioObj = portfolio.Portfolio(startingCapital, maxCapitalToUse, maxCapitalToUsePerTrade)\n\n # Strangle object to be shared among tests.\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2690), delta=-0.16, gamma=0.01, theta=0.02, vega=0.03,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(7.45), askPrice=decimal.Decimal(7.50), tradePrice=decimal.Decimal(7.475))\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2855), delta=0.16, gamma=0.01, theta=0.02, vega=0.03,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(5.20), askPrice=decimal.Decimal(5.40),\n tradePrice=decimal.Decimal(5.30))\n self.strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n self.riskManagement = strangleRiskManagement.StrangleRiskManagement(\n strangleRiskManagement.StrangleManagementStrategyTypes.HOLD_TO_EXPIRATION)\n\n def testOnSignalSucess(self):\n \"\"\"Tests that onSignal event successfully updates portfolio.\"\"\"\n # Create signal event.\n event = signalEvent.SignalEvent()\n event.createEvent([self.strangleObj, self.riskManagement])\n\n # Test portfolio onSignal event.\n self.portfolioObj.onSignal(event)\n\n # Check that positions array in portfolio is not empty.\n self.assertNotEqual(len(self.portfolioObj.activePositions), 0)\n\n # Check that the buying power used by the strangle is correct.\n self.assertAlmostEqual(self.portfolioObj.totalBuyingPower, decimal.Decimal(63310.0))\n\n # Get the total delta value of the portfolio and check that it is 0.01.\n self.assertAlmostEqual(self.portfolioObj.totalDelta, 0.0)\n\n def testOnSignalNotEnoughBuyingPower(self):\n \"\"\"Tests that total buying power is not updated if there's not enough buying power.\"\"\"\n startingCapital = decimal.Decimal(100000)\n maxCapitalToUse = 0.1\n maxCapitalToUsePerTrade = 0.1\n portfolioObj = portfolio.Portfolio(startingCapital, maxCapitalToUse, maxCapitalToUsePerTrade)\n\n event = signalEvent.SignalEvent()\n event.createEvent([self.strangleObj, self.riskManagement])\n portfolioObj.onSignal(event)\n\n def testUpdatePortfolio(self):\n \"\"\"Tests the ability to update option values for a position in the portfolio.\"\"\"\n # Create strangle event.\n event = signalEvent.SignalEvent()\n event.createEvent([self.strangleObj, self.riskManagement])\n\n # Create portfolio onSignal event, which adds the position to the portfolio.\n startingCapital = decimal.Decimal(1000000)\n maxCapitalToUse = 0.5\n maxCapitalToUsePerTrade = 0.5\n portfolioObj = portfolio.Portfolio(startingCapital, maxCapitalToUse, maxCapitalToUsePerTrade)\n portfolioObj.onSignal(event)\n\n # Next, create a strangle with the next days prices and update the portfolio values.\n putOpt = 
put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2690), delta=-0.16, gamma=0.01, theta=0.02, vega=0.03,\n dateTime=datetime.datetime.strptime('01/02/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(6.45), askPrice=decimal.Decimal(6.50), tradePrice=decimal.Decimal(6.475))\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2855), delta=0.16, gamma=0.01, theta=0.02, vega=0.03,\n dateTime=datetime.datetime.strptime('01/02/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(4.20), askPrice=decimal.Decimal(4.40),\n tradePrice=decimal.Decimal(4.30))\n\n # Create tick event and update portfolio values.\n testOptionChain = [callOpt, putOpt]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n portfolioObj.updatePortfolio(event)\n\n # Check that the new portfolio values are correct (e.g., buying power, total delta, total gamma, etc).\n self.assertAlmostEqual(portfolioObj.totalBuyingPower, decimal.Decimal(63310.0))\n self.assertAlmostEqual(portfolioObj.totalVega, 0.06)\n self.assertAlmostEqual(portfolioObj.totalDelta, 0.0)\n self.assertAlmostEqual(portfolioObj.totalGamma, 0.02)\n self.assertAlmostEqual(portfolioObj.totalTheta, 0.04)\n self.assertAlmostEqual(portfolioObj.netLiquidity, decimal.Decimal(1000200.0))\n\n def testUpdatePortfolioRiskManagementHoldToExpiration(self):\n \"\"\"Tests that the position is removed from the portfolio when expiration occurs.\"\"\"\n # Create a new position in addition to the default self.strangleObj position.\n startingCapital = decimal.Decimal(1000000)\n maxCapitalToUse = 0.5\n maxCapitalToUsePerTrade = 0.25\n portfolioObj = portfolio.Portfolio(startingCapital, maxCapitalToUse, maxCapitalToUsePerTrade)\n\n # Add first position to the portfolio\n event = signalEvent.SignalEvent()\n event.createEvent([self.strangleObj, self.riskManagement])\n portfolioObj.onSignal(event)\n\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2800.00),\n strikePrice=decimal.Decimal(2700), delta=-0.16, gamma=0.01, theta=0.02, vega=0.03,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(8.00), askPrice=decimal.Decimal(8.50), tradePrice=decimal.Decimal(8.25))\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2800.00),\n strikePrice=decimal.Decimal(3000), delta=0.16, gamma=0.01, theta=0.02, vega=0.03,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(6.00), askPrice=decimal.Decimal(6.50),\n tradePrice=decimal.Decimal(6.25))\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n\n # Add second position to the portfolio.\n event = signalEvent.SignalEvent()\n event.createEvent([strangleObj, self.riskManagement])\n portfolioObj.onSignal(event)\n\n # Update the portfolio, which should remove the second event. 
We do not change the prices of the putOpt or callOpt.\n testOptionChain = [callOpt, putOpt]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n portfolioObj.updatePortfolio(event)\n # Only one position should be left in the portfolio after removing the expired position.\n self.assertEqual(len(portfolioObj.activePositions), 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "7890338", "language": "Python", "matching_score": 6.26988410949707, "max_stars_count": 82, "path": "portfolioManager/portfolioTest.py" }, { "content": "import unittest\nfrom optionPrimitives import optionPrimitive\nfrom optionPrimitives import strangle\nfrom base import put\nfrom base import call\nimport datetime\nimport decimal\n\nclass TestStrangle(unittest.TestCase):\n\n def setUp(self):\n orderQuantity = 1\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2690), delta=0.15, vega=0.04, theta=-0.07, gamma=0.11,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(7.45), askPrice=decimal.Decimal(7.50), tradePrice=decimal.Decimal(7.475))\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2855), delta=-0.16, vega=0.05, theta=-0.06, gamma=0.12,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(5.20), askPrice=decimal.Decimal(5.40),\n tradePrice=decimal.Decimal(5.30))\n self.__strangleObj = strangle.Strangle(orderQuantity=orderQuantity, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n # The parameters below are used to update the prices of the initial strangle above.\n self.__tickData = []\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2790.0), strikePrice=decimal.Decimal(2690),\n delta=0.13, vega=0.03, theta=-0.06, gamma=0.12,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(7.25), askPrice=decimal.Decimal(7.350))\n self.__tickData.append(putOpt)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2790.0),\n strikePrice=decimal.Decimal(2855), delta=-0.20, vega=0.06, theta=-0.07, gamma=0.14,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(5.60), askPrice=decimal.Decimal(5.80))\n self.__tickData.append(callOpt)\n\n def testGetDelta(self):\n \"\"\"Tests that delta values are summed for the strangle.\"\"\"\n self.assertAlmostEqual(self.__strangleObj.getDelta(), -0.01)\n\n def testGetDeltaMultipleContracts(self):\n \"\"\"Tests that delta values are summed for the strangle.\"\"\"\n self.__strangleObj.setNumContracts(2)\n self.assertAlmostEqual(self.__strangleObj.getDelta(), -0.02)\n self.__strangleObj.setNumContracts(1)\n\n def testGetDeltaNoneValue(self):\n \"\"\"Tests that a value of None is returned if one of the delta is None.\"\"\"\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2690, delta=None,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=7.45,\n askPrice=7.50, 
tradePrice=7.475)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2855, delta=-0.16,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=5.20,\n askPrice=5.40, tradePrice=5.30)\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n self.assertIsNone(strangleObj.getDelta())\n\n def testGetVega(self):\n \"\"\"Tests that vega values are summed for the strangle.\"\"\"\n self.assertAlmostEqual(self.__strangleObj.getVega(), 0.09)\n\n def testGetVegaMultipleContracts(self):\n \"\"\"Tests that vega values are summed for the strangle.\"\"\"\n self.__strangleObj.setNumContracts(2)\n self.assertAlmostEqual(self.__strangleObj.getVega(), 0.18)\n self.__strangleObj.setNumContracts(1)\n\n def testGetVegaNoneValue(self):\n \"\"\"Tests that a value of None is returned if one of the vega is None.\"\"\"\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2690, delta=0.15, vega=None,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=7.45,\n askPrice=7.50, tradePrice=7.475)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2855, delta=-0.16, vega=0.05,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=5.20,\n askPrice=5.40, tradePrice=5.30)\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n self.assertIsNone(strangleObj.getVega())\n\n def testGetTheta(self):\n \"\"\"Tests that theta values are summed for the strangle.\"\"\"\n self.assertAlmostEqual(self.__strangleObj.getTheta(), -0.13)\n\n def testGetThetaMultipleContracts(self):\n \"\"\"Tests that theta values are summed for the strangle.\"\"\"\n self.__strangleObj.setNumContracts(2)\n self.assertAlmostEqual(self.__strangleObj.getTheta(), -0.26)\n self.__strangleObj.setNumContracts(1)\n\n def testGetThetaNoneValue(self):\n \"\"\"Tests that a value of None is returned if one of the theta is None.\"\"\"\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2690, delta=0.15, vega=None,\n theta=None, dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=7.45,\n askPrice=7.50, tradePrice=7.475)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2855, delta=-0.16, vega=0.05,\n theta=-0.06, dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=5.20,\n askPrice=5.40, tradePrice=5.30)\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n self.assertIsNone(strangleObj.getTheta())\n\n def testGetGamma(self):\n \"\"\"Tests that gamma values are summed for the strangle.\"\"\"\n self.assertAlmostEqual(self.__strangleObj.getGamma(), 0.23)\n\n def testGetGammaMultipleContracts(self):\n \"\"\"Tests that theta values are summed for the strangle.\"\"\"\n self.__strangleObj.setNumContracts(2)\n self.assertAlmostEqual(self.__strangleObj.getGamma(), 0.46)\n self.__strangleObj.setNumContracts(1)\n\n 
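  # Illustrative sketch of where the 0.46 asserted above comes from, assuming the setUp values
  # (put gamma 0.11, call gamma 0.12): the strangle reports numContracts * (putGamma + callGamma),
  # so two contracts give 2 * (0.11 + 0.12) = 0.46. The test name below is hypothetical.
  def testGetGammaManualSumExample(self):
    """Checks the gamma aggregation explicitly against the setUp option greeks."""
    self.__strangleObj.setNumContracts(2)
    self.assertAlmostEqual(self.__strangleObj.getGamma(), 2 * (0.11 + 0.12))
    self.__strangleObj.setNumContracts(1)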
def testGetGammaNoneValue(self):\n \"\"\"Tests that a value of None is returned if one of the theta is None.\"\"\"\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2690, delta=0.15, vega=None,\n theta=None, gamma=None, dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=7.45,\n askPrice=7.50, tradePrice=7.475)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2855, delta=-0.16, vega=0.05,\n theta=-0.06, gamma=0.12, dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=5.20,\n askPrice=5.40, tradePrice=5.30)\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n self.assertIsNone(strangleObj.getGamma())\n\n def testStrangleCalcProfitLossNoDataUpdate(self):\n \"\"\"Tests that the profit / loss is zero if we haven't updated the option.\"\"\"\n self.assertAlmostEqual(self.__strangleObj.calcProfitLoss(), decimal.Decimal(0.0))\n\n def testStrangleCalcProfitLossWithDataUpdateSellingStrangle(self):\n \"\"\"Tests that the profit / loss is calculated correctly when new data is available.\"\"\"\n self.__strangleObj.updateValues(self.__tickData)\n self.assertAlmostEqual(self.__strangleObj.calcProfitLoss(), decimal.Decimal(-22.5))\n\n def testStrangleCalcProfitLossWithDataUpdateBuyingStrangle(self):\n \"\"\"Tests that the profit / loss is calculated correctly when buying a strangle.\"\"\"\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2690),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(7.45), askPrice=decimal.Decimal(7.50), tradePrice=decimal.Decimal(7.475))\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2855),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(5.20), askPrice=decimal.Decimal(5.40),\n tradePrice=decimal.Decimal(5.30))\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.BUY)\n # The parameters below are used to update the prices of the initial strangle above.\n tickData = []\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2790.0),\n strikePrice=decimal.Decimal(2690),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(7.25), askPrice=decimal.Decimal(7.350))\n tickData.append(putOpt)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2790.0),\n strikePrice=decimal.Decimal(2855),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(5.60), askPrice=decimal.Decimal(5.80))\n tickData.append(callOpt)\n strangleObj.updateValues(tickData)\n self.assertAlmostEqual(strangleObj.calcProfitLoss(), decimal.Decimal(22.5))\n\n def testStrangeCalcProfitLossPercentage(self):\n \"\"\"Tests that the profit / loss percentage is 
calculated correctly.\"\"\"\n self.__strangleObj.updateValues(self.__tickData)\n self.assertAlmostEqual(self.__strangleObj.calcProfitLossPercentage(), decimal.Decimal(-1.76125244618395))\n\n def testStrangleBuyingPower25PercentRule(self):\n # Tests the buying power calculation for the 25% rule.\n buyingPower = self.__strangleObj.getBuyingPower()\n self.assertAlmostEqual(buyingPower, decimal.Decimal(63309.99999999997))\n\n def testStrangleBuyingPower15PercentRule(self):\n # Tests the buying power calculation for the 15% rule.\n # TODO(msantoro): Get live values from Tastyworks to use here.\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2500), delta=0.01,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(1.45), askPrice=decimal.Decimal(1.50), tradePrice=decimal.Decimal(1.475))\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(3200), delta=-0.01,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(1.20), askPrice=decimal.Decimal(1.40),\n tradePrice=decimal.Decimal(1.30))\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n buyingPower = strangleObj.getBuyingPower()\n self.assertAlmostEqual(buyingPower, decimal.Decimal(48130.0))\n\n def testStrangleUpdateValuesNoMatchingOption(self):\n \"\"\"Tests that the profit loss calculation is unchanged if no option is available to update.\"\"\"\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2690),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(7.45), askPrice=decimal.Decimal(7.50), tradePrice=decimal.Decimal(7.475))\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2786.24),\n strikePrice=decimal.Decimal(2855),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(5.20), askPrice=decimal.Decimal(5.40),\n tradePrice=decimal.Decimal(5.30))\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.BUY)\n initialProfitLoss = strangleObj.calcProfitLoss()\n\n tickData = []\n # Changed the PUT strike price from 2690 to 2790 to prevent a match.\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2790.0), strikePrice=decimal.Decimal(2790),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(7.25), askPrice=decimal.Decimal(7.350))\n tickData.append(putOpt)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=decimal.Decimal(2790.0),\n strikePrice=decimal.Decimal(2855),\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(5.60), askPrice=decimal.Decimal(5.80))\n tickData.append(callOpt)\n strangleObj.updateValues(tickData)\n\n # The profit / loss should 
be the same since the option wasn't updated.\n self.assertAlmostEqual(strangleObj.calcProfitLoss(), initialProfitLoss)\n\n def testStrangeGetNumberOfDaysLeft(self):\n \"\"\"Tests that we calculate the number of days between two date / times correctly.\"\"\"\n putOpt = put.Put(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2690, delta=0.15, vega=None,\n theta=None, gamma=None, dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=7.45,\n askPrice=7.50, tradePrice=7.475)\n callOpt = call.Call(underlyingTicker='SPX', underlyingPrice=2786.24, strikePrice=2855, delta=-0.16, vega=0.05,\n theta=-0.06, gamma=0.12, dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/20/2021', \"%m/%d/%Y\"), bidPrice=5.20,\n askPrice=5.40, tradePrice=5.30)\n strangleObj = strangle.Strangle(orderQuantity=1, callOpt=callOpt, putOpt=putOpt,\n buyOrSell=optionPrimitive.TransactionType.SELL)\n self.assertEqual(strangleObj.getNumberOfDaysLeft(), 19)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "3125758", "language": "Python", "matching_score": 6.095303058624268, "max_stars_count": 82, "path": "optionPrimitives/strangleTest.py" }, { "content": "from base import call\nfrom base import option\nfrom base import put\nfrom optionPrimitives import optionPrimitive\nfrom typing import Iterable, Optional, Text\nimport decimal\nimport logging\n\nclass Strangle(optionPrimitive.OptionPrimitive):\n \"\"\"This class sets up the strangle option primitive.\n\n Attributes:\n orderQuantity: number of strangles\n callOpt: call option\n putOpt: put option\n buyOrSell: Indicates if we want to buy or sell the strangle.\n \"\"\"\n def __init__(self, orderQuantity: int, callOpt: call.Call, putOpt: put.Put,\n buyOrSell: optionPrimitive.TransactionType) -> None:\n\n if orderQuantity < 1:\n raise ValueError('Order quantity must be a positive (> 0) number.')\n self.__numContracts = orderQuantity\n self.__putOpt = putOpt\n self.__callOpt = callOpt\n self.__buyOrSell = buyOrSell\n\n def getUnderlyingTicker(self) -> Optional[Text]:\n \"\"\"Get the name of the underlying being used for the strangle.\"\"\"\n if self.__putOpt.underlyingTicker is not None:\n return self.__putOpt.underlyingTicker\n return None\n\n def getDelta(self) -> Optional[float]:\n \"\"\"Get the delta for the strangle.\n\n :return Delta of strangle or None if deltas don't exist for both options.\n \"\"\"\n if self.__putOpt.delta is not None and self.__callOpt.delta is not None:\n return self.__numContracts * (self.__putOpt.delta + self.__callOpt.delta)\n return None\n\n def getVega(self) -> Optional[float]:\n \"\"\"Get the vega for the strangle.\n\n :return Vega of strangle or None if vegas don't exist for both options.\n \"\"\"\n if self.__putOpt.vega is not None and self.__callOpt.vega is not None:\n return self.__numContracts * (self.__putOpt.vega + self.__callOpt.vega)\n return None\n\n def getTheta(self) -> Optional[float]:\n \"\"\"Get the theta for the strangle.\n\n :return Theta of strange or None if thetas don't exist for both options.\n \"\"\"\n if self.__putOpt.theta is not None and self.__callOpt.theta is not None:\n return self.__numContracts * (self.__putOpt.theta + self.__callOpt.theta)\n return None\n\n def getGamma(self) -> Optional[float]:\n \"\"\"Get the gamma for the strangle.\n\n :return Gamma of strange or None if gammas don't exist for both options.\n \"\"\"\n if 
self.__putOpt.gamma is not None and self.__callOpt.gamma is not None:\n return self.__numContracts * (self.__putOpt.gamma + self.__callOpt.gamma)\n return None\n\n def setNumContracts(self, numContracts: int) -> None:\n \"\"\"Sets the number of contracts for the strangle primitive.\n :param numContracts: Number of strangle contracts we want to put on.\n \"\"\"\n self.__numContracts = numContracts\n\n def calcProfitLoss(self) -> decimal.Decimal:\n \"\"\"Calculate the profit and loss for the strangle position using option values when the trade\n was placed and new option values. Note that profit and loss are reversed if we buy or sell a put/call;\n if we buy a put/call, we want the option value to increase; if we sell a put/call, we want the option value\n to decrease.\n\n :return: Profit / loss (positive decimal for profit, negative decimal for loss).\n \"\"\"\n # Handle profit / loss for put first.\n putProfitLoss = self.__putOpt.calcOptionPriceDiff()\n callProfitLoss = self.__callOpt.calcOptionPriceDiff()\n\n # If we're buying the strangle, we have the opposite of the selling case.\n if self.__buyOrSell == optionPrimitive.TransactionType.BUY:\n putProfitLoss = -putProfitLoss\n callProfitLoss = -callProfitLoss\n\n # Add the profit / loss of put and call, and multiply by the number of contracts.\n totProfitLoss = (putProfitLoss + callProfitLoss) * self.__numContracts\n return totProfitLoss\n\n def calcProfitLossPercentage(self) -> float:\n \"\"\"Calculate the profit and loss for the strangle position as a percentage of the initial trade price.\n\n :return: Profit / loss as a percentage of the initial option prices. Returns negative percentage for a loss.\n \"\"\"\n # Add the profit / loss of put and call.\n totProfitLoss = self.calcProfitLoss()\n\n # Get the initial credit or debit paid for selling or buying the strangle, respectively.\n callCreditDebit = self.__callOpt.tradePrice\n putCreditDebit = self.__putOpt.tradePrice\n totCreditDebit = (callCreditDebit + putCreditDebit) * 100\n\n # Express totProfitLoss as a percentage.\n percentProfitLoss = (totProfitLoss / totCreditDebit) * 100\n return percentProfitLoss\n\n def getNumContracts(self) -> int:\n \"\"\"Returns the total number of strangles.\"\"\"\n return self.__numContracts\n\n def getBuyingPower(self) -> decimal.Decimal:\n \"\"\"The formula for calculating buying power is based off of TastyWorks. This is for cash settled indices!\n There are two possible methods to calculate buying power, and the method which generates the maximum possible\n buying power is the one chosen.\n\n :return: Amount of buying power required to put on the trade.\n \"\"\"\n # Method 1 - 25% rule -- 25% of the underlying, less the difference between the strike price and the stock\n # price, plus the option value, multiplied by number of contracts. 
Use one of the options to get underlying\n # price (call option used here).\n underlyingPrice = self.__callOpt.underlyingPrice\n\n # Handle call side of strangle.\n callBuyingPower1 = ((decimal.Decimal(0.25) * underlyingPrice)-(\n self.__callOpt.strikePrice - underlyingPrice) + self.__callOpt.tradePrice) * self.__numContracts * 100\n # Handle put side of strangle.\n putBuyingPower1 = ((decimal.Decimal(0.25) * underlyingPrice)-(\n underlyingPrice - self.__putOpt.strikePrice) + self.__putOpt.tradePrice) * self.__numContracts * 100\n methodOneBuyingPower = max(callBuyingPower1, putBuyingPower1)\n\n # Method 2 - 15% rule -- 15% of the exercise value plus premium value.\n # Handle call side of strangle.\n callBuyingPower2 = (decimal.Decimal(0.15) * self.__callOpt.strikePrice + self.__callOpt.tradePrice) * (\n self.__numContracts * 100)\n # Handle put side of strangle.\n putBuyingPower2 = (decimal.Decimal(0.15) * self.__putOpt.strikePrice + self.__putOpt.tradePrice) * (\n self.__numContracts * 100)\n methodTwoBuyingPower = max(callBuyingPower2, putBuyingPower2)\n\n return max(methodOneBuyingPower, methodTwoBuyingPower)\n\n def updateValues(self, tickData: Iterable[option.Option]) -> None:\n \"\"\"Based on the latest pricing data, update the option values for the strangle.\n\n :param tickData: option chain with pricing information (puts, calls)\n \"\"\"\n # Work with put option first.\n putOpt = self.__putOpt\n putOptSymbol = putOpt.optionSymbol\n putStrike = putOpt.strikePrice\n putExpiration = putOpt.expirationDateTime\n\n # Go through the tickData to find the PUT option with a strike price that matches the putStrike above.\n # Note that this should not return more than one option since we specify the strike price, expiration,\n # option type (PUT), and option symbol.\n # TODO: we can speed this up by indexing / keying the options by option symbol.\n matchingPutOption = None\n for currentOption in tickData:\n if (currentOption.strikePrice == putStrike and currentOption.expirationDateTime == putExpiration and (\n currentOption.optionSymbol == putOptSymbol)):\n matchingPutOption = currentOption\n break\n\n if not matchingPutOption:\n logging.warning(\"No matching PUT was found in the option chain for the strangle; cannot update strangle.\")\n return\n\n # Work with call option.\n callOpt = self.__callOpt\n callOptSymbol = callOpt.optionSymbol\n callStrike = callOpt.strikePrice\n callExpiration = callOpt.expirationDateTime\n\n # Go through the tickData to find the CALL option with a strike price that matches the callStrike above\n # Note that this should not return more than one option since we specify the strike price, expiration,\n # the option type (CALL), and option symbol.\n # TODO: we can speed this up by indexing / keying the options by option symbol.\n matchingCallOption = None\n for currentOption in tickData:\n if (currentOption.strikePrice == callStrike and currentOption.expirationDateTime == callExpiration and (\n currentOption.optionSymbol == callOptSymbol)):\n matchingCallOption = currentOption\n break\n\n if not matchingCallOption:\n logging.warning(\"No matching CALL was found in the option chain for the strangle; cannot update strangle.\")\n return\n\n # If we were able to find an update for both the put and call option, we update option intrinsics.\n if matchingCallOption and matchingPutOption:\n # Update option intrinsics\n putOpt.updateOption(matchingPutOption)\n callOpt.updateOption(matchingCallOption)\n\n def getNumberOfDaysLeft(self) -> int:\n \"\"\"\n Determine the number of 
days between the dateTime and the expirationDateTime.\n :return: number of days between curDateTime and expDateTime.\n \"\"\"\n # Since we require the put and call options to have the same dateTime and expirationDateTime, we can use either\n # option to get the number of days until expiration.\n putOpt = self.__putOpt\n currentDateTime = putOpt.dateTime\n expirationDateTime = putOpt.expirationDateTime\n return (expirationDateTime - currentDateTime).days\n", "id": "11524052", "language": "Python", "matching_score": 4.6457109451293945, "max_stars_count": 82, "path": "optionPrimitives/strangle.py" }, { "content": "import abc\nimport decimal\nimport enum\nfrom base import option\nfrom typing import Iterable\n\nclass TransactionType(enum.Enum):\n BUY = 0\n SELL = 1\n\nclass OptionPrimitive(abc.ABC):\n \"\"\"This class is a generic type for any primitive that can be made using a PUT or CALL option and/or stock,\n e.g., iron condor or strangle.\n \"\"\"\n\n @abc.abstractmethod\n def getBuyingPower(self) -> decimal.Decimal:\n \"\"\"Used to calculate the buying power needed for the option primitive.\"\"\"\n pass\n\n @abc.abstractmethod\n def getDelta(self) -> float:\n \"\"\"Used to get the delta for the option primitive.\"\"\"\n pass\n\n @abc.abstractmethod\n def getVega(self) -> float:\n \"\"\"Used to get the vega for the option primitive.\"\"\"\n pass\n\n @abc.abstractmethod\n def getTheta(self) -> float:\n \"\"\"Used to get the theta for the option primitive.\"\"\"\n pass\n\n @abc.abstractmethod\n def getGamma(self) -> float:\n \"\"\"Used to get the gamma for the option primitive.\"\"\"\n pass\n\n @abc.abstractmethod\n def calcProfitLoss(self) -> decimal.Decimal:\n \"\"\"Calculate the profit and loss for the option primitive based on option values when the trade was placed and new\n option values.\n\n :return: Profit / loss (positive decimal for profit, negative decimal for loss).\n \"\"\"\n pass\n\n @abc.abstractmethod\n def calcProfitLossPercentage(self) -> float:\n \"\"\"Calculate the profit and loss for the option primitive based on option values when the trade was placed and new\n option values.\n\n :return: Profit / loss as a percentage of the initial option prices. 
Returns negative percentage for a loss.\n \"\"\"\n pass\n\n @abc.abstractmethod\n def updateValues(self, tickData: Iterable[option.Option]) -> bool:\n \"\"\"Based on the latest pricing data, update the option values.\n :param tickData: option chain with pricing information.\n :return True if we were able to update values, false otherwise.\n \"\"\"\n pass\n", "id": "5047968", "language": "Python", "matching_score": 1.5642471313476562, "max_stars_count": 82, "path": "optionPrimitives/optionPrimitive.py" }, { "content": "import unittest\nimport pytz\nimport queue\nfrom datetime import datetime\nfrom dataHandler import csvData\nfrom events import tickEvent\nfrom optionPrimitives import optionPrimitive\nfrom riskManagement import strangleRiskManagement\nfrom strategyManager import strangleStrat\n\nclass TestStrangleStrategy(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Create instance of strangle strategy\n Strangle specific attributes:\n optCallDelta: Optimal delta for call, usually around 16 delta.\n maxCallDelta: Max delta for call, usually around 30 delta.\n optPutDelta: Optimal delta for put, usually around 16 delta.\n maxPutDelta: Max delta for put, usually around 30 delta.\n\n General strategy attributes:\n startDateTime: Date/time to start the live trading or backtest.\n strategy: Option strategy to use -- e.g., iron condor, strangle\n buyOrSell: Do we buy an iron condor or sell an iron condor? 0 = buy, 1 = sell.\n underlying: Which underlying to use for the strategy.\n orderQuantity: Number of strangles, iron condors, etc.\n daysBeforeClose: Number of days before expiration to close the trade.\n\n Optional attributes:\n expCycle: Specifies if we want to do monthly ('m'); unspecified means we can do weekly, quarterly, etc.\n optimalDTE: Optimal number of days before expiration to put on strategy.\n minimumDTE: Minimum number of days before expiration to put on strategy.\n roc: Minimal return on capital for overall trade as a decimal.\n minDaysToEarnings: Minimum number of days to put on trade before earnings.\n minCredit: Minimum credit to collect on overall trade.\n maxBuyingPower: Maximum buying power to use on overall trade.\n profitTargetPercent: Percentage of initial credit to use when closing trade.\n maxBidAsk: Maximum price to allow between bid and ask prices of option (for any strike or put/call).\n maxMidDev: Maximum deviation from midprice on opening and closing of trade (e.g., 0.02 cents from midprice).\n minDaysSinceEarnings: Minimum number of days to wait after last earnings before putting on strategy.\n minIVR: Minimum implied volatility rank needed to put on strategy.\n \"\"\"\n # Use CSV data source to test.\n tickEventQueue = queue.Queue()\n dataProvider = 'iVolatility'\n filename = '/Users/msantoro/PycharmProjects/Backtester/sampleData/aapl_sample_ivolatility.csv'\n csvObj = csvData.CsvData(csvPath=filename, dataProvider=dataProvider, eventQueue=tickEventQueue)\n csvObj.getNextTick()\n self.optionChain = tickEventQueue.get()\n\n # Create strangle strategy object.\n self.signalEventQueue = queue.Queue()\n self.optCallDelta = 0.16\n self.maxCallDelta = 0.30\n self.optPutDelta = -0.16\n self.maxPutDelta = -0.30\n self.startDateTime = datetime.now(pytz.utc)\n self.buyOrSell = optionPrimitive.TransactionType.SELL\n self.underlyingTicker = 'AAPL'\n self.orderQuantity = 1\n self.riskManagement = strangleRiskManagement.StrangleRiskManagement(\n strangleRiskManagement.StrangleManagementStrategyTypes.HOLD_TO_EXPIRATION)\n self.expCycle = 
strangleStrat.strategy.ExpirationTypes.MONTHLY\n self.optimalDTE = 45\n self.minimumDTE = 25\n self.minimumROC = 0.001\n self.minCredit = 0.5\n self.maxBidAsk = 0.15\n self.minBuyingPower = None\n self.curStrategy = strangleStrat.StrangleStrat(self.signalEventQueue, self.optCallDelta, self.maxCallDelta,\n self.optPutDelta, self.maxPutDelta, self.startDateTime,\n self.buyOrSell, self.underlyingTicker, self.orderQuantity,\n self.riskManagement, self.expCycle, self.optimalDTE, self.minimumDTE,\n self.minimumROC, self.minCredit, self.maxBidAsk, self.minBuyingPower)\n\n\n def testUpdateWithOptimalOptionNonSupportedExpiration(self):\n \"\"\"Tests that no signal event is created if we choose an unsupported expiration.\"\"\"\n expCycle = strangleStrat.strategy.ExpirationTypes.QUARTERLY\n curStrategy = strangleStrat.StrangleStrat(self.signalEventQueue, self.optCallDelta, self.maxCallDelta,\n self.optPutDelta, self.maxPutDelta, self.startDateTime,\n self.buyOrSell, self.underlyingTicker, self.orderQuantity,\n expCycle, self.optimalDTE, self.minimumDTE, self.minimumROC,\n self.minCredit, self.maxBidAsk, self.minBuyingPower)\n curStrategy.checkForSignal(self.optionChain)\n self.assertEqual(self.signalEventQueue.qsize(), 0)\n\n def testUpdateWithOptimalOptionNotMonthlyExpiration(self):\n \"\"\"Tests that no signal event is created if we do not have a monthly expiration.\"\"\"\n # These options do not have a monthly expiration.\n testOptionChain = [self.optionChain.getData()[0], self.optionChain.getData()[1]]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n self.assertEqual(self.signalEventQueue.qsize(), 0)\n\n def testUpdateWithOptimalOptionDTELessThanMinimum(self):\n \"\"\"Tests that no signal event is created if the number of days to expiration is less than minimum.\"\"\"\n callOption = self.optionChain.getData()[0]\n putOption = self.optionChain.getData()[1]\n # Modify expiration to be a monthly expiration, but set the number of days such that it is less than\n # self.minimumDTE.\n callOption.expirationDateTime = datetime.fromisoformat('2014-08-15')\n putOption.expirationDateTime = datetime.fromisoformat('2014-08-15')\n testOptionChain = [callOption, putOption]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n self.assertEqual(self.signalEventQueue.qsize(), 0)\n\n def testUpdateWithOptimalOptionDeltaGreaterThanMaxCallDelta(self):\n \"\"\"Tests that no signal event is created if the call delta is greater than the max delta.\"\"\"\n callOption = self.optionChain.getData()[0]\n putOption = self.optionChain.getData()[1]\n # Set expiration to be monthly and less than self.minimumDTE.\n callOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n # Modify delta of call option to be greater than max delta.\n callOption.delta = self.maxCallDelta*2\n putOption.delta = self.optPutDelta\n testOptionChain = [callOption, putOption]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n self.assertEqual(self.signalEventQueue.qsize(), 0)\n\n def testUpdateWithOptimalOptionDeltaGreaterThanMaxPutDelta(self):\n \"\"\"Tests that no signal event is created if the put delta is greater than the max delta.\"\"\"\n callOption = self.optionChain.getData()[0]\n putOption = self.optionChain.getData()[1]\n # Set expiration to be monthly and less than self.minimumDTE.\n 
callOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n # Modify delta of put option to be greater than max delta.\n putOption.delta = self.maxPutDelta * 2\n callOption.delta = self.optCallDelta\n testOptionChain = [callOption, putOption]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n self.assertEqual(self.signalEventQueue.qsize(), 0)\n\n def testUpdateWithOptimalOptionBidAskDiffGreaterThanMax(self):\n \"\"\"Tests that no signal event is created if the bid/ask difference is greater than the max bid/ask.\"\"\"\n callOption = self.optionChain.getData()[0]\n putOption = self.optionChain.getData()[1]\n # Set expiration to be monthly and less than self.minimumDTE.\n callOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n # Set put and call delta to be the desired values.\n putOption.delta = self.optPutDelta\n callOption.delta = self.optCallDelta\n # Set the bidPrice and askPrice such that the difference is greater than self.maxBidAsk.\n putOption.bidPrice = 0.00\n putOption.askPrice = self.maxBidAsk*2\n callOption.bidPrice = 0.00\n callOption.askPrice = self.maxBidAsk\n testOptionChain = [callOption, putOption]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n self.assertEqual(self.signalEventQueue.qsize(), 0)\n\n def testUpdateWithOptimalOptionChooseCloserExpiration(self):\n \"\"\"Tests that we choose the option with the expiration date closer to self.optimalDTE.\"\"\"\n callOptionNonOptimalDTE = self.optionChain.getData()[0]\n callOptionOptimalDTE = self.optionChain.getData()[2]\n putOptionNonOptimalDTE = self.optionChain.getData()[1]\n putOptionOptimalDTE = self.optionChain.getData()[3]\n\n # Set expiration to be monthly and less than self.minimumDTE.\n callOptionNonOptimalDTE.expirationDateTime = datetime.fromisoformat('2014-10-17')\n callOptionOptimalDTE.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOptionNonOptimalDTE.expirationDateTime = datetime.fromisoformat('2014-10-17')\n putOptionOptimalDTE.expirationDateTime = datetime.fromisoformat('2014-09-19')\n\n # Set put and call delta. 
We use these delta values to check that the options with the optimal DTE\n # were chosen.\n callOptionNonOptimalDTE.delta = self.optCallDelta\n callOptionOptimalDTE.delta = 0.20\n putOptionNonOptimalDTE.delta = self.optPutDelta\n putOptionOptimalDTE.delta = -0.10\n\n # Set the bidPrice and askPrice such that the difference is less than self.maxBidAsk.\n callOptionNonOptimalDTE.bidPrice = 0.00\n callOptionNonOptimalDTE.askPrice = self.maxBidAsk\n callOptionOptimalDTE.bidPrice = 0.00\n callOptionOptimalDTE.askPrice = self.maxBidAsk\n putOptionNonOptimalDTE.bidPrice = 0.00\n putOptionNonOptimalDTE.askPrice = self.maxBidAsk\n putOptionOptimalDTE.bidPrice = 0.00\n putOptionOptimalDTE.askPrice = self.maxBidAsk\n\n testOptionChain = [callOptionNonOptimalDTE, putOptionNonOptimalDTE, callOptionOptimalDTE, putOptionOptimalDTE]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n strangleObj = self.signalEventQueue.get().getData()[0]\n self.assertAlmostEqual(strangleObj.getDelta(), callOptionOptimalDTE.delta + putOptionOptimalDTE.delta)\n\n def testUpdateWithOptimalOptionChooseCloserDelta(self):\n \"\"\"Tests that if options have the same DTE, chose option with the delta closer to requested delta.\"\"\"\n callOptionNonOptimalDelta = self.optionChain.getData()[0]\n callOptionOptimalDelta = self.optionChain.getData()[2]\n putOptionNonOptimalDelta = self.optionChain.getData()[1]\n putOptionOptimalDelta = self.optionChain.getData()[3]\n\n # Set expiration to be the same, monthly, and less than self.minimumDTE.\n callOptionNonOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n callOptionOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOptionNonOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOptionOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n\n # Set put and call delta. We use these delta values to check that the options with the optimal DTE\n # were chosen.\n callOptionNonOptimalDelta.delta = 0.20\n callOptionOptimalDelta.delta = self.optCallDelta\n putOptionNonOptimalDelta.delta = -0.10\n putOptionOptimalDelta.delta = self.optPutDelta\n\n # Set the bidPrice and askPrice such that the difference is less than self.maxBidAsk.\n callOptionNonOptimalDelta.bidPrice = 0.00\n callOptionNonOptimalDelta.askPrice = self.maxBidAsk\n callOptionOptimalDelta.bidPrice = 0.00\n callOptionOptimalDelta.askPrice = self.maxBidAsk\n putOptionNonOptimalDelta.bidPrice = 0.00\n putOptionNonOptimalDelta.askPrice = self.maxBidAsk\n putOptionOptimalDelta.bidPrice = 0.00\n putOptionOptimalDelta.askPrice = self.maxBidAsk\n\n testOptionChain = [callOptionNonOptimalDelta, putOptionNonOptimalDelta, callOptionOptimalDelta, putOptionOptimalDelta]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n strangleObj = self.signalEventQueue.get().getData()[0]\n self.assertAlmostEqual(strangleObj.getDelta(), callOptionOptimalDelta.delta + putOptionOptimalDelta.delta)\n\n def testUpdateWithOptimalOptionCurrentOptionHasFurtherDTE(self):\n \"\"\"Tests second put[3] and call[2] options are not chosen as the optimal options because their deltas are further\n from the requested delta. 
All put and call options have the same expiration.\"\"\"\n callOptionOptimalDelta = self.optionChain.getData()[0]\n callOptionNonOptimalDelta = self.optionChain.getData()[2]\n putOptionOptimalDelta = self.optionChain.getData()[1]\n putOptionNonOptimalDelta = self.optionChain.getData()[3]\n\n # Set expiration to be the same, monthly, and less than self.minimumDTE.\n callOptionOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n callOptionNonOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOptionOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOptionNonOptimalDelta.expirationDateTime = datetime.fromisoformat('2014-09-19')\n\n # Set put and call delta. We use these delta values to check that the options with the optimal DTE\n # were chosen.\n callOptionOptimalDelta.delta = self.optCallDelta\n callOptionNonOptimalDelta.delta = 0.05\n putOptionOptimalDelta.delta = self.optPutDelta\n putOptionNonOptimalDelta.delta = -0.1\n\n # Set the bidPrice and askPrice such that the difference is less than self.maxBidAsk.\n callOptionOptimalDelta.bidPrice = 0.00\n callOptionOptimalDelta.askPrice = self.maxBidAsk\n callOptionNonOptimalDelta.bidPrice = 0.00\n callOptionNonOptimalDelta.askPrice = self.maxBidAsk\n putOptionOptimalDelta.bidPrice = 0.00\n putOptionOptimalDelta.askPrice = self.maxBidAsk\n putOptionNonOptimalDelta.bidPrice = 0.00\n putOptionNonOptimalDelta.askPrice = self.maxBidAsk\n\n testOptionChain = [callOptionOptimalDelta, putOptionOptimalDelta, callOptionNonOptimalDelta,\n putOptionNonOptimalDelta]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n strangleObj = self.signalEventQueue.get().getData()[0]\n self.assertAlmostEqual(strangleObj.getDelta(), callOptionOptimalDelta.delta + putOptionOptimalDelta.delta)\n\n def testCheckForSignalCallAndPutWithDifferentExpirations(self):\n \"\"\"Tests that no signal event is created if put and call options have different expirations.\"\"\"\n callOption = self.optionChain.getData()[0]\n putOption = self.optionChain.getData()[1]\n # Set expiration to be monthly and less than self.minimumDTE.\n callOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOption.expirationDateTime = datetime.fromisoformat('2014-10-17')\n # Set put and call delta to be the desired values.\n putOption.delta = self.optPutDelta\n callOption.delta = self.optCallDelta\n # Set the bidPrice and askPrice such that the difference is less than self.maxBidAsk.\n putOption.bidPrice = 0.00\n putOption.askPrice = self.maxBidAsk\n callOption.bidPrice = 0.00\n callOption.askPrice = self.maxBidAsk\n testOptionChain = [callOption, putOption]\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n self.curStrategy.checkForSignal(event)\n self.assertEqual(self.signalEventQueue.qsize(), 0)\n\n def testCheckForSignalSetMinBuyingPowerToForceMoreContracts(self):\n \"\"\"Checks that more than one strangle is created if minBuyingPower is set.\"\"\"\n callOption = self.optionChain.getData()[0]\n putOption = self.optionChain.getData()[1]\n # Set expiration to be monthly and less than self.minimumDTE.\n callOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n putOption.expirationDateTime = datetime.fromisoformat('2014-09-19')\n # Set put and call delta to be the desired values.\n putOption.delta = self.optPutDelta\n callOption.delta = self.optCallDelta\n # Set the bidPrice and askPrice such that the difference is less than 
self.maxBidAsk.\n putOption.bidPrice = 0.00\n putOption.askPrice = self.maxBidAsk\n callOption.bidPrice = 0.00\n callOption.askPrice = self.maxBidAsk\n testOptionChain = [callOption, putOption]\n\n # Set minimum buying power to force more strangles to be added.\n minBuyingPower = 20600 # two strangles in 'AAPL' for test data.\n curStrategy = strangleStrat.StrangleStrat(self.signalEventQueue, self.optCallDelta, self.maxCallDelta,\n self.optPutDelta, self.maxPutDelta, self.startDateTime,\n self.buyOrSell, self.underlyingTicker, self.orderQuantity,\n self.riskManagement, self.expCycle, self.optimalDTE, self.minimumDTE,\n self.minimumROC, self.minCredit, self.maxBidAsk, minBuyingPower)\n\n event = tickEvent.TickEvent()\n event.createEvent(testOptionChain)\n curStrategy.checkForSignal(event)\n strangleObj = self.signalEventQueue.get().getData()[0]\n self.assertEqual(strangleObj.getNumContracts(), 2)\n #self.assertEqual(self.signalEventQueue.qsize(), 0)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "696935", "language": "Python", "matching_score": 6.391848087310791, "max_stars_count": 82, "path": "strategyManager/strangleStratTest.py" }, { "content": "import decimal\nimport queue\nfrom dataHandler import csvData\nfrom events import event as event_class\nfrom optionPrimitives import optionPrimitive\nfrom riskManagement import strangleRiskManagement\nfrom strategyManager import strategy, strangleStrat\nfrom portfolioManager import portfolio\nfrom datetime import datetime\nimport pytz\nimport logging\n\n\"\"\"\nThis file contains a basic strategy example, and can be thought of \nas an end-to-end test of the whole Backtester project.\nIn this example, we actually backtest a strategy and do not use\nthe suite for live trading. Live trading is currently not supported.\n\"\"\"\n\nclass BackTestSession(object):\n \"\"\"Class for holding all parameters of backtesting session.\"\"\"\n\n def __init__(self):\n # Create queue to hold events (ticks, signals, etc.).\n self.eventQueue = queue.Queue()\n\n # Create CsvData class object.\n dataProvider = 'iVolatility'\n filename = '/Users/msantoro/PycharmProjects/Backtester/marketData/iVolatility/SPX/combinedCSV.csv'\n self.dataHandler = csvData.CsvData(csvPath=filename, dataProvider=dataProvider, eventQueue=self.eventQueue)\n\n # Parameters for strangle strategy -- TODO: move params to file.\n optCallDelta = 0.16\n maxCallDelta = 0.30\n optPutDelta = -0.16\n maxPutDelta = -0.30\n startDateTime = datetime.now(pytz.utc)\n buyOrSell = optionPrimitive.TransactionType.SELL\n underlyingTicker = 'SPX'\n orderQuantity = 1\n expCycle = strategy.ExpirationTypes.MONTHLY\n optimalDTE = 45\n minimumDTE = 25\n minCredit = 0.5\n maxBidAsk = 15 # A general rule of thumb is to take 0.001*underlyingPrice. 
Set to 15 to mostly ignore field.\n startingCapital = decimal.Decimal(1000000)\n maxCapitalToUse = 0.5 # Up to 50% of net liq can be used in trades.\n maxCapitalToUsePerTrade = 0.10 # 10% max capital to use per trade / strategy.\n minBuyingPower = decimal.Decimal(maxCapitalToUsePerTrade)*startingCapital\n\n # Set up strategy (strangle strategy) and risk management preference.\n riskManagement = strangleRiskManagement.StrangleRiskManagement(\n strangleRiskManagement.StrangleManagementStrategyTypes.HOLD_TO_EXPIRATION)\n self.strategyManager = strangleStrat.StrangleStrat(self.eventQueue, optCallDelta, maxCallDelta, optPutDelta,\n maxPutDelta, startDateTime, buyOrSell, underlyingTicker,\n orderQuantity, riskManagement, expCycle, optimalDTE,\n minimumDTE, minCredit=minCredit, maxBidAsk=maxBidAsk,\n minBuyingPower=minBuyingPower)\n # Set up portfolio.\n self.portfolioManager = portfolio.Portfolio(startingCapital, maxCapitalToUse, maxCapitalToUsePerTrade)\n\ndef run(session):\n while (1): #Infinite loop to keep processing items in queue.\n try:\n event = session.eventQueue.get(False)\n except queue.Empty:\n #Get data for tick event.\n if not session.dataHandler.getNextTick():\n # Get out of infinite while loop; no more data available.\n break\n else:\n if event is not None:\n if event.type == event_class.EventTypes.TICK:\n session.strategyManager.checkForSignal(event)\n session.portfolioManager.updatePortfolio(event)\n elif event.type == event_class.EventTypes.SIGNAL:\n session.portfolioManager.onSignal(event)\n else:\n raise NotImplemented(\"Unsupported event.type '%s'.\" % event.type)\n\nif __name__ == \"__main__\":\n # Set up logging for the session.\n logging.basicConfig(filename='session.log', level=logging.DEBUG)\n\n # Create a session and configure the session.\n session = BackTestSession()\n\n # Run the session.\n run(session)", "id": "2091727", "language": "Python", "matching_score": 6.193804740905762, "max_stars_count": 82, "path": "backTester.py" }, { "content": "from strategyManager import strategy\nfrom events import tickEvent, signalEvent\nfrom optionPrimitives import optionPrimitive, strangle\nfrom base import option\nfrom riskManagement import riskManagement\nfrom typing import Optional, Text, Tuple\nimport datetime\nimport decimal\nimport queue\n\nclass StrangleStrat(strategy.Strategy):\n \"\"\"This class sets up strangle strategy, which involves buying or selling strangles.\n\n Strangle specific attributes:\n optCallDelta: Optimal delta for call.\n maxCallDelta: Max delta for call.\n optPutDelta: Optimal delta for put.\n maxPutDelta: Max delta for put.\n\n General strategy attributes:\n startDateTime: Date/time to start the live trading or backtest.\n buyOrSell: Do we buy a strangle or sell a strangle.\n underlyingTicker: Which underlying to use for the strategy.\n orderQuantity: Number of strangles.\n riskManagement: Risk management strategy (how to manage the trade; e.g., close, roll, hold to expiration).\n\n Optional attributes:\n expCycle: Specifies if we want to do monthly, weekly, quarterly, etc.\n optimalDTE: Optimal number of days before expiration to put on strategy.\n minimumDTE: Minimum number of days before expiration to put on strategy.\n minimumROC: Minimal return on capital for overall trade as a decimal.\n minCredit: Minimum credit to collect on overall trade.\n maxBidAsk: Maximum price to allow between bid and ask prices of option (for any strike or put/call).\n minBuyingPower: Minimum investment we want for the strategy -- since prices vary greatly over a range 
like\n 1990 to 2017, we would like to have the same amount of money in the market at any given\n time, so we increase the number of contracts to reach this minBuyingPower.\n\"\"\"\n\n def __init__(self, eventQueue:queue.Queue, optCallDelta: float, maxCallDelta: float, optPutDelta: float,\n maxPutDelta: float, startDateTime: datetime.datetime, buyOrSell: optionPrimitive.TransactionType,\n underlyingTicker: Text, orderQuantity: int, riskManagement: riskManagement.RiskManagement,\n expCycle: Optional[strategy.ExpirationTypes]=None, optimalDTE: Optional[int]=None,\n minimumDTE: Optional[int]=None, minimumROC: Optional[float]=None,\n minCredit: Optional[decimal.Decimal]=None, maxBidAsk: Optional[decimal.Decimal]=None,\n minBuyingPower: Optional[decimal.Decimal]=None):\n\n self.__eventQueue = eventQueue\n self.__optCallDelta = optCallDelta\n self.__maxCallDelta = maxCallDelta\n self.__optPutDelta = optPutDelta\n self.__maxPutDelta = maxPutDelta\n\n self.startDateTime=startDateTime\n self.buyOrSell=buyOrSell\n self.underlyingTicker=underlyingTicker\n self.orderQuantity=orderQuantity\n self.riskManagement = riskManagement\n self.expCycle=expCycle\n self.optimalDTE=optimalDTE\n self.minimumDTE=minimumDTE\n self.minimumROC=minimumROC\n self.minCredit=minCredit\n self.maxBidAsk=maxBidAsk\n self.minBuyingPower=minBuyingPower\n\n def __updateWithOptimalOption(self, currentOption: option.Option,\n optimalOption: option.Option) -> Tuple[bool, option.Option]:\n \"\"\"Find the option that is closest to the requested parameters (delta, expiration).\n\n :param currentOption: current option from the option chain.\n :param optimalOption: current optimal option based on expiration and delta.\n :return: tuple of (updateOption: bool, optimalOpt: option.Option). updateOption bool is used to indicate if we\n should update the optimal option with the current option.\n \"\"\"\n # noUpdateRule means we don't update the optimal option with the current option.\n noUpdateRule = (False, optimalOption)\n # TODO: Add support for expiration cycles other than monthly.\n if self.expCycle == strategy.ExpirationTypes.MONTHLY:\n if not self.__isMonthlyExp(currentOption.expirationDateTime):\n return noUpdateRule\n else:\n return noUpdateRule\n\n if self.minimumDTE:\n if not self.__hasMinimumDTE(currentOption.dateTime, currentOption.expirationDateTime):\n return noUpdateRule\n\n # Check that delta is less or equal to max delta specified.\n if currentOption.optionType == option.OptionTypes.CALL:\n if currentOption.delta >= self.__maxCallDelta:\n return noUpdateRule\n else:\n # PUT option.\n if currentOption.delta <= self.__maxPutDelta:\n return noUpdateRule\n\n # Check if bid / ask of option < maxBidAsk specific in strangle strategy.\n if self.maxBidAsk:\n if self.__calcBidAskDiff(currentOption.bidPrice, currentOption.askPrice) > self.maxBidAsk:\n return noUpdateRule\n\n # Get current DTE in days.\n currentDTE = self.__getNumDays(currentOption.dateTime, currentOption.expirationDateTime)\n optimalDTE = self.__getNumDays(optimalOption.dateTime, optimalOption.expirationDateTime) if optimalOption else None\n requestedDTE = self.optimalDTE\n\n # Check if there is no current optimal DTE or an expiration closer to the requested expiration.\n newOptimalOption = optimalOption\n if optimalDTE is None or (abs(currentDTE - requestedDTE) < abs(optimalDTE - requestedDTE)):\n newOptimalOption = currentOption\n # Option has same DTE as optimalOpt; check deltas to choose best option.\n elif currentDTE == optimalDTE:\n currentDelta = 
currentOption.delta\n optimalDelta = optimalOption.delta\n if currentOption.optionType == option.OptionTypes.CALL:\n requestedDelta = self.__optCallDelta\n else:\n requestedDelta = self.__optPutDelta\n\n if abs(currentDelta - requestedDelta) < abs(optimalDelta - requestedDelta):\n newOptimalOption = currentOption\n else:\n return (False, newOptimalOption)\n\n return (True, newOptimalOption)\n\n def checkForSignal(self, event: tickEvent) -> None:\n \"\"\"Criteria that we need to check before generating a signal event.\n We go through each option in the option chain and find all of the options that meet the criteria. If there are\n multiple options that meet the criteria, we choose the first one, but we could use some other type of rule.\n\n Attributes:\n event - Tick data we parse through to determine if we want to create a strangle for the strategy.\n \"\"\"\n # These variables will be used to keep track of the optimal options as we go through the option chain.\n optimalCallOpt = None\n optimalPutOpt = None\n\n # Get the data from the tick event.\n eventData = event.getData()\n\n # Process one option at a time from the option chain (objects of option class).\n for currentOption in eventData:\n if currentOption.optionType == option.OptionTypes.CALL:\n updateOption, callOpt = self.__updateWithOptimalOption(currentOption, optimalCallOpt)\n if updateOption:\n optimalCallOpt = callOpt\n else:\n # PUT option\n updateOption, putOpt = self.__updateWithOptimalOption(currentOption, optimalPutOpt)\n if updateOption:\n optimalPutOpt = putOpt\n\n # Must check that both a CALL and PUT were found which meet criteria and are in the same expiration.\n if optimalPutOpt and optimalCallOpt and optimalPutOpt.expirationDateTime == optimalCallOpt.expirationDateTime:\n strangleObj = strangle.Strangle(self.orderQuantity, optimalCallOpt, optimalPutOpt, self.buyOrSell)\n\n # If we are requiring that we always have the same amount of money invested regardless of time frame,\n # then we may need to increase the number of strangles to meet this minBuyingPower requirement.\n minBuyingPower = self.minBuyingPower\n if minBuyingPower:\n buyingPowerUsed = strangleObj.getBuyingPower()\n # Require at least one contract; too much buying power will be rejected in the portfolio class.\n numContractsToAdd = max(1, int(minBuyingPower / buyingPowerUsed))\n strangleObj.setNumContracts(numContractsToAdd)\n\n # Create signal event to put on strangle strategy and add to queue\n # TODO: We need to pass the management strategy to createEvent below.\n signalObj = [strangleObj, self.riskManagement]\n event = signalEvent.SignalEvent()\n event.createEvent(signalObj)\n #event.createEvent(strangleObj)\n self.__eventQueue.put(event)\n\n def __calcBidAskDiff(self, bidPrice: decimal.Decimal, askPrice: decimal.Decimal):\n \"\"\" Calculate the absolute difference between the bid and ask price.\n If any of the arguments are <= 0, return a very large difference (100).\n :param bidPrice: price at which the option can be sold.\n :param askPrice: price at which the option can be bought.\n :return: Absolute difference;\n \"\"\"\n return abs(bidPrice - askPrice)\n\n def __isMonthlyExp(self, dateTime: datetime.datetime):\n \"\"\"\n Check if the option expiration falls on the third Friday of the month, or if the third Friday is a holiday,\n check if the expiration falls on the Thursday that precedes it.\n :param dateTime: option expiration date in mm/dd/yy format.\n :return: True if it's a monthly option; False otherwise.\n \"\"\"\n return 
(dateTime.weekday() == 4 and 14 < dateTime.day < 22)\n\n def __hasMinimumDTE(self, curDateTime: datetime.datetime, expDateTime: datetime.datetime):\n \"\"\"\"\n Determine if the current expiration date of the option is >= self.minimumDTE days from the current date.\n :param curDateTime: current date in mm/dd/yy format.\n :param expDateTime: option expiration date in mm/dd/yy format.\n :return: True if difference between current date and dateTime is >= self.minimumDTE; else False.\n \"\"\"\n return (expDateTime - curDateTime).days >= self.minimumDTE\n\n def __getNumDays(self, curDateTime: datetime.datetime, expDateTime: datetime.datetime):\n \"\"\"\"\n Determine the number of days between the curDateTime and the expDateTime.\n :param curDateTime: current date in mm/dd/yy format.\n :param expDateTime: option expiration date in mm/dd/yy format.\n :return: Number of days between curDateTime and expDateTime.\n \"\"\"\n return (expDateTime - curDateTime).days", "id": "8929929", "language": "Python", "matching_score": 8.620586395263672, "max_stars_count": 82, "path": "strategyManager/StrangleStrat.py" }, { "content": "import abc\nimport dataclasses\nimport datetime\nimport decimal\nimport enum\nfrom events import tickEvent\nfrom optionPrimitives import optionPrimitive\nfrom typing import Optional, Text\n\nclass ExpirationTypes(enum.Enum):\n MONTHLY = 0\n WEEKLY = 1\n QUARTERLY = 2\n\[email protected]\nclass Strategy(abc.ABC):\n \"\"\"This class sets up the basics for every strategy that will be used; For example, if we want to do an iron condor\n or a strangle, there are certain parameters that must be defined.\n\n Attributes:\n startDateTime: Date/time to start the backtest.\n # TODO(add risk management lookup code here so we know how to manage this strategy. The code we create here will\n need to go in the portfolio as well.\n buyOrSell: Do we buy or sell the strategy? E.g. sell a strangle.\n underlyingTicker: Which underlying to use for the strategy.\n orderQuantity: Number of the strategy, e.g. 
number of strangles.\n expCycle: Specifies if we want to do monthly, weekly, quarterly, etc.\n optimalDTE: Optimal number of days before expiration to put on strategy.\n minimumDTE: Minimum number of days before expiration to put on strategy.\n minimumROC: Minimum return on capital for overall trade as a decimal.\n minCredit: Minimum credit to collect on overall trade.\n maxBidAsk: Maximum price to allow between bid and ask prices of option (for any strike or put/call).\n minBuyingPower: Minimum investment we want for the strategy -- since prices vary greatly over a range like\n 1990 to 2017, we would like to have the same amount of money in the market at any given\n time, so we increase the number of contracts to reach this minBuyingPower.\n \"\"\"\n\n startDateTime: datetime.datetime\n buyOrSell: optionPrimitive.TransactionType\n underlyingTicker: Text\n orderQuantity: int\n expCycle: Optional[ExpirationTypes] = None\n optimalDTE: Optional[int] = None\n minimumDTE: Optional[int] = None\n minimumROC: Optional[float] = None\n minCredit: Optional[decimal.Decimal] = None\n maxBidAsk: Optional[decimal.Decimal] = None\n minBuyingPower: Optional[decimal.Decimal] = None\n\n def __post_init__(self):\n if self.__class__ == Strategy:\n raise TypeError('Cannot instantiate abstract class.')\n\n @abc.abstractmethod\n def checkForSignal(self, event: tickEvent) -> None:\n pass\n", "id": "7332112", "language": "Python", "matching_score": 4.285778522491455, "max_stars_count": 82, "path": "strategyManager/strategy.py" }, { "content": "import datetime\nimport unittest\nfrom optionPrimitives import optionPrimitive\nfrom strategyManager import strategy\n\nclass TestStrategyClass(unittest.TestCase):\n\n def testStrategyClassCreation(self):\n \"\"\"Tests than an exception is raised when class is instantiated.\"\"\"\n with self.assertRaisesRegex(TypeError, 'Can\\'t instantiate abstract class Strategy with abstract methods'\n ' checkForSignal'):\n strategy.Strategy(startDateTime=datetime.datetime.now(), buyOrSell=optionPrimitive.TransactionType.SELL,\n underlyingTicker='SPY', orderQuantity=1)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "9563299", "language": "Python", "matching_score": 5.154565811157227, "max_stars_count": 82, "path": "strategyManager/strategyTest.py" }, { "content": "import datetime\nimport unittest\nfrom base import option\n\nclass TestOptionsClass(unittest.TestCase):\n\n def testOptionClassCreation(self):\n \"\"\"Tests than an exception is raised when class is instantiated.\"\"\"\n with self.assertRaisesRegex(TypeError, \"Cannot instantiate abstract class.\"):\n option.Option(underlyingTicker='SPY', strikePrice=250, delta=0.3, expirationDateTime=datetime.datetime.now())\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "2855196", "language": "Python", "matching_score": 2.7358202934265137, "max_stars_count": 82, "path": "base/optionTest.py" }, { "content": "import datetime\nimport decimal\nimport unittest\nfrom base import put\n\nclass TestPutOption(unittest.TestCase):\n def setUp(self):\n self._putOptionToTest = put.Put(underlyingTicker='SPY', strikePrice=decimal.Decimal(250), delta=0.3,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/01/2050', \"%m/%d/%Y\"),\n bidPrice=decimal.Decimal(1.50), askPrice=decimal.Decimal(1.00),\n tradePrice=decimal.Decimal(3.00))\n\n def testCalcOptionPriceDiff(self):\n \"\"\"Tests that the difference between current price and trade price is calculated correctly.\"\"\"\n 
expectedPriceDiff = 175\n self.assertEqual(self._putOptionToTest.calcOptionPriceDiff(), expectedPriceDiff)\n\n def testNumberDaysUntilExpiration(self):\n \"\"\"Tests that the number of days to expiration is computed correctly.\"\"\"\n expectedDays = 10592\n self.assertEqual(self._putOptionToTest.getNumDaysLeft(), expectedDays)\n\n def testUpdateOptionSuccess(self):\n \"\"\"Tests that option values are successfully updated with latest data.\"\"\"\n updatedPut = put.Put(underlyingTicker='SPY', strikePrice=250, delta=0.3,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/01/2050', \"%m/%d/%Y\"), bidPrice=0.50,\n askPrice=0.75, tradePrice=3.00)\n self._putOptionToTest.updateOption(updatedPut)\n self.assertEqual(self._putOptionToTest.bidPrice, 0.50)\n self.assertEqual(self._putOptionToTest.askPrice, 0.75)\n\n def testUpdateOptionInvalidOptionStrikePrice(self):\n \"\"\"Tests that error is raised if we update an option with different parameters (wrong strike price).\"\"\"\n updatedPut = put.Put(underlyingTicker='SPY', strikePrice=255, delta=0.3,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/01/2050', \"%m/%d/%Y\"), bidPrice=0.50,\n askPrice=0.75, tradePrice=3.00)\n with self.assertRaisesRegex(ValueError, ('Cannot update option; this option appears to be from a different option '\n 'chain.')):\n self._putOptionToTest.updateOption(updatedPut)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "10356102", "language": "Python", "matching_score": 4.200516223907471, "max_stars_count": 82, "path": "base/putTest.py" }, { "content": "import datetime\nimport unittest\nfrom base import call\nfrom base import option\n\nclass TestCallOption(unittest.TestCase):\n def testCallOptionCreation(self):\n \"\"\"Tests that a CALL option is created successfully.\"\"\"\n callOption = call.Call(underlyingTicker='SPY', strikePrice=250, delta=0.3,\n dateTime=datetime.datetime.strptime('01/01/2021', \"%m/%d/%Y\"),\n expirationDateTime=datetime.datetime.strptime('01/01/2050', \"%m/%d/%Y\"),\n bidPrice = 1.50, askPrice = 1.00, tradePrice = 3.00)\n self.assertEqual(callOption.strikePrice, 250)\n self.assertEqual(callOption.optionType, option.OptionTypes.CALL)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "6973837", "language": "Python", "matching_score": 2.620098114013672, "max_stars_count": 82, "path": "base/callTest.py" }, { "content": "import dataclasses\nfrom base import option\n\[email protected]\nclass Call(option.Option):\n \"\"\"This class defines a CALL option, which inherits from the Option class.\"\"\"\n optionType: option.OptionTypes = option.OptionTypes.CALL\n", "id": "7430562", "language": "Python", "matching_score": 3.075387716293335, "max_stars_count": 82, "path": "base/call.py" }, { "content": "import dataclasses\nfrom base import option\n\[email protected]\nclass Put(option.Option):\n \"\"\"This class defines a PUT option, which inherits from the Option class.\"\"\"\n optionType: option.OptionTypes = option.OptionTypes.PUT\n", "id": "4554383", "language": "Python", "matching_score": 0.611794650554657, "max_stars_count": 82, "path": "base/put.py" }, { "content": "import abc\nimport dataclasses\nimport datetime\nimport decimal\nimport enum\nfrom typing import Optional, Text\n\nclass OptionTypes(enum.Enum):\n PUT = 0\n CALL = 1\n\[email protected]\nclass Option(abc.ABC):\n \"\"\"This class defines the basic type for the backtester or live trader -- an 
option.\n Other classes such as put or call are derived from this class.\n\n Attributes:\n underlyingTicker: ticker symbol (e.g., SPY) of underlying.\n strikePrice: strike price of option.\n expirationDateTime: date/time at which option expires.\n underlyingPrice: price of the underlying / stock which has option derivatives in dollars.\n optionSymbol: code different than the underlying ticker used to denote option.\n bidPrice: current bid price of option.\n askPrice: current asking price of option.\n tradePrice: price of option when trade was executed / put on.\n openInterest: number of open option contracts.\n volume: number of contracts traded.\n dateTime: data / time of quote received; would also be date / time bought / sold.\n delta: greek for quantifying percent of stock we're long or short (-1 to 1).\n theta: daily return in dollars if no movement in underlying price.\n gamma: describes rate of change of delta (float).\n rho: how much option price changes with change in interest rate (dollars).\n vega: change in price of option for every 1% change in volatility.\n impliedVol: implied volatility percentage.\n exchangeCode: symbol used to denote which exchanged used or where quote came from.\n exercisePrice: price to exercise option early.\n assignPrice: price you must pay if other party exercises option.\n openCost: cost to open the option trade.\n closeCost: cost to close out the option trade.\n \"\"\"\n underlyingTicker: Text\n strikePrice: decimal.Decimal\n expirationDateTime: datetime.datetime\n underlyingPrice: Optional[decimal.Decimal] = None\n optionSymbol: Optional[Text] = None\n bidPrice: Optional[decimal.Decimal] = None\n askPrice: Optional[decimal.Decimal] = None\n tradePrice: decimal.Decimal = None\n openInterest: Optional[int] = None\n volume: Optional[int] = None\n dateTime: Optional[datetime.datetime] = None\n delta: Optional[float] = None\n theta: Optional[float] = None\n gamma: Optional[float] = None\n rho: Optional[float] = None\n vega: Optional[float] = None\n impliedVol: Optional[float] = None\n exchangeCode: Optional[Text] = None\n exercisePrice: Optional[decimal.Decimal] = None\n assignPrice: Optional[decimal.Decimal] = None\n openCost: Optional[decimal.Decimal] = None\n closeCost: Optional[decimal.Decimal] = None\n\n def __post_init__(self):\n if self.__class__ == Option:\n raise TypeError('Cannot instantiate abstract class.')\n\n def calcOptionPriceDiff(self) -> decimal.Decimal:\n \"\"\"Calculate the difference in price of the put/call when the trade was placed versus its current value.\n Specifically, diff = original price - current price. 
The current price used is actually the mid price, or\n the average of the bid price and ask price.\n :return: price difference (original price - current price).\n \"\"\"\n midPrice = (self.bidPrice + self.askPrice) / decimal.Decimal(2.0)\n return (self.tradePrice - midPrice) * 100\n\n def getNumDaysLeft(self) -> int:\n \"\"\"Determine the number of days between the current date/time and expiration date / time.\n :return: number of days between curDateTime and expDateTime.\n \"\"\"\n return (self.expirationDateTime - self.dateTime).days\n\n def updateOption(self, updatedOption: 'Option') -> None:\n \"\"\"Update the relevant values of the original option with those of the new option; e.g., update price, delta.\n :param updatedOption: new option from the latest tick.\n :raises ValueError: option cannot be updated.\n \"\"\"\n # Check that we are dealing with the same option.\n # TODO(easier to check option symbol)?\n if self.underlyingTicker == updatedOption.underlyingTicker and self.strikePrice == updatedOption.strikePrice and (\n self.expirationDateTime == updatedOption.expirationDateTime):\n self.underlyingPrice = updatedOption.underlyingPrice\n self.bidPrice = updatedOption.bidPrice\n self.askPrice = updatedOption.askPrice\n self.openInterest = updatedOption.openInterest\n self.volume = updatedOption.volume\n self.dateTime = updatedOption.dateTime\n self.delta = updatedOption.delta\n self.theta = updatedOption.theta\n self.gamma = updatedOption.gamma\n self.rho = updatedOption.rho\n self.vega = updatedOption.vega\n self.impliedVol = updatedOption.impliedVol\n else:\n raise ValueError('Cannot update option; this option appears to be from a different option chain.')\n", "id": "3298498", "language": "Python", "matching_score": 6.387882709503174, "max_stars_count": 82, "path": "base/option.py" }, { "content": "import dataclasses\nimport datetime\nimport decimal\nfrom typing import Optional, Text\n\[email protected]\nclass Stock:\n \"\"\"This class defines one the basic types for the backtester or live trader -- a stock.\n Attributes:\n underlyingPrice: price of the underlying / stock which has option derivatives in dollars.\n underlyingTicker: ticker symbol (e.g., SPY) of underlying.\n bidPrice: current bid price of option.\n askPrice: current asking price of option.\n tradePrice: price of stock when order was executed.\n openInterest: number of open option contracts.\n volume: number of contracts traded.\n dateTime: data / time of quote received; would also be data / time bought / sold.\n exchangeCode: symbol used to denote which exchanged used or where quote came from.\n openCost: cost to open the option trade.\n closeCost: cost to close out the option trade.\n \"\"\"\n underlyingPrice: decimal.Decimal\n underlyingTicker: Text\n bidPrice: Optional[decimal.Decimal] = None\n askPrice: Optional[decimal.Decimal] = None\n tradePrice: decimal.Decimal = None\n openInterest: Optional[int] = 0\n volume: Optional[int] = 0\n dateTime: Optional[datetime.datetime] = None\n exchangeCode: Optional[Text] = None\n openCost: Optional[decimal.Decimal] = None\n closeCost: Optional[decimal.Decimal] = None", "id": "6098079", "language": "Python", "matching_score": 2.026890993118286, "max_stars_count": 82, "path": "base/stock.py" }, { "content": "import unittest\nfrom base import stock\n\nclass TestStockClass(unittest.TestCase):\n def testStockClassCreation(self):\n # Test Stock class creation and getter methods\n stockObj = stock.Stock(underlyingTicker='SPY', underlyingPrice=250)\n # Test that the underlying 
ticker, direction, and underlying price are populated correctly.\n self.assertEqual(stockObj.underlyingTicker, 'SPY')\n self.assertEqual(stockObj.underlyingPrice, 250)\n\nif __name__ == '__main__':\n unittest.main()\n\n", "id": "4672324", "language": "Python", "matching_score": 0, "max_stars_count": 82, "path": "base/stockTest.py" }, { "content": "import pandas as pd\n\nif __name__ == \"__main__\":\n\n files = ['/Users/msantoro/PycharmProjects/Backtester/marketData/iVolatility/SPX/SPX_1990_1999/RawIV.csv',\n '/Users/msantoro/PycharmProjects/Backtester/marketData/iVolatility/SPX/SPX_2000_2010/RawIV.csv',\n '/Users/msantoro/PycharmProjects/Backtester/marketData/iVolatility/SPX/SPX_2011_2017/RawIV.csv']\n\n chunkSize = 10000\n useHeader = True\n\n for file in files:\n for chunk in pd.read_csv(file, chunksize=chunkSize):\n if useHeader:\n chunk.to_csv('/Users/msantoro/PycharmProjects/Backtester/marketData/iVolatility/SPX/combinedCSV.csv',\n header=True, mode='a', index=False)\n useHeader = False\n else:\n chunk.to_csv('/Users/msantoro/PycharmProjects/Backtester/marketData/iVolatility/SPX/combinedCSV.csv',\n header=False, mode='a', index=False)\n", "id": "2079041", "language": "Python", "matching_score": 0, "max_stars_count": 82, "path": "utils/combineCSVs.py" } ]
3.480701
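The StrangleStrat code in the row above picks, for each tick, the call and the put whose expiration is closest to optimalDTE, breaking ties by the delta closest to the requested delta (see __updateWithOptimalOption). Below is a minimal, self-contained Python sketch of that selection rule only; the _Opt dataclass and pick_optimal function are illustrative names and are not part of the repository, and the monthly-expiration, maximum-delta and bid/ask filters applied in the real code are omitted for brevity.

# Minimal sketch of the "closest DTE, then closest delta" selection rule.
# _Opt is a hypothetical stand-in for base.option.Option; only the fields
# needed for the rule are modeled.
from dataclasses import dataclass
import datetime

@dataclass
class _Opt:
    delta: float
    dateTime: datetime.datetime            # quote date/time
    expirationDateTime: datetime.datetime  # option expiration

def pick_optimal(options, requested_delta, optimal_dte):
    """Return the option whose DTE is closest to optimal_dte; on equal DTE,
    prefer the delta closest to requested_delta."""
    best = None
    for cur in options:
        cur_dte = (cur.expirationDateTime - cur.dateTime).days
        if best is None:
            best = cur
            continue
        best_dte = (best.expirationDateTime - best.dateTime).days
        if abs(cur_dte - optimal_dte) < abs(best_dte - optimal_dte):
            best = cur  # strictly closer expiration wins
        elif cur_dte == best_dte and abs(cur.delta - requested_delta) < abs(best.delta - requested_delta):
            best = cur  # same expiration: closer delta wins
    return best

if __name__ == "__main__":
    today = datetime.datetime(2014, 8, 18)
    chain = [_Opt(0.20, today, datetime.datetime(2014, 9, 19)),   # 32 DTE
             _Opt(0.16, today, datetime.datetime(2014, 10, 17))]  # 60 DTE
    # With optimal_dte=45 the September option wins (|32-45| < |60-45|),
    # even though the October option has the closer delta.
    print(pick_optimal(chain, requested_delta=0.16, optimal_dte=45))

In the repository itself this rule is applied separately to calls and puts, and a signal is only emitted when the chosen call and put share the same expiration.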
koborit
[ { "content": "\"\"\"\nMIT License\n\nCopyright (c) 2017 <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nDocumentation\n\nSub module for UI implementation.\n\n\"\"\"\n\n\nfrom pymel.core import *\n\nimport maya.OpenMayaUI as OpenMayaUI\n\nfrom Qt import QtCore, QtGui, QtWidgets\nfrom Qt.QtCore import Slot\n\ntry:\n from shiboken2 import wrapInstance\nexcept:\n from shiboken import wrapInstance\n\nfrom . import core\n\n\nclass Ui_SpaceSwitcherWindow(object):\n def setupUi(self, SpaceSwitcherWindow):\n SpaceSwitcherWindow.setObjectName(\"SpaceSwitcherWindow\")\n SpaceSwitcherWindow.setWindowModality(QtCore.Qt.NonModal)\n SpaceSwitcherWindow.resize(246, 256)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(SpaceSwitcherWindow.sizePolicy().hasHeightForWidth())\n SpaceSwitcherWindow.setSizePolicy(sizePolicy)\n SpaceSwitcherWindow.setMinimumSize(QtCore.QSize(246, 256))\n SpaceSwitcherWindow.setMaximumSize(QtCore.QSize(246, 256))\n SpaceSwitcherWindow.setWindowTitle(\"SpaceSwitcher\")\n SpaceSwitcherWindow.setWindowOpacity(1.0)\n SpaceSwitcherWindow.setToolTip(\"\")\n SpaceSwitcherWindow.setTabShape(QtWidgets.QTabWidget.Rounded)\n self.centralWidget = QtWidgets.QWidget(SpaceSwitcherWindow)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.centralWidget.sizePolicy().hasHeightForWidth())\n self.centralWidget.setSizePolicy(sizePolicy)\n self.centralWidget.setMinimumSize(QtCore.QSize(0, 0))\n self.centralWidget.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.centralWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.centralWidget.setObjectName(\"centralWidget\")\n self.layout_centralWidget = QtWidgets.QHBoxLayout(self.centralWidget)\n self.layout_centralWidget.setSpacing(2)\n self.layout_centralWidget.setContentsMargins(2, 2, 2, 2)\n self.layout_centralWidget.setObjectName(\"layout_centralWidget\")\n self.frame_Root = QtWidgets.QFrame(self.centralWidget)\n self.frame_Root.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_Root.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_Root.setObjectName(\"frame_Root\")\n self.layout_Root = QtWidgets.QVBoxLayout(self.frame_Root)\n self.layout_Root.setSpacing(2)\n self.layout_Root.setContentsMargins(2, 2, 2, 2)\n 
self.layout_Root.setObjectName(\"layout_Root\")\n self.frame_Parent = QtWidgets.QFrame(self.frame_Root)\n self.frame_Parent.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_Parent.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_Parent.setObjectName(\"frame_Parent\")\n self.layout_Parent = QtWidgets.QVBoxLayout(self.frame_Parent)\n self.layout_Parent.setSpacing(4)\n self.layout_Parent.setContentsMargins(2, 2, 2, 2)\n self.layout_Parent.setObjectName(\"layout_Parent\")\n self.frame_LabelAndButton = QtWidgets.QFrame(self.frame_Parent)\n self.frame_LabelAndButton.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_LabelAndButton.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_LabelAndButton.setObjectName(\"frame_LabelAndButton\")\n self.layout_LabelAndButton = QtWidgets.QHBoxLayout(self.frame_LabelAndButton)\n self.layout_LabelAndButton.setSpacing(2)\n self.layout_LabelAndButton.setContentsMargins(0, 0, 0, 0)\n self.layout_LabelAndButton.setObjectName(\"layout_LabelAndButton\")\n self.label_Parent = QtWidgets.QLabel(self.frame_LabelAndButton)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_Parent.sizePolicy().hasHeightForWidth())\n self.label_Parent.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setWeight(75)\n font.setBold(True)\n self.label_Parent.setFont(font)\n self.label_Parent.setText(\"Parent\")\n self.label_Parent.setObjectName(\"label_Parent\")\n self.layout_LabelAndButton.addWidget(self.label_Parent)\n self.pushButton_SetParent = QtWidgets.QPushButton(self.frame_LabelAndButton)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_SetParent.sizePolicy().hasHeightForWidth())\n self.pushButton_SetParent.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setWeight(75)\n font.setBold(True)\n self.pushButton_SetParent.setFont(font)\n self.pushButton_SetParent.setText(\"Set\")\n self.pushButton_SetParent.setObjectName(\"pushButton_SetParent\")\n self.layout_LabelAndButton.addWidget(self.pushButton_SetParent)\n self.pushButton_ClearParent = QtWidgets.QPushButton(self.frame_LabelAndButton)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_ClearParent.sizePolicy().hasHeightForWidth())\n self.pushButton_ClearParent.setSizePolicy(sizePolicy)\n self.pushButton_ClearParent.setMaximumSize(QtCore.QSize(52, 16777215))\n self.pushButton_ClearParent.setText(\"Clear\")\n self.pushButton_ClearParent.setObjectName(\"pushButton_ClearParent\")\n self.layout_LabelAndButton.addWidget(self.pushButton_ClearParent)\n self.layout_Parent.addWidget(self.frame_LabelAndButton)\n self.lineEdit_Parent = QtWidgets.QLineEdit(self.frame_Parent)\n self.lineEdit_Parent.setText(\"\")\n self.lineEdit_Parent.setObjectName(\"lineEdit_Parent\")\n self.layout_Parent.addWidget(self.lineEdit_Parent)\n self.layout_Root.addWidget(self.frame_Parent)\n self.frame_CreateConstraint = QtWidgets.QFrame(self.frame_Root)\n self.frame_CreateConstraint.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_CreateConstraint.setFrameShadow(QtWidgets.QFrame.Raised)\n 
self.frame_CreateConstraint.setObjectName(\"frame_CreateConstraint\")\n self.layout_CreateConstraint = QtWidgets.QVBoxLayout(self.frame_CreateConstraint)\n self.layout_CreateConstraint.setSpacing(0)\n self.layout_CreateConstraint.setContentsMargins(2, 2, 2, 2)\n self.layout_CreateConstraint.setObjectName(\"layout_CreateConstraint\")\n self.label_CreateConstraint = QtWidgets.QLabel(self.frame_CreateConstraint)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_CreateConstraint.sizePolicy().hasHeightForWidth())\n self.label_CreateConstraint.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setWeight(75)\n font.setBold(True)\n self.label_CreateConstraint.setFont(font)\n self.label_CreateConstraint.setToolTip(\"Create constraints: Select nodes to be constrained\")\n self.label_CreateConstraint.setText(\"Create Constraint\")\n self.label_CreateConstraint.setObjectName(\"label_CreateConstraint\")\n self.layout_CreateConstraint.addWidget(self.label_CreateConstraint)\n self.frame_TranslateCheckBoxes = QtWidgets.QFrame(self.frame_CreateConstraint)\n self.frame_TranslateCheckBoxes.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_TranslateCheckBoxes.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_TranslateCheckBoxes.setObjectName(\"frame_TranslateCheckBoxes\")\n self.layout_TranslateCheckBoxes = QtWidgets.QHBoxLayout(self.frame_TranslateCheckBoxes)\n self.layout_TranslateCheckBoxes.setSpacing(8)\n self.layout_TranslateCheckBoxes.setContentsMargins(0, 6, 0, 0)\n self.layout_TranslateCheckBoxes.setObjectName(\"layout_TranslateCheckBoxes\")\n self.label_Translate = QtWidgets.QLabel(self.frame_TranslateCheckBoxes)\n self.label_Translate.setText(\"Translate\")\n self.label_Translate.setObjectName(\"label_Translate\")\n self.layout_TranslateCheckBoxes.addWidget(self.label_Translate)\n self.checkBox_TranslateX = QtWidgets.QCheckBox(self.frame_TranslateCheckBoxes)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.checkBox_TranslateX.sizePolicy().hasHeightForWidth())\n self.checkBox_TranslateX.setSizePolicy(sizePolicy)\n self.checkBox_TranslateX.setText(\"X\")\n self.checkBox_TranslateX.setChecked(True)\n self.checkBox_TranslateX.setObjectName(\"checkBox_TranslateX\")\n self.layout_TranslateCheckBoxes.addWidget(self.checkBox_TranslateX)\n self.checkBox_TranslateY = QtWidgets.QCheckBox(self.frame_TranslateCheckBoxes)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.checkBox_TranslateY.sizePolicy().hasHeightForWidth())\n self.checkBox_TranslateY.setSizePolicy(sizePolicy)\n self.checkBox_TranslateY.setText(\"Y\")\n self.checkBox_TranslateY.setChecked(True)\n self.checkBox_TranslateY.setObjectName(\"checkBox_TranslateY\")\n self.layout_TranslateCheckBoxes.addWidget(self.checkBox_TranslateY)\n self.checkBox_TranslateZ = QtWidgets.QCheckBox(self.frame_TranslateCheckBoxes)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.checkBox_TranslateZ.sizePolicy().hasHeightForWidth())\n 
self.checkBox_TranslateZ.setSizePolicy(sizePolicy)\n self.checkBox_TranslateZ.setText(\"Z\")\n self.checkBox_TranslateZ.setChecked(True)\n self.checkBox_TranslateZ.setObjectName(\"checkBox_TranslateZ\")\n self.layout_TranslateCheckBoxes.addWidget(self.checkBox_TranslateZ)\n self.layout_CreateConstraint.addWidget(self.frame_TranslateCheckBoxes)\n self.frame_RotateCheckBoxes = QtWidgets.QFrame(self.frame_CreateConstraint)\n self.frame_RotateCheckBoxes.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_RotateCheckBoxes.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_RotateCheckBoxes.setObjectName(\"frame_RotateCheckBoxes\")\n self.layout_RotateCheckBoxes = QtWidgets.QHBoxLayout(self.frame_RotateCheckBoxes)\n self.layout_RotateCheckBoxes.setSpacing(8)\n self.layout_RotateCheckBoxes.setContentsMargins(0, 0, 0, 0)\n self.layout_RotateCheckBoxes.setObjectName(\"layout_RotateCheckBoxes\")\n self.label_Rotate = QtWidgets.QLabel(self.frame_RotateCheckBoxes)\n self.label_Rotate.setText(\"Rotate\")\n self.label_Rotate.setObjectName(\"label_Rotate\")\n self.layout_RotateCheckBoxes.addWidget(self.label_Rotate)\n self.checkBox_RotateX = QtWidgets.QCheckBox(self.frame_RotateCheckBoxes)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.checkBox_RotateX.sizePolicy().hasHeightForWidth())\n self.checkBox_RotateX.setSizePolicy(sizePolicy)\n self.checkBox_RotateX.setText(\"X\")\n self.checkBox_RotateX.setChecked(True)\n self.checkBox_RotateX.setObjectName(\"checkBox_RotateX\")\n self.layout_RotateCheckBoxes.addWidget(self.checkBox_RotateX)\n self.checkBox_RotateY = QtWidgets.QCheckBox(self.frame_RotateCheckBoxes)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.checkBox_RotateY.sizePolicy().hasHeightForWidth())\n self.checkBox_RotateY.setSizePolicy(sizePolicy)\n self.checkBox_RotateY.setText(\"Y\")\n self.checkBox_RotateY.setChecked(True)\n self.checkBox_RotateY.setObjectName(\"checkBox_RotateY\")\n self.layout_RotateCheckBoxes.addWidget(self.checkBox_RotateY)\n self.checkBox_RotateZ = QtWidgets.QCheckBox(self.frame_RotateCheckBoxes)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.checkBox_RotateZ.sizePolicy().hasHeightForWidth())\n self.checkBox_RotateZ.setSizePolicy(sizePolicy)\n self.checkBox_RotateZ.setText(\"Z\")\n self.checkBox_RotateZ.setChecked(True)\n self.checkBox_RotateZ.setObjectName(\"checkBox_RotateZ\")\n self.layout_RotateCheckBoxes.addWidget(self.checkBox_RotateZ)\n self.layout_CreateConstraint.addWidget(self.frame_RotateCheckBoxes)\n self.frame_CreateConstraintButtons = QtWidgets.QFrame(self.frame_CreateConstraint)\n self.frame_CreateConstraintButtons.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_CreateConstraintButtons.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_CreateConstraintButtons.setObjectName(\"frame_CreateConstraintButtons\")\n self.layout_CreateConstraintButtons = QtWidgets.QHBoxLayout(self.frame_CreateConstraintButtons)\n self.layout_CreateConstraintButtons.setSpacing(2)\n self.layout_CreateConstraintButtons.setContentsMargins(0, 0, 0, 0)\n 
self.layout_CreateConstraintButtons.setObjectName(\"layout_CreateConstraintButtons\")\n self.pushButton_CreateConstraint = QtWidgets.QPushButton(self.frame_CreateConstraintButtons)\n self.pushButton_CreateConstraint.setToolTip(\"\")\n self.pushButton_CreateConstraint.setText(\"Create\")\n self.pushButton_CreateConstraint.setObjectName(\"pushButton_CreateConstraint\")\n self.layout_CreateConstraintButtons.addWidget(self.pushButton_CreateConstraint)\n self.pushButton_CreateAndBakeConstraint = QtWidgets.QPushButton(self.frame_CreateConstraintButtons)\n self.pushButton_CreateAndBakeConstraint.setText(\"Create and Bake\")\n self.pushButton_CreateAndBakeConstraint.setObjectName(\"pushButton_CreateAndBakeConstraint\")\n self.layout_CreateConstraintButtons.addWidget(self.pushButton_CreateAndBakeConstraint)\n self.layout_CreateConstraint.addWidget(self.frame_CreateConstraintButtons)\n self.layout_Root.addWidget(self.frame_CreateConstraint)\n self.frame_DeleteConstraint = QtWidgets.QFrame(self.frame_Root)\n self.frame_DeleteConstraint.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_DeleteConstraint.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_DeleteConstraint.setObjectName(\"frame_DeleteConstraint\")\n self.layout_DeleteConstraint = QtWidgets.QVBoxLayout(self.frame_DeleteConstraint)\n self.layout_DeleteConstraint.setSpacing(0)\n self.layout_DeleteConstraint.setContentsMargins(2, 2, 2, 2)\n self.layout_DeleteConstraint.setObjectName(\"layout_DeleteConstraint\")\n self.label_DeleteConstraint = QtWidgets.QLabel(self.frame_DeleteConstraint)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_DeleteConstraint.sizePolicy().hasHeightForWidth())\n self.label_DeleteConstraint.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setWeight(75)\n font.setBold(True)\n self.label_DeleteConstraint.setFont(font)\n self.label_DeleteConstraint.setToolTip(\"Delete constraints: Select constraining locators\")\n self.label_DeleteConstraint.setText(\"Delete Constraint\")\n self.label_DeleteConstraint.setObjectName(\"label_DeleteConstraint\")\n self.layout_DeleteConstraint.addWidget(self.label_DeleteConstraint)\n self.frame_DeleteConstraintButtons = QtWidgets.QFrame(self.frame_DeleteConstraint)\n self.frame_DeleteConstraintButtons.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_DeleteConstraintButtons.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_DeleteConstraintButtons.setObjectName(\"frame_DeleteConstraintButtons\")\n self.layout_DeleteConstraintButtons = QtWidgets.QHBoxLayout(self.frame_DeleteConstraintButtons)\n self.layout_DeleteConstraintButtons.setSpacing(2)\n self.layout_DeleteConstraintButtons.setContentsMargins(0, 4, 0, 0)\n self.layout_DeleteConstraintButtons.setObjectName(\"layout_DeleteConstraintButtons\")\n self.pushButton_DeleteConstraint = QtWidgets.QPushButton(self.frame_DeleteConstraintButtons)\n self.pushButton_DeleteConstraint.setToolTip(\"\")\n self.pushButton_DeleteConstraint.setText(\"Delete\")\n self.pushButton_DeleteConstraint.setObjectName(\"pushButton_DeleteConstraint\")\n self.layout_DeleteConstraintButtons.addWidget(self.pushButton_DeleteConstraint)\n self.pushButton_BakeAndDeleteConstraint = QtWidgets.QPushButton(self.frame_DeleteConstraintButtons)\n self.pushButton_BakeAndDeleteConstraint.setText(\"Bake and Delete\")\n 
self.pushButton_BakeAndDeleteConstraint.setObjectName(\"pushButton_BakeAndDeleteConstraint\")\n self.layout_DeleteConstraintButtons.addWidget(self.pushButton_BakeAndDeleteConstraint)\n self.layout_DeleteConstraint.addWidget(self.frame_DeleteConstraintButtons)\n self.layout_Root.addWidget(self.frame_DeleteConstraint)\n self.frame_BakeRange = QtWidgets.QFrame(self.frame_Root)\n self.frame_BakeRange.setFrameShape(QtWidgets.QFrame.StyledPanel)\n self.frame_BakeRange.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_BakeRange.setObjectName(\"frame_BakeRange\")\n self.layout_BakeRange = QtWidgets.QVBoxLayout(self.frame_BakeRange)\n self.layout_BakeRange.setSpacing(0)\n self.layout_BakeRange.setContentsMargins(2, 2, 2, 2)\n self.layout_BakeRange.setObjectName(\"layout_BakeRange\")\n self.frame_BakeRangeTop = QtWidgets.QFrame(self.frame_BakeRange)\n self.frame_BakeRangeTop.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_BakeRangeTop.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_BakeRangeTop.setObjectName(\"frame_BakeRangeTop\")\n self.layout_BakeRangeTop = QtWidgets.QHBoxLayout(self.frame_BakeRangeTop)\n self.layout_BakeRangeTop.setSpacing(0)\n self.layout_BakeRangeTop.setContentsMargins(0, 0, 0, 0)\n self.layout_BakeRangeTop.setObjectName(\"layout_BakeRangeTop\")\n self.label_BakeRange = QtWidgets.QLabel(self.frame_BakeRangeTop)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.label_BakeRange.sizePolicy().hasHeightForWidth())\n self.label_BakeRange.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setWeight(75)\n font.setBold(True)\n self.label_BakeRange.setFont(font)\n self.label_BakeRange.setText(\"Bake Range\")\n self.label_BakeRange.setObjectName(\"label_BakeRange\")\n self.layout_BakeRangeTop.addWidget(self.label_BakeRange)\n self.pushButton_SetFromTimeline = QtWidgets.QPushButton(self.frame_BakeRangeTop)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_SetFromTimeline.sizePolicy().hasHeightForWidth())\n self.pushButton_SetFromTimeline.setSizePolicy(sizePolicy)\n self.pushButton_SetFromTimeline.setMaximumSize(QtCore.QSize(16777215, 16777215))\n self.pushButton_SetFromTimeline.setText(\"Set from timeline\")\n self.pushButton_SetFromTimeline.setObjectName(\"pushButton_SetFromTimeline\")\n self.layout_BakeRangeTop.addWidget(self.pushButton_SetFromTimeline)\n self.layout_BakeRange.addWidget(self.frame_BakeRangeTop)\n self.frame_BakeRangeSpinBoxes = QtWidgets.QFrame(self.frame_BakeRange)\n self.frame_BakeRangeSpinBoxes.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.frame_BakeRangeSpinBoxes.setFrameShadow(QtWidgets.QFrame.Plain)\n self.frame_BakeRangeSpinBoxes.setObjectName(\"frame_BakeRangeSpinBoxes\")\n self.layout_BakeRangeSpinBoxes = QtWidgets.QHBoxLayout(self.frame_BakeRangeSpinBoxes)\n self.layout_BakeRangeSpinBoxes.setSpacing(2)\n self.layout_BakeRangeSpinBoxes.setContentsMargins(0, 4, 0, 0)\n self.layout_BakeRangeSpinBoxes.setObjectName(\"layout_BakeRangeSpinBoxes\")\n self.spinBox_BakeStart = QtWidgets.QSpinBox(self.frame_BakeRangeSpinBoxes)\n self.spinBox_BakeStart.setAccelerated(True)\n self.spinBox_BakeStart.setMinimum(-16777215)\n self.spinBox_BakeStart.setMaximum(16777215)\n self.spinBox_BakeStart.setProperty(\"value\", 1)\n 
self.spinBox_BakeStart.setObjectName(\"spinBox_BakeStart\")\n self.layout_BakeRangeSpinBoxes.addWidget(self.spinBox_BakeStart)\n self.spinBox_BakeEnd = QtWidgets.QSpinBox(self.frame_BakeRangeSpinBoxes)\n self.spinBox_BakeEnd.setAccelerated(True)\n self.spinBox_BakeEnd.setMinimum(-16777215)\n self.spinBox_BakeEnd.setMaximum(16777215)\n self.spinBox_BakeEnd.setProperty(\"value\", 24)\n self.spinBox_BakeEnd.setObjectName(\"spinBox_BakeEnd\")\n self.layout_BakeRangeSpinBoxes.addWidget(self.spinBox_BakeEnd)\n self.layout_BakeRange.addWidget(self.frame_BakeRangeSpinBoxes)\n self.layout_Root.addWidget(self.frame_BakeRange)\n self.layout_centralWidget.addWidget(self.frame_Root)\n SpaceSwitcherWindow.setCentralWidget(self.centralWidget)\n\n self.retranslateUi(SpaceSwitcherWindow)\n QtCore.QMetaObject.connectSlotsByName(SpaceSwitcherWindow)\n\n def retranslateUi(self, SpaceSwitcherWindow):\n pass\n\n\nclass ControlMainWindow(QtWidgets.QMainWindow):\n def __init__(self, window_title, parent=None):\n super(ControlMainWindow, self).__init__(parent)\n self.window_title = window_title\n self.ui = Ui_SpaceSwitcherWindow()\n self.ui.setupUi(self)\n\n # signal - slot connections\n self.ui.pushButton_SetParent.clicked.connect(self.setParent_cliciked)\n self.ui.pushButton_ClearParent.clicked.connect(self.clearParent_clicked)\n self.ui.pushButton_CreateConstraint.clicked.connect(self.createConstraint_clicked)\n self.ui.pushButton_CreateAndBakeConstraint.clicked.connect(self.createAndBakeConstraint_clicked)\n self.ui.pushButton_DeleteConstraint.clicked.connect(self.deleteConstraint_clicked)\n self.ui.pushButton_BakeAndDeleteConstraint.clicked.connect(self.bakeAndDeleteConstraint_clicked)\n self.ui.pushButton_SetFromTimeline.clicked.connect(self.setBakeRange_clicked)\n\n #\n # UI query methods\n #\n def get_parentname(self):\n return self.ui.lineEdit_Parent.text()\n\n def get_translate_switches(self):\n return (self.ui.checkBox_TranslateX.isChecked(),\n self.ui.checkBox_TranslateY.isChecked(),\n self.ui.checkBox_TranslateZ.isChecked())\n\n def get_rotate_switches(self):\n return (self.ui.checkBox_RotateX.isChecked(),\n self.ui.checkBox_RotateY.isChecked(),\n self.ui.checkBox_RotateZ.isChecked())\n\n def get_bakestart(self):\n return self.ui.spinBox_BakeStart.value()\n\n def get_bakeend(self):\n return self.ui.spinBox_BakeEnd.value()\n\n #\n # UI edit methods\n #\n def set_parentname(self, name=None):\n _name = name\n if name is None:\n selections = ls(selection=True)\n if selections:\n _name = selections[0].name()\n if _name is not None:\n self.ui.lineEdit_Parent.setText(_name)\n\n def set_bakestart(self, value):\n self.ui.spinBox_BakeStart.setValue(value)\n\n def set_bakeend(self, value):\n self.ui.spinBox_BakeEnd.setValue(value)\n\n #\n # UI update methods\n #\n def update_bakerange(self):\n self.set_bakestart(playbackOptions(q=1, minTime=True))\n self.set_bakeend(playbackOptions(q=1, maxTime=True))\n\n def update_all(self):\n self.update_bakerange()\n\n #\n # slot callback functions\n #\n @Slot()\n def setParent_cliciked(self):\n self.set_parentname()\n\n @Slot()\n def clearParent_clicked(self):\n self.set_parentname(name = '')\n\n @Slot()\n def createConstraint_clicked(self):\n undoInfo(openChunk=True)\n\n parent = None\n try:\n parent = PyNode(self.get_parentname())\n except:\n pass\n\n try:\n core.switch_space(None, parent,\n translate_switches=self.get_translate_switches(),\n rotate_switches=self.get_rotate_switches())\n except Exception as err:\n print(str(err))\n finally:\n undoInfo(closeChunk=True)\n\n 
@Slot()\n def createAndBakeConstraint_clicked(self):\n undoInfo(openChunk=True)\n\n parent = None\n try:\n parent = PyNode(self.get_parentname())\n except:\n pass\n\n try:\n core.switch_space(None, parent, self.get_translate_switches(), self.get_rotate_switches(),\n bake=True, start=self.get_bakestart(), end=self.get_bakeend())\n except Exception as err:\n print(str(err))\n finally:\n undoInfo(closeChunk=True)\n\n @Slot()\n def deleteConstraint_clicked(self):\n undoInfo(openChunk=True)\n try:\n core.delete_switch_space_constraints()\n except Exception as err:\n print(str(err))\n finally:\n undoInfo(closeChunk=True)\n\n @Slot()\n def bakeAndDeleteConstraint_clicked(self):\n undoInfo(openChunk=True)\n try:\n core.delete_switch_space_constraints(bake=True, start=self.get_bakestart(), end=self.get_bakeend())\n except Exception as err:\n print(str(err))\n finally:\n undoInfo(closeChunk=True)\n\n @Slot()\n def setBakeRange_clicked(self):\n self.update_bakerange()\n\n\ndef launch_ui(window_title='SpaceSwitcher'):\n existing_win_ptr = OpenMayaUI.MQtUtil.findWindow('SpaceSwitcherWindow')\n if existing_win_ptr:\n existing_win = wrapInstance(long(existing_win_ptr), QtWidgets.QMainWindow)\n if existing_win:\n if existing_win.windowTitle() == window_title:\n existing_win.close()\n\n main_win = ControlMainWindow(window_title,\n parent=wrapInstance(long(OpenMayaUI.MQtUtil.mainWindow()), QtWidgets.QWidget))\n main_win.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n main_win.setWindowTitle(window_title)\n main_win.update_all()\n main_win.show()\n", "id": "12769325", "language": "Python", "matching_score": 4.152382850646973, "max_stars_count": 0, "path": "tools/spaceSwitcher/python/spaceswitcher/ui.py" }, { "content": "\"\"\"\nMIT License\n\nCopyright (c) 2017 <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nDocumentation\n\nSub module for main class and functions implementation except for UI.\n\n\"\"\"\n\n\nimport re\n\nfrom pymel.core import *\n\nimport maya.OpenMaya as OpenMaya\nimport maya.api.OpenMaya as OpenMaya2\n\nfrom . 
import utils\n\n\nclass SpaceSwitcher(object):\n \"\"\"\n This class encapsulates constraint structure of a target controller and a\n transform which works as temporary \"parent\".\n Operations such as creating or deleting constraints with or without\n keyframe bake are accessible via instance methods.\n \"\"\"\n\n locator_prefix = 'cnst_'\n\n locator_target_attrname = 'spaceSwitcherTarget'\n\n locator_startinit_attrname = 'spaceSwitchStartInit'\n locator_start_attrname = 'spaceSwitcherStart'\n\n locator_endinit_attrname = 'spaceSwitcherEndInit'\n locator_end_attrname = 'spaceSwitcherEnd'\n\n constraint_types = {'translate':OpenMaya.MFn.kPointConstraint, 'rotate':OpenMaya.MFn.kOrientConstraint}\n\n @classmethod\n def get_target_from_locator(cls, locator):\n \"\"\"\n Parameters:\n locator: pymel.core.general.PyNode\n Constraint locator.\n\n Return value:\n Target transform node. pymel.core.general.PyNode\n\n Description:\n Obtains target transform from constraint locator.\n \"\"\"\n\n if not isinstance(locator, nodetypes.Transform):\n return None\n try:\n attr = locator.attr(cls.locator_target_attrname)\n except:\n return None\n else:\n inputs = attr.inputs(plugs=True)\n if not inputs:\n return None\n if isinstance(inputs[0].node(), nodetypes.Transform) and inputs[0].attrName(longName=True) == 'message':\n return inputs[0].node()\n return None\n\n @classmethod\n def get_locator_from_target(cls, target):\n \"\"\"\n Parameters:\n target: pymel.core.general.PyNode\n Target transform.\n\n Return value:\n Constraint locator node. pymel.core.general.PyNode\n\n Description:\n Obtains constraint locator from target.\n \"\"\"\n\n if not isinstance(target, nodetypes.Transform):\n return None\n try:\n attr = target.attr('message')\n except:\n return None\n else:\n outputs = attr.outputs(plugs=True)\n if not outputs:\n return None\n for output in outputs:\n if isinstance(output.node(), nodetypes.Transform)\\\n and output.attrName(longName=True) == cls.locator_target_attrname:\n return output.node()\n return None\n\n @classmethod\n def get_bakerange_from_locator(cls, locator):\n \"\"\"\n Parameters:\n locator: pymel.core.general.PyNode\n Constraint locator.\n\n Return value:\n A dictionary containing bake range information. 
dict\n\n Description:\n Obtains bake framerange from values stored in constraint locator's\n custom attributes and returns as a dictionary with keys and values\n shown below:\n\n spaceSwitcherStartInit : True if start is initialized (bool)\n spaceSwitcherStart : bake start frame (int)\n spaceSwitcherEndInit : True if end is initialized (bool)\n spaceSwitcherEnd : bake end frame (int)\n\n \"\"\"\n\n if not isinstance(locator, nodetypes.Transform):\n return {}\n\n result = {}\n\n try:\n attr = locator.attr(cls.locator_startinit_attrname)\n except Exception as err:\n print(str(err))\n else:\n result[cls.locator_startinit_attrname] = attr.get()\n\n try:\n attr = locator.attr(cls.locator_start_attrname)\n except Exception as err:\n print(str(err))\n else:\n result[cls.locator_start_attrname] = attr.get()\n\n try:\n attr = locator.attr(cls.locator_endinit_attrname)\n except Exception as err:\n print(str(err))\n else:\n result[cls.locator_endinit_attrname] = attr.get()\n\n try:\n attr = locator.attr(cls.locator_end_attrname)\n except Exception as err:\n print(str(err))\n else:\n result[cls.locator_end_attrname] = attr.get()\n\n return result\n\n def __init__(self, target=None, locator=None):\n \"\"\"\n Parameters:\n target: pymel.core.general.PyNode\n Target transform node you are animating.\n locator: pymel.core.general.PyNode\n Constraint locator node which drives target transform via\n point constraint and orientation constraint.\n\n For a new constraint structure, a class instance must be constructed\n with a target node before creating constraints:\n\n switcher = spaceswitcher.SpaceSwitcher(target=target)\n\n From an existing constraint structure, a class instance can be\n constructed and restored with a target node and/or a\n constraint locator:\n\n switcher = spaceswitcher.SpaceSwitcher(target=target)\n\n or\n\n switcher = spaceswitcher.SpaceSwitcher(locator=locator)\n\n or\n\n switcher = spaceswitcher.SpaceSwitcher(target=target, locator=locator)\n \"\"\"\n\n # target: target transform\n self.target = None\n\n # locator: constraint locator\n self.locator = None\n\n # parent: parent transform\n self.parent = None\n\n # bake frame range\n self.start = None\n self.end = None\n\n # Restore instance variables\n self._restore(target=target, locator=locator)\n\n def _restore(self, target=None, locator=None):\n is_target_set = False\n is_locator_set = False\n\n # target\n if target:\n if not isinstance(target, nodetypes.Transform):\n raise ValueError('Not a transform node is specified as target: %s' % target.name())\n self.target = target\n is_target_set = True\n\n # locator\n if locator:\n if not isinstance(locator, nodetypes.Transform):\n raise ValueError('Not a transform node is specified as locator: %s' % locator.name())\n self.locator = locator\n is_locator_set = True\n\n # get target from locator\n if not is_target_set:\n self.target = SpaceSwitcher.get_target_from_locator(self.locator)\n\n # get locator from target\n if not is_locator_set:\n self.locator = SpaceSwitcher.get_locator_from_target(self.target)\n\n # get parent from locator\n if self.locator:\n parent = self.locator.getParent()\n if parent:\n self.parent = parent\n\n # get bake framerange from locator\n bakerange = SpaceSwitcher.get_bakerange_from_locator(self.locator)\n\n if bakerange.get(SpaceSwitcher.locator_startinit_attrname, False):\n self.start = bakerange.get(SpaceSwitcher.locator_start_attrname, None)\n else:\n self.start = None\n\n if bakerange.get(SpaceSwitcher.locator_endinit_attrname, False):\n self.end = 
bakerange.get(SpaceSwitcher.locator_end_attrname, None)\n else:\n self.end = None\n\n def _create_constraint_locator(self, parent):\n self.locator = spaceLocator(name='%s%s' % (self.__class__.locator_prefix,\n re.sub(r':', '_', self.target.name())))\n if parent and isinstance(parent, nodetypes.Transform):\n self.locator.setParent(parent)\n self.parent = parent\n\n # add attribute to link target\n self.locator.addAttr(self.__class__.locator_target_attrname, attributeType='message')\n src_attr = self.target.attr('message')\n src_attr.connect(self.locator.attr(self.__class__.locator_target_attrname))\n\n # add bake start attributes\n self.locator.addAttr(self.__class__.locator_startinit_attrname, attributeType='bool')\n self.locator.attr(self.__class__.locator_startinit_attrname).set(False)\n\n self.locator.addAttr(self.__class__.locator_start_attrname, attributeType='float')\n\n # add bake end attributes\n self.locator.addAttr(self.__class__.locator_endinit_attrname, attributeType='bool')\n self.locator.attr(self.__class__.locator_endinit_attrname).set(False)\n\n self.locator.addAttr(self.__class__.locator_end_attrname, attributeType='float')\n\n # Set locator scale to handy value\n scale_x, scale_y, scale_z = self.locator.attr('scale').get()\n\n self.locator.attr('scale').set(1.0, 1.0, 1.0)\n\n bb = exactWorldBoundingBox(self.target)\n local_scale = OpenMaya2.MDistance.internalToUI((bb[3] - bb[0] + bb[4] - bb[1] + bb[5] - bb[2]) / 3.25)\n\n self.locator.getShape().attr('localScale').set(local_scale * scale_x,\n local_scale * scale_y,\n local_scale * scale_z)\n\n def list_target_constraints(self, typ):\n \"\"\"\n Parameters:\n typ: unicode\n Constraint type. 'translate' or 'rotate' can be accepted.\n\n Return value:\n A list of constraint nodes. list of pymel.core.general.PyNode\n\n Description:\n Obtains constraint nodes which drive the target node's attributes\n which typ specifies.\n \"\"\"\n\n if not self.target or not self.locator:\n return []\n\n constraints = set()\n for r in ['X', 'Y', 'Z']:\n dg_iter = OpenMaya.MItDependencyGraph(self.target.attr('%s%s' % (typ, r)).__apimplug__(),\n self.__class__.constraint_types[typ],\n OpenMaya.MItDependencyGraph.kUpstream,\n OpenMaya.MItDependencyGraph.kBreadthFirst,\n OpenMaya.MItDependencyGraph.kNodeLevel)\n while not dg_iter.isDone():\n try:\n fn_node = OpenMaya.MFnDependencyNode(dg_iter.currentItem())\n except Exception as err:\n print(str(err))\n raise err\n else:\n if fn_node:\n constraint = PyNode(fn_node.name())\n if utils.is_affected(self.locator, constraint):\n constraints.add(constraint)\n dg_iter.next()\n\n return list(constraints)\n\n def list_driven_target_channels(self, typ):\n \"\"\"\n Parameters:\n typ: unicode\n Constraint type. 'translate' or 'rotate' can be accepted.\n\n Return value:\n A list of driven attributes. 
list of pymel.core.general.Attribute\n\n Description:\n Obtains component attributes of target node's typ attribute which\n are really driven by constraint locator.\n \"\"\"\n\n if not self.target or not self.locator:\n return []\n\n results = []\n for r in ['X', 'Y', 'Z']:\n attr = self.target.attr('%s%s' % (typ, r))\n if utils.is_affected(self.locator, attr):\n results.append(attr)\n\n return results\n\n def _create_constraints(self, parent, translate_switches, rotate_switches, for_bake=False):\n self._create_constraint_locator(parent)\n\n # get target world translation and world rotation\n translation = self.target.getTranslation(space='world')\n rotation = utils.get_rotation(self.target)\n rotation_order = self.target.getRotationOrder()\n\n # set locator world position and world orientation to target\n xform(self.locator, translation=translation, worldSpace=True)\n self.locator.setRotationOrder(rotation_order, True)\n xform(self.locator, rotation=rotation, worldSpace=True)\n\n result = {'channels':[], 'translate_constraint':None, 'rotate_constraint':None}\n\n # create point constraint\n skip_list = [r for i, r in enumerate(['X', 'Y', 'Z'])\n if not translate_switches[i] or not utils.is_attr_drivable(self.target.attr('translate%s' % r))]\n if len(skip_list) < 3:\n if for_bake:\n result['translate_constraint'] = pointConstraint([self.target], [self.locator],\n maintainOffset=True, skip=skip_list)\n result['channels'].extend([self.locator.attr('translate%s' % r) for r in ['X', 'Y', 'Z']])\n else:\n pointConstraint([self.locator], [self.target], maintainOffset=True, skip=skip_list)\n\n # create orient constraint\n skip_list = [r for i, r in enumerate(['X', 'Y', 'Z'])\n if not rotate_switches[i] or not utils.is_attr_drivable(self.target.attr('rotate%s' % r))]\n if len(skip_list) < 3:\n if for_bake:\n result['rotate_constraint'] = orientConstraint([self.target], [self.locator],\n maintainOffset=True, skip=skip_list)\n result['channels'].extend([self.locator.attr('rotate%s' % r) for r in ['X', 'Y', 'Z']])\n else:\n orientConstraint([self.locator], [self.target], maintainOffset=True, skip=skip_list)\n\n return result\n\n def _restore_constraints(self, translate_switches, rotate_switches):\n # restore translate constraint\n skip_list = [r for i, r in enumerate(['X', 'Y', 'Z'])\n if not translate_switches[i] or not utils.is_attr_drivable(self.target.attr('translate%s' % r))]\n if len(skip_list) < 3:\n for r in ['X', 'Y', 'Z']:\n if not r in skip_list:\n attr = self.target.attr('translate%s' % r)\n inputs = attr.inputs(plugs=True)\n if inputs:\n inputs[0].disconnect(attr)\n pointConstraint([self.locator], [self.target], maintainOffset=True, skip=skip_list)\n\n # restore orient constraint\n skip_list = [r for i, r in enumerate(['X', 'Y', 'Z'])\n if not rotate_switches[i] or not utils.is_attr_drivable(self.target.attr('rotate%s' % r))]\n if len(skip_list) < 3:\n for r in ['X', 'Y', 'Z']:\n if not r in skip_list:\n attr = self.target.attr('rotate%s' % r)\n inputs = attr.inputs(plugs=True)\n if inputs:\n inputs[0].disconnect(attr)\n orientConstraint([self.locator], [self.target], maintainOffset=True, skip=skip_list)\n\n def switch_space(self, parent, translate_switches, rotate_switches, bake=False, start=1, end=24):\n \"\"\"\n Parameters:\n parent: pymel.core.general.PyNode\n A transform node which you want make serve as\n temporary parent.\n translate_switches: [bool, bool, bool]\n Translate constraint switch for x, y and z\n component respectively.\n rotate_switches: [bool, bool, bool]\n Rotate 
constraint switch for x, y and z\n component respectively.\n bake: bool\n If True, keyframes are baked to constraint locator so that\n target's animation is kept the same after constraints\n are made.\n start: int\n Start frame of keyframe bake.\n end: int\n End frame of keyframe bake.\n\n Return value:\n Constraint locator, which is created to constrain the target node.\n pymel.core.general.PyNode\n\n Description:\n Creates a new locator under parent as the constraint locator and\n constraint nodes which drive the target node, according to\n translate_switches and rotate_switches channel settings.\n If bake is True, keyframes are baked to constraint locator so that\n target's animation is kept the same after constraints are made.\n \"\"\"\n\n if self.target is None:\n error('Target to switch space is not set yet.')\n\n if self.locator:\n error('Target already has space-swicth locator.')\n\n if not translate_switches[0] and not translate_switches[1] and not translate_switches[2] and\\\n not rotate_switches[0] and not rotate_switches[1] and not rotate_switches[2]:\n raise Exception('Failed to switch space, since constraint switches are all disabled.')\n\n result = self._create_constraints(parent, translate_switches, rotate_switches, for_bake=bake)\n\n if bake:\n self.start = start\n self.locator.attr(SpaceSwitcher.locator_startinit_attrname).set(True)\n self.locator.attr(SpaceSwitcher.locator_start_attrname).set(self.start)\n\n self.end = end\n self.locator.attr(SpaceSwitcher.locator_endinit_attrname).set(True)\n self.locator.attr(SpaceSwitcher.locator_end_attrname).set(self.end)\n\n utils.bake_animation(result['channels'], self.start, self.end)\n\n try:\n delete(result['translate_constraint'])\n except Exception as err:\n print(str(err))\n print('Failed to delete point constraint: %s' % result['translate_constraint'].name())\n\n try:\n delete(result['rotate_constraint'])\n except Exception as err:\n print(str(err))\n print('Failed to delete orient constraint: %s' % result['rotate_constraint'].name())\n\n self._restore_constraints(translate_switches, rotate_switches)\n\n return self.locator\n\n def delete_constraints(self, bake=False, start=None, end=None):\n \"\"\"\n Parameters:\n bake: bool\n If True, the resultant animation is baked to the target node\n before constraints are deleted.\n start: NoneType or int\n Start frame of keyframe bake. If None is specified,\n start frame is obtained from the value stored in locator's\n custom attribute.\n A specific integer value can be used to override the start\n frame stored as custom attribute.\n end: NoneType or int\n End frame of keyframe bake. If None is specified, end frame is\n obtained from the value stored in locator's custom attribute.\n A specific integer value can be used to override the end frame\n stored as custom attribute.\n\n Return value:\n The target node. 
pymel.core.general.PyNode\n\n Description:\n Deletes the constraint locator and constraint nodes which drive\n the target.\n If bake is True, the resultant animation is baked to the target\n node before constraint structure is deleted.\n \"\"\"\n\n if bake:\n channels = self.list_driven_target_channels('translate') + self.list_driven_target_channels('rotate')\n if channels:\n _start = start\n _end = end\n if start is None:\n _start = self.start\n if end is None:\n _end = self.end\n utils.bake_animation(channels, _start, _end)\n\n for constraint in self.list_target_constraints('translate'):\n try:\n delete(constraint)\n except Exception as err:\n print(str(err))\n print('Failed to delete: %s' % constraint.name())\n\n for constraint in self.list_target_constraints('rotate'):\n try:\n delete(constraint)\n except Exception as err:\n print(str(err))\n print('Failed to delete: %s' % constraint.name())\n\n try:\n delete(self.locator)\n except Exception as err:\n print(str(err))\n print('Failed to delete: %s' % self.locator)\n\n self.locator = None\n self.parent = None\n self.start = None\n self.end = None\n\n return self.target\n\n\ndef switch_space(targets, parent, translate_switches=[True, True, True], rotate_switches=[True, True, True],\n bake=False, start=1, end=24):\n \"\"\"\n Parameters:\n targets: list of PyNode or PyNode or list of unicode or unicode or\n NoneType\n Target transform node(s).\n If None is specified, current selection is used as targets.\n parent: PyNode or unicode\n A transform node which you want to make serve as temporary parent.\n translate_switches: [bool, bool, bool]\n Translate constraint switch for x, y and z component respectively.\n rotate_switches: [bool, bool, bool]\n Rotate constraint switch for x, y and z component respectively.\n bake : bool\n If True, keyframes are baked to constraint locators so that\n targets' animations are kept the same after constraints are made.\n start: int\n Start frame of keyframe bake.\n end: int\n End frame of keyframe bake.\n\n Return value\n A list of constraint locaros, which are created to constrain the\n target nodes. list of PyNode\n\n Description:\n Creates a new locator under parent as the constraint locator and\n constraint nodes which drive each target node according to\n translate_switches and rotate_switches channel settings. 
\n If bake is True, keyframes are baked to each constraint locator\n so that each target's animation is kept the same after constraints\n are made.\n As parameter targets, several argument types can be accepted and\n if None is specified, current selected nodes are considered as\n targets.\n \"\"\"\n\n _targets = None\n if targets is None:\n selections = ls(selection=1)\n if not selections:\n raise Exception('Current selection is empty.')\n _targets = selections\n else:\n _targets = utils.get_pynodes(targets, nodetype=nodetypes.Transform)\n\n _parent = None\n if parent:\n try:\n _parent = PyNode(parent)\n except Exception as err:\n print(str(err))\n print('Failed to obtain PyNode(\"%s\")' % str(parent))\n raise err\n else:\n if not isinstance(_parent, nodetypes.Transform):\n raise ValueError('argument \"parent\" must be a transform or None.')\n\n if not translate_switches[0] and not translate_switches[1] and not translate_switches[2] and\\\n not rotate_switches[0] and not rotate_switches[1] and not rotate_switches[2]:\n raise Exception('Failed to switch space, since constraint switches are all disabled.')\n\n locators = [SpaceSwitcher(target=target).switch_space(_parent, translate_switches, rotate_switches,\n bake=bake, start=start, end=end) for target in _targets]\n\n select(locators, replace=True)\n\n return locators\n\n\ndef delete_switch_space_constraints(targets=None, locators=None, bake=False, start=None, end=None):\n \"\"\"\n Parameters:\n targets: list of PyNode or PyNode or list of unicode or unicode or\n NoneType\n Target transform node(s).\n locators: list of PyNode or PyNode or list of unicode or unicode or\n NoneType\n Constraint locator node(s).\n bake: bool\n If True, the resultant animation is baked to the target node\n before constraints are deleted.\n start: NoneType or int\n Start frame of keyframe bake. If None is specified, start frame is\n obtained from the value stored in locator's custom attribute.\n A specific integer value can be used to override the start frame\n stored as custom attribute.\n end: NoneType or int\n End frame of keyframe bake. If `None` is specified, end frame is\n obtained from the value stored in locator's custom attribute.\n A specific integer value can be used to override the end frame\n stored as custom attribute.\n\n Return value:\n A list of target nodes. 
list of PyNode\n\n Description:\n Deletes the specified constraint locator nodes and their constraint\n nodes.\n Constraint locator nodes can be obtained by specifying target nodes\n also.\n If both targets and locators is None, constraint locators are obtained\n from current selection.\n If bake is True, the resultant animation is baked to each target node\n before constraint structure is deleted.\n \"\"\"\n\n _locators = []\n if targets is None and locators is None:\n selections = ls(selection=1)\n if not selections:\n raise Exception('Current selection is empty.')\n for selection in selections:\n if SpaceSwitcher.get_target_from_locator(selection) and not selection in _locators:\n _locators.append(selection)\n else:\n locator = SpaceSwitcher.get_locator_from_target(selection)\n if locator and not locator in _locators:\n _locators.append(locator)\n else:\n if locators:\n _locators = utils.get_pynodes(locators, nodetype=nodetypes.Transform)\n if targets:\n for target in utils.get_pynodes(targets, nodetype=nodetypes.Transform):\n locator = SpaceSwitcher.get_locator_from_target(target)\n if locator and not locator in _locators:\n _locators.append(locator)\n\n results = [SpaceSwitcher(locator=locator).delete_constraints(bake=bake, start=start, end=end)\n for locator in _locators]\n\n if results:\n select(results, replace=True)\n\n return results\n", "id": "975685", "language": "Python", "matching_score": 7.3857855796813965, "max_stars_count": 0, "path": "tools/spaceSwitcher/python/spaceswitcher/core.py" }, { "content": "\"\"\"\nMIT License\n\nCopyright (c) 2017 <NAME>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nDocumentation\n\nSub module for implementation of basic utility functions used by modules above.\n\n\"\"\"\n\n\nfrom pymel.core import *\n\nimport maya.OpenMaya as OpenMaya\nimport maya.api.OpenMaya as OpenMaya2\n\n\ndef is_affected(upstream_obj, downstream_obj, dg_filter=OpenMaya.MFn.kInvalid):\n root = None\n if isinstance(upstream_obj, general.Attribute):\n root = upstream_obj.__apimplug__()\n elif isinstance(upstream_obj, nt.DependNode):\n root = upstream_obj.__apimobject__()\n\n if root is None:\n raise Exception('Invalid object: %s' % str(upstream_obj))\n\n end = None\n level = None\n if isinstance(downstream_obj, Attribute):\n end = downstream_obj.__apimplug__()\n level = OpenMaya.MItDependencyGraph.kPlugLevel\n elif isinstance(downstream_obj, nt.DependNode):\n end = downstream_obj.__apimobject__()\n level = OpenMaya.MItDependencyGraph.kNodeLevel\n\n if end is None:\n raise Exception('Invalid object: %s' % str(downstream_obj))\n\n try:\n dg_iter = OpenMaya.MItDependencyGraph(root,\n dg_filter,\n OpenMaya.MItDependencyGraph.kDownstream,\n OpenMaya.MItDependencyGraph.kBreadthFirst,\n level)\n except Exception as err:\n print('Failed obtain dg_iter')\n raise err\n\n while not dg_iter.isDone():\n if level == OpenMaya.MItDependencyGraph.kPlugLevel:\n if end == dg_iter.thisPlug():\n return True\n else:\n if end == dg_iter.currentItem():\n return True\n dg_iter.next()\n\n return False\n\n\ndef get_rotation(node):\n rotation = xform(node, q=1, rotation=True, worldSpace=True)\n rotation_order = node.getRotationOrder()\n er = OpenMaya2.MEulerRotation([OpenMaya2.MAngle(r, OpenMaya2.MAngle.uiUnit()).asRadians()\n for r in (rotation[0], rotation[1], rotation[2])],\n rotation_order.index - 1)\n return (OpenMaya2.MAngle.internalToUI(er.x),\n OpenMaya2.MAngle.internalToUI(er.y),\n OpenMaya2.MAngle.internalToUI(er.z))\n\n\ndef is_attr_drivable(attr):\n if not attr.isKeyable() or attr.isLocked():\n return False\n\n inputs = attr.inputs()\n if not inputs:\n return True\n\n if isinstance(inputs[0], nt.AnimCurve):\n return True\n\n return False\n\n\ndef bake_animation(channels, start, end):\n if not channels:\n return\n\n suspend = False\n if not about(batch=True):\n refresh(suspend=True)\n suspend = True\n\n bakeResults(channels,\n simulation=True,\n time=(start, end), sampleBy=1,\n disableImplicitControl=True, preserveOutsideKeys=True,\n sparseAnimCurveBake=True, removeBakedAttributeFromLayer=False,\n bakeOnOverrideLayer=False, minimizeRotation=False)\n\n if suspend:\n refresh(suspend=False)\n\n\ndef get_pynodes(nodes, nodetype=None):\n if nodes is None:\n return None\n\n _nodes = []\n if isinstance(nodes, list) or isinstance(nodes, tuple):\n _nodes = nodes\n else:\n _nodes = [nodes]\n\n pynodes = []\n for _node in _nodes:\n try:\n pynode = PyNode(_node)\n except Exception as err:\n print(str(err))\n print('Failed to obtain PyNode(\"%s\")' % str(_node))\n raise err\n else:\n if pynode:\n if nodetype is None or isinstance(pynode, nodetype):\n pynodes.append(pynode)\n\n return pynodes\n", "id": "11841341", "language": "Python", "matching_score": 7.972876071929932, "max_stars_count": 0, "path": "tools/spaceSwitcher/python/spaceswitcher/utils.py" }, { "content": "\"\"\"\nMIT License\n\nCopyright (c) 2017 <NAME>\n\nPermission is hereby granted, 
free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\nDocumentation\n\nPortal module for main class and functions, designed to be the interface, i.e.\nthe only module that the users of this tool need to import.\nTheir actual implementation is split into sub modules below:\n\nspaceswitcher.core\nspaceswitcher.ui\nspaceswitcher.utils\n\n\"\"\"\n\n\nfrom .ui import launch_ui\nfrom .core import SpaceSwitcher, switch_space, delete_switch_space_constraints\n", "id": "54888", "language": "Python", "matching_score": 0.010155525989830494, "max_stars_count": 0, "path": "tools/spaceSwitcher/python/spaceswitcher/__init__.py" }, { "content": "import os\nimport platform\nimport re\n\n\nclass ValueWrapper(object):\n \"\"\"Wraps a value to be held by a Variable\"\"\"\n\n def __init__(self,\n value=None):\n self._value = value\n\n @property\n def _current_os(self):\n return platform.system().lower()\n\n @property\n def value(self):\n if isinstance(self._value, dict):\n return self._value.get(self._current_os) or self._value.get('common')\n return self._value\n\n @property\n def strict_value(self):\n return self._value.get('strict') if isinstance(self._value, dict) else False\n\n @property\n def absolute_value(self):\n if isinstance(self._value, dict):\n abs_value = self._value.get('abs')\n return (self._current_os in self._value['abs']) if isinstance(abs_value, list) else abs_value\n return None\n\n\nclass Variable(object):\n \"\"\"Defines a variable required by a tool\"\"\"\n\n def __init__(self, name):\n self.name = name\n self.dependency_re = None\n self.dependents = []\n self.values = []\n self.dependencies = []\n self.strict = False\n self.absolute = False\n\n @property\n def env(self):\n var_values = [x for x in self.values]\n if self.absolute:\n var_values = [os.path.abspath(x) for x in var_values]\n return os.pathsep.join(var_values)\n # return os.pathsep.join(self.values)\n\n def list_dependencies(self, value):\n \"\"\"Checks the value to see if it has any dependency on other Variables, returning them in a list\"\"\"\n try:\n self.dependency_re = self.dependency_re or re.compile(r\"\\${\\w*}\")\n matched = self.dependency_re.findall(value)\n if matched:\n dependencies = [match[2:-1] for match in matched if match[2:-1] != self.name]\n return list(set(dependencies))\n except:\n pass\n return []\n\n def append_value(self, value):\n \"\"\"Sets and/or appends a value to the Variable\"\"\"\n value_wrapper = ValueWrapper(value)\n if value_wrapper.strict_value is not None:\n self.strict = value_wrapper.strict_value\n elif value_wrapper.absolute_value is not 
None:\n self.absolute = value_wrapper.absolute_value\n\n value_wrapper_value = value_wrapper.value\n if value_wrapper_value not in self.values and value_wrapper_value is not None:\n self.values += [value_wrapper_value]\n for var_dependency in self.list_dependencies(value_wrapper_value):\n if not var_dependency in self.dependencies:\n self.dependencies.append(var_dependency)\n\n def has_value(self):\n return self.values\n", "id": "10414434", "language": "Python", "matching_score": 1.9314972162246704, "max_stars_count": 0, "path": "Ecosystem/ecosystem/variable.py" }, { "content": "import platform\nfrom .variable import Variable\n\n\nclass Tool(object):\n \"\"\"Defines a tool - more specifically, a version of a tool\"\"\"\n\n def __init__(self, filename):\n try:\n with open(filename, 'r') as f:\n self.in_dictionary = eval(f.read())\n except IOError:\n self.in_dictionary = {}\n print('Unable to find file {0} ...'.format(filename))\n\n self.tool = self.in_dictionary.get('tool', None)\n self.version = self.in_dictionary.get('version', None)\n self.platforms = self.in_dictionary.get('platforms', None)\n # self.requirements = self.in_dictionary.get('requires', None)\n\n @property\n def requirements(self):\n return self.in_dictionary.get('requires', None)\n\n @property\n def tool_plus_version(self):\n return self.tool + (self.version or '')\n\n @property\n def platform_supported(self):\n \"\"\"Check to see if the tool is supported on the current platform\"\"\"\n return platform.system().lower() in self.platforms if self.platforms else False\n\n # TODO: move this to environment?\n def get_vars(self, env):\n for name, value in self.in_dictionary['environment'].items():\n if name not in env.variables:\n env.variables[name] = Variable(name)\n env.variables[name].append_value(value)\n\n # check for optional parameters\n if 'optional' in self.in_dictionary:\n for optional_name, optional_value in self.in_dictionary['optional'].items():\n if optional_name in env.tools:\n for name, value in optional_value.items():\n if name not in env.variables:\n env.variables[name] = Variable(name)\n env.variables[name].append_value(value)\n", "id": "680841", "language": "Python", "matching_score": 0.7146018147468567, "max_stars_count": 0, "path": "Ecosystem/ecosystem/tool.py" }, { "content": "import re\n\n\nclass Want(object):\n \"\"\"Defines a request, possibly with a specific version\"\"\"\n\n def __init__(self,\n requirement):\n self.requirement = requirement\n\n @property\n def tool(self):\n return re.findall(r\".*?(?=[0-9])\", self.requirement + '0')[0]\n\n @property\n def version(self):\n result = re.findall(r\"(?=[0-9]).*\", self.requirement)\n return result[0] if result else ''\n", "id": "4163520", "language": "Python", "matching_score": 1.1627634763717651, "max_stars_count": 0, "path": "Ecosystem/ecosystem/want.py" }, { "content": "import os\nimport glob\nfrom .tool import Tool\nfrom .want import Want\n\n\nclass Environment(object):\n \"\"\"Once initialized this will represent the environment defined by the wanted tools.\"\"\"\n\n def __init__(self, wants, env_dir=None, force=False):\n self._wants = wants\n env_dir = env_dir or os.getenv('ECO_ENV', '')\n self.force = force\n\n # self.tools = {}\n self.variables = {}\n self.success = True\n self.environment_files = os.path.join(env_dir, '*.env')\n\n missing_tools = self.missing_tools\n if missing_tools:\n missing_tools = ', '.join(missing_tools)\n print('Unable to resolve all of the requested tools ({0} is missing), ' \\\n 'please check your list and try 
again!'.format(missing_tools))\n self.success = False\n\n missing_requirements = self.missing_requirements\n if missing_requirements:\n missing_requirements = ', '.join(missing_requirements)\n print('Unable to resolve all of the requirements ({0} is missing), ' \\\n 'please check your list and try again!'.format(missing_requirements))\n self.success = False\n\n for tool_name, tool in self.tools.items():\n tool.get_vars(self)\n\n missing_dependencies = self.missing_dependencies\n if missing_dependencies:\n missing_vars = ', '.join(missing_dependencies)\n print('Unable to resolve all of the required variables ({0} is missing), \\\n please check your list and try again!'.format(missing_vars))\n self.success = False\n\n @property\n def wants(self):\n wants_dict = {}\n for want in [Want(x) for x in set(self._wants)]:\n if want.version and (want.tool in wants_dict):\n # have maya2015 while 'maya' has been processed\n print('Duplicate tool specified: {0} using {1}'.format(want.tool, want.requirement))\n if want.version or (want.tool not in wants_dict):\n # have maya2015, or 'maya' has not been processed\n wants_dict[want.tool] = want\n return wants_dict\n\n @property\n def define_tools(self):\n defined = [Tool(file_name) for file_name in glob.glob(self.environment_files)]\n return dict([(tool.tool_plus_version, tool) for tool in defined])\n\n @property\n def requested_tools(self):\n defined_tools = self.define_tools\n return [defined_tools[want.requirement] for want in self.wants.values() if want.requirement in defined_tools]\n\n @property\n def missing_tools(self):\n defined_tools = self.define_tools\n return [want.requirement for want in self.wants.values() if want.requirement not in defined_tools]\n\n @property\n def required_tools(self):\n required = []\n for requested_tool in self.requested_tools:\n required.extend(requested_tool.requirements)\n return list(set(required))\n\n @property\n def missing_requirements(self):\n requested_tool_names = [x.tool for x in self.requested_tools]\n return [required_tool for required_tool in self.required_tools if required_tool not in requested_tool_names]\n\n @property\n def ext_dependencies(self):\n # check and see if any of the variables dependencies are defined locally to the tool or are considered external\n ext_dependency_list = []\n for name, var in self.variables.items():\n if var.dependencies:\n for dep in var.dependencies:\n if dep not in self.variables:\n if dep not in ext_dependency_list:\n ext_dependency_list.append(dep)\n else:\n self.variables[dep].dependents.append(name)\n return ext_dependency_list\n\n @property\n def missing_dependencies(self):\n return set([dep for dep in self.ext_dependencies if not os.getenv(dep)])\n\n @property\n def tools(self):\n return dict([(new_tool.tool, new_tool) for new_tool in self.requested_tools])\n\n def get_var(self, var):\n if self.success and var is not None:\n if var.name not in self.defined_variables:\n for dependency in var.dependencies:\n self.get_var(self.variables.get(dependency, None))\n self.value += 'setenv {0} {1}'.format(var.name, var.env)\n # self.value += 'export {0}={1}'.format(var.name, var.env)\n if os.getenv(var.name):\n if not self.force and not var.strict:\n if var.env != '':\n self.value += os.pathsep\n self.value += '${{{0}}}'.format(var.name)\n self.value += '\\n'\n self.defined_variables.append(var.name)\n\n def get_var_env(self, var):\n if self.success and var is not None:\n if var.name not in self.defined_variables:\n for dependency in var.dependencies:\n 
self.get_var_env(self.variables.get(dependency, None))\n var_value = var.env\n if var.name in os.environ:\n if not self.force and not var.strict:\n if var_value != '':\n var_value += os.pathsep\n var_value += os.environ[var.name]\n self.defined_variables.append(var.name)\n os.environ[var.name] = var_value\n\n def get_env(self):\n \"\"\"Combine all of the variables in all the tools based on a dependency list and return as string.\"\"\"\n if self.success:\n self.defined_variables = []\n self.value = '#Environment created via Ecosystem\\n'\n for var_name, variable in sorted(self.variables.items()):\n if self.variables[var_name].has_value():\n self.get_var(variable)\n return self.value\n\n def set_env(self, environ=None):\n \"\"\"Combine all of the variables in all the tools based on a dependency list and use to set environment.\"\"\"\n if self.success:\n environ = environ or os.environ\n self.defined_variables = []\n for var_name, variable in self.variables.items():\n if self.variables[var_name].has_value():\n self.get_var_env(variable)\n\n # run this code twice to cross-expand any variables\n for _ in range(2):\n for env_name, env_value in environ.items():\n os.environ[env_name] = os.path.expandvars(env_value)\n", "id": "966073", "language": "Python", "matching_score": 2.7167961597442627, "max_stars_count": 0, "path": "Ecosystem/ecosystem/environment.py" }, { "content": "#!/usr/bin/python\n\n# Copyright (c) 2014, Peregrine Labs, a division of Peregrine Visual Storytelling Ltd. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# * Neither the name of Peregrine Visual Storytelling Ltd., Peregrine Labs\n# and any of it's affiliates nor the names of any other contributors\n# to this software may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nimport glob\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom .environment import Tool, Environment\n\nfrom .settings import MAKE_COMMAND, MAKE_TARGET\n\n_ON_WINDOWS = (platform.system().lower() == 'windows')\n\n\ndef list_available_tools():\n \"\"\"Reads all of the found .env files, parses the tool name and version creates a list.\"\"\"\n eco_env = os.environ.get('ECO_ENV')\n if eco_env is None:\n print('Ecosystem environment folder not found; please set environment variable ECO_ENV...')\n environment_files = os.path.join(os.getenv('ECO_ENV'), '*.env')\n possible_tools = [Tool(file_name) for file_name in glob.glob(environment_files)]\n tool_names = [new_tool.tool_plus_version for new_tool in possible_tools if new_tool.platform_supported]\n return sorted(list(set(tool_names)))\n\n\ndef call_process(arguments):\n if _ON_WINDOWS:\n subprocess.call(arguments, shell=True)\n else:\n subprocess.call(arguments)\n\n\ndef build(tools=None, force_rebuild=False, quick_build=False, deploy=False):\n tools = tools or []\n env = Environment(tools)\n if env.success:\n env.set_env(os.environ)\n build_type = os.getenv('PG_BUILD_TYPE')\n\n if not quick_build:\n if force_rebuild:\n try:\n open('CMakeCache.txt')\n os.remove('CMakeCache.txt')\n except IOError:\n print(\"Cache doesn't exist...\")\n\n call_process(['cmake', '-DCMAKE_BUILD_TYPE={0}'.format(build_type), '-G', MAKE_TARGET, '..'])\n\n if deploy:\n MAKE_COMMAND.append(\"package\")\n\n call_process(MAKE_COMMAND)\n\n\ndef run(tools=None, run_application=None):\n tools = tools or []\n env = Environment(tools)\n if env.success:\n env.set_env(os.environ)\n call_process([run_application])\n\n\ndef set_environment(tools=None):\n tools = tools or []\n env = Environment(tools)\n if env.success:\n output = env.get_env()\n if output:\n print(output)\n\n\ndef main(argv=None):\n argv = argv or sys.argv[1:]\n\n # parse the (command line) arguments; python 2.7+ (or download argparse)\n import argparse\n description = 'Peregrine Ecosystem, environment, build and deploy management toolset v0.5.1'\n parser = argparse.ArgumentParser(prog='ecosystem',\n formatter_class=argparse.RawTextHelpFormatter,\n description=description,\n epilog='''\nExample:\n python ecosystem.py -t maya2014,vray3.05,yeti1.3.0 -r maya\n ''')\n parser.add_argument('-t', '--tools', type=str, default=None,\n help='specify a list of tools required separated by commas')\n parser.add_argument('-l', '--listtools', action='store_true',\n help='list the available tools')\n parser.add_argument('-b', '--build', action='store_true',\n help='run the desired build process')\n parser.add_argument('-d', '--deploy', action='store_true',\n help='build and package the tool for deployment')\n parser.add_argument('-f', '--force', action='store_true',\n help='force the full CMake cache to be rebuilt')\n parser.add_argument('-m', '--make', action='store_true',\n help='just run make')\n parser.add_argument('-r', '--run', type=str, default=None,\n help='run an application')\n 
parser.add_argument('-s', '--setenv', action='store_true',\n help='output setenv statements to be used to set the shells environment')\n\n args = parser.parse_args(argv)\n\n tools = args.tools.split(',') if args.tools is not None else []\n\n try:\n if args.listtools:\n import pprint\n pprint.pprint(list_available_tools(), width=1)\n elif args.build:\n if args.deploy:\n build(tools, True, False, args.deploy)\n else:\n build(tools, args.force, args.make, args.deploy)\n elif args.run is not None:\n run(tools, args.run)\n elif args.setenv:\n set_environment(tools)\n return 0\n except Exception as e:\n sys.stderr.write('ERROR: {0:s}'.format(str(e)))\n return 1\n\n\ndef eneedenv():\n \"\"\"Hook for entry_point eneedenv\"\"\"\n return main(['--setenv', '-t'] + sys.argv[1:])\n\n\ndef elist(aegv=None):\n \"\"\"Hook for entry_point elist\"\"\"\n return main(['--listtools'])\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv[1:]))\n", "id": "9920047", "language": "Python", "matching_score": 3.50716233253479, "max_stars_count": 0, "path": "Ecosystem/ecosystem/main.py" }, { "content": "import os\nimport platform\nimport subprocess\n\n_ON_WINDOWS = (platform.system().lower() == 'windows')\n\n\ndef _determine_number_of_cpus():\n \"\"\"\n Number of virtual or physical CPUs on this system, i.e.\n user/real as output by time(1) when called with an optimally scaling\n userspace-only program\n \"\"\"\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE)\n sc_stdout = sysctl.communicate()[0]\n res = int(sc_stdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudo_devices = os.listdir('/devices/pseudo/')\n import re\n expr = re.compile('^cpuid@[0-9]+$')\n\n res = 0\n for pd in pseudo_devices:\n if expr.match(pd) is not None:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesg_process = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesg_process.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')\n\n\n# set up some global variables\nNUMBER_OF_PROCESSORS = _determine_number_of_cpus()\nMAKE_COMMAND = ['make', '-j', str(NUMBER_OF_PROCESSORS)]\nCLEAN_COMMAND = ['make', 'clean']\nMAKE_TARGET = 'Unix Makefiles'\nif _ON_WINDOWS:\n MAKE_COMMAND = ['jom']\n CLEAN_COMMAND = ['jom', 'clean']\n MAKE_TARGET = 'NMake Makefiles'\n", "id": "5778605", "language": "Python", "matching_score": 0.3095399737358093, "max_stars_count": 0, "path": "Ecosystem/ecosystem/settings.py" }, { "content": "from .main import 
list_available_tools, build, run, set_environment\n", "id": "8007932", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Ecosystem/ecosystem/__init__.py" }, { "content": "import sys\n\nimport ecosystem\n\nif __name__ == \"__main__\":\n sys.exit(ecosystem.main.main(sys.argv[1:]))\n", "id": "4105053", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Ecosystem/ecorun.py" } ]
1.54713
aleksas
[ { "content": "from datetime import datetime, date\n\ndef get_users_activites(external_api_root_url):\n stub_users_activites = [\n {'Email': '<EMAIL>', 'LoginName':'username1'},\n {'Email': '<EMAIL>', 'LoginName':'username1'},\n {'Email': '<EMAIL>', 'LoginName':'username1'}\n ]\n \n return stub_users_activites\n\ndef get_user_map(external_api_root_url):\n acts = get_users_activites(external_api_root_url)\n return { a['Email']:a['LoginName'] for a in acts if a['Email'] }, { a['LoginName']:a['Email'] for a in acts if a['LoginName'] } \n\ndef get_events(external_api_root_url, username, dt):\n stub_events = [\n {'username': username, 'year': dt.year, 'month': dt.month, 'day': dt.day, 'year': dt.year, 'datetime':datetime.now(), 'status': 'Valid'},\n {'username': username, 'year': dt.year, 'month': dt.month, 'day': dt.day, 'year': dt.year, 'datetime':datetime.now(), 'status': 'Valid'},\n {'username': username, 'year': dt.year, 'month': dt.month, 'day': dt.day, 'year': dt.year, 'datetime':datetime.now(), 'status': 'Valid'},\n {'username': username, 'year': dt.year, 'month': dt.month, 'day': dt.day, 'year': dt.year, 'datetime':datetime.now(), 'status': 'Valid'},\n {'username': username, 'year': dt.year, 'month': dt.month, 'day': dt.day, 'year': dt.year, 'datetime':datetime.now(), 'status': 'Valid'},\n {'username': username, 'year': dt.year, 'month': dt.month, 'day': dt.day, 'year': dt.year, 'datetime':datetime.now(), 'status': 'Valid'}\n ]\n \n return stub_events\n\nif __name__ == '__main__':\n external_api_root_url = 'http://external.api.address/'\n users_activites = get_users_activites(external_api_root_url)\n dt = date(year=2019, month=2, day=6)\n\n for users_activity in users_activites:\n username = users_activity['LoginName']\n events = get_events(external_api_root_url, username, dt)\n\n print (events)\n", "id": "6597813", "language": "Python", "matching_score": 1.9846656322479248, "max_stars_count": 5, "path": "attendance_extension/attendance_extension/doctype/attendance_settings/external_api.py" }, { "content": "from frappe.utils.background_jobs import enqueue\nfrom frappe import get_single, get_all, get_doc\nfrom attendance_extension.attendance_extension.doctype.attendance_settings.external_api import get_user_map, get_events\nfrom datetime import datetime, timedelta, date\n\ndef update_attendance_from_external_api(start_date, is_async=True, queue='default'):\n external_api_root_url = get_single('Attendance Settings').external_api_url\n employees = get_all_active_employees()\n employees = {e['company_email'].strip():e for e in employees}\n external_email_map, _ = get_user_map(external_api_root_url)\n\n for email, username in external_email_map.items():\n enqueue(\n 'attendance_extension.attendance_extension.doctype.attendance_settings.tools.update_employee_attendance_from_external_api_internal', \n queue=queue, \n is_async=is_async, \n start_date=start_date, \n email=email, \n external_username=username, \n external_api_root_url=external_api_root_url)\n\ndef update_employee_attendance(start_date, email):\n external_api_root_url = get_single('Attendance Settings').external_api_url\n external_email_map, _ = get_user_map(external_api_root_url)\n if email not in external_email_map.keys():\n return #ToDo: fix hack. 
not all external users may have email\n update_employee_attendance_internal(\n start_date, \n email, \n external_email_map[email], \n external_api_root_url)\n\ndef update_employee_attendance_from_external_api_internal(start_date, email, external_username, external_api_root_url=None, include_attendance=True):\n if not external_api_root_url:\n external_api_root_url = get_single('Attendance Settings').external_api_url\n\n if isinstance(start_date, datetime):\n start_date = start_date.date()\n\n past_days = (date.today() - start_date).days + 1\n\n emp_ = get_employee(email)\n if not emp_:\n return \n employee, company = emp_\n\n for days in range(past_days):\n time_logs = []\n dt = (start_date + timedelta(days = days))\n events = get_events(external_api_root_url, external_username, dt)\n if events and len(events) > 0:\n for event in events:\n if event['Status'] != 'Valid' and event['Status'] != 'Check-in':\n continue\n event_timestamp = event['datetime'].isoformat(sep=' ')\n\n if len(time_logs) == 0 or \"to_time\" in time_logs[-1]:\n time_logs.append({})\n \n if \"from_time\" not in time_logs[-1]:\n time_logs[-1][\"from_time\"] = event_timestamp\n else:\n time_logs[-1][\"to_time\"] = event_timestamp\n\n if len(time_logs) != 0 and \"to_time\" not in time_logs[-1]:\n time_logs[-1][\"to_time\"] = None\n\n update_attendance(\n employee, \n (start_date + timedelta(days = days)).strftime('%Y-%m-%d'), \n company, \n time_logs)\n\ndef add_attendance(employee, date, time_logs, company):\n d_ = {'doctype': 'Attendance',\n 'attendance_date': date,\n 'company': company,\n 'employee': employee,\n 'time_logs': time_logs}\n\n attendance = get_doc(d_)\n attendance.insert()\n attendance.submit()\n\n return attendance\n\ndef get_employee(email, company=None):\n filters = {'company_email': email}\n if company:\n filters['company'] = company\n\n for doc in get_all('Employee', filters=filters, fields=['employee', 'company']):\n return doc.employee, doc.company\n\ndef get_all_active_employees(company=None):\n filters = {'status': 'Active'}\n if company:\n filters['company'] = company\n\n return get_all('Employee', filters=filters, fields=['employee', 'company', 'company_email'])\n\ndef find_attendance(employee, date, company=None):\n params = {'employee':employee, 'attendance_date':date}\n if company:\n params['company'] = company\n doc = get_all('Attendance', filters=params, fields=['name'])\n if len(doc) > 0:\n return get_doc('Attendance', doc[0]['name'])\n\ndef update_attendance(employee, start_date_str, company, time_logs):\n attendance = find_attendance(employee, start_date_str, company)\n\n if attendance:\n attendance.update({'time_logs': time_logs})\n attendance.save()\n else:\n add_attendance(employee, start_date_str, time_logs, company)\n", "id": "754495", "language": "Python", "matching_score": 3.3522722721099854, "max_stars_count": 5, "path": "attendance_extension/attendance_extension/doctype/attendance_settings/tools.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport frappe \nfrom datetime import datetime, date, timedelta\nfrom attendance_extension.attendance_extension.doctype.attendance_settings.tools import update_attendance_from_external_api\n\[email protected]()\ndef test():\n hourly(is_async=False)\n\ndef hourly(is_async=True):\n start_date = datetime.now()\n update_attendance_from_external_api(start_date, queue='default', is_async=is_async)\n\ndef daily():\n start_date = datetime.now() - timedelta(weeks=1)\n update_attendance_from_external_api(start_date, 
queue='long')\n\ndef weekly():\n start_date = datetime.now() - timedelta(weeks=4)\n update_attendance_from_external_api(start_date, queue='long')\n\ndef monthly():\n start_date = datetime.now() - timedelta(weeks=52)\n update_attendance_from_external_api(start_date, queue='long')\n", "id": "2319314", "language": "Python", "matching_score": 2.39463472366333, "max_stars_count": 5, "path": "attendance_extension/attendance_extension/doctype/attendance_settings/tasks.py" }, { "content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt\nfrom __future__ import unicode_literals\n\nimport frappe, unittest\nfrom attendance_extension.attendance_extension.doctype.attendance_settings.tasks import test\n\nclass TestAttendance(unittest.TestCase):\n\tdef test_attendance_hourly(self):\n\t\ttest()\n", "id": "3781904", "language": "Python", "matching_score": 1.7229044437408447, "max_stars_count": 5, "path": "attendance_extension/tests/test_attendance.py" }, { "content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# MIT License. See license.txt\n\nfrom __future__ import unicode_literals\n\nimport frappe\nfrom werkzeug.wrappers import Request\nfrom werkzeug.test import EnvironBuilder\n\ndef set_request(**kwargs):\n\tbuilder = EnvironBuilder(**kwargs)\n\tfrappe.local.request = Request(builder.get_environ())\n\ndef insert_test_data(doctype, sort_fn=None):\n\tpass\n\n", "id": "305939", "language": "Python", "matching_score": 0.0412667915225029, "max_stars_count": 5, "path": "attendance_extension/tests/__init__.py" }, { "content": "import numpy as np\nimport pylab as pl\nfrom pydelay import dde23\n\ntfinal = 10000\ntau = 1000\n\n#the laser equations\neqns = { \n 'E:c': '0.5*(1.0+ii*a)*E*n + K*E(t-tau)',\n 'n' : '(p - n - (1.0 +n) * pow(abs(E),2))/T'\n}\n\nparams = { \n 'a' : 4.0, \n 'p' : 1.0, \n 'T' : 200.0, \n 'K' : 0.1, \n 'tau': tau,\n 'nu' : 10**-5,\n 'n0' : 10.0\n}\n\nnoise = { 'E': 'sqrt(0.5*nu*(n+n0)) * (gwn() + ii*gwn())' }\n\ndde = dde23(eqns=eqns, params=params, noise=noise)\ndde.set_sim_params(tfinal=tfinal)\n\n# use a dictionary to set the history\nthist = np.linspace(0, tau, tfinal)\nEhist = np.zeros(len(thist))+1.0\nnhist = np.zeros(len(thist))-0.2\ndic = {'t' : thist, 'E': Ehist, 'n': nhist}\n\n# 'useend' is True by default in hist_from_dict and thus the \n# time array is shifted correctly\ndde.hist_from_arrays(dic)\n\ndde.run()\n\nt = dde.sol['t']\nE = dde.sol['E']\nn = dde.sol['n']\n\nspl = dde.sample(-tau, tfinal, 0.1)\n\npl.plot(t[:-1], t[1:] - t[:-1], '0.8', label='step size')\npl.plot(spl['t'], abs(spl['E']), 'g', label='sampled solution')\npl.plot(t, abs(E), '.', label='calculated points')\npl.legend()\n\npl.xlabel('$t$')\npl.ylabel('$|E|$')\n\npl.xlim((0.95*tfinal, tfinal))\npl.ylim((0,3))\npl.show()\n", "id": "6269737", "language": "Python", "matching_score": 4.488194942474365, "max_stars_count": 1, "path": "pydelay/doc/pyplots/lk.py" }, { "content": "import numpy as np\nimport pylab as pl\nfrom pydelay import dde23\nimport time\n\neqns = { 'E:C': '0.5*(1.0+ii*a)*E*n + K*E(t-tau)',\n 'n' : '(p - n - (1.0 +n) * pow(abs(E),2))/T'}\n\nparams = { 'a' : 4.0, \n 'p' : 1.0, \n 'T' : 1000.0, \n 'K' : 0.1, \n 'tau': 1000,\n 'nu' : 10**-5,\n 'n0' : 10.0\n }\n\nnoise = { 'E': 'sqrt(0.5*nu*(n+n0)) * (gwn() + ii*gwn())' }\n\ndde = dde23(eqns=eqns, params=params, noise=noise)\n\ntfinal = 20000\ndde.set_sim_params(tfinal=tfinal)\n\n# use a dictionary to set the history\nthist = np.linspace(0, 1000, 10000)\nEhist = 
np.sin(0.01*thist)*np.exp(1.0j*0.001*thist)\nnhist = np.sin(0.01*thist)-1\n\ndic = {'t' : thist, 'E': Ehist, 'n': nhist}\ndde.hist_from_arrays(dic)\n\ndde.run()\n\nt = dde.sol['t']\nE = dde.sol['E']\nn = dde.sol['n']\n\nspl = dde.sample(-1000, 20000, 0.1)\n\npl.plot(t, abs(E), '.', label='calculated points')\npl.plot(spl['t'], abs(spl['E']), 'g', label='spline interpolation')\npl.plot(t[:-1], t[1:] - t[:-1], 'k', label='step size')\npl.legend()\n\npl.xlim((-1000, tfinal))\npl.show()\n", "id": "2952036", "language": "Python", "matching_score": 2.4121291637420654, "max_stars_count": 1, "path": "pydelay/examples/lk.py" }, { "content": "import numpy as np\nimport pylab as pl\n\n# import the solver\nfrom pydelay import dde23\n\n# define the FHN equations as in \n# <NAME>. , <NAME>., <NAME>. and <NAME>. , Dynamics of delay-coupled excitable neural systems, Int. J. Bifur. Chaos 19, 745 (2009)\neqns = { \n 'x1': '(x1 - pow(x1,3)/3.0 - y1 + C*(x2(t-tau) - x1))/eps',\n 'y1': 'x1 + a',\n 'x2': '(x2 - pow(x2,3)/3.0 - y2 + C*(x1(t-tau) - x2))/eps',\n 'y2': 'x2 + a'\n }\n\n# set the parameters and the delay\nparams = { \n 'a': 1.3,\n 'eps': 0.01,\n 'C': 0.5,\n 'tau': 3.0\n }\n\n\n# initalise the solver\ndde = dde23(eqns=eqns, params=params)\n\n# set the simulation parameters\ndde.set_sim_params(tfinal=200)\n\n# When nothing is specified, the history for all variables \n# is initialized to 0.\n#\ndde.hist_from_funcs({'x1': lambda t: 1.0})\n\n# run the simulation\ndde.run()\n\n# sample the solution with sample size dt=0.01 between 170 and 200\nsol = dde.sample(170, 200, 0.01)\n\n# plot the solution\nx1 = sol['x1']\ny1 = sol['y1']\nx2 = sol['x2']\ny2 = sol['y2']\nt = sol['t']\n\npl.subplot(221)\npl.plot(t, x1, 'r')\npl.plot(t, y1, 'g')\npl.xlabel('$t$')\npl.ylabel('$x_1, y_1$')\n\npl.subplot(222)\npl.plot(x1, x2, 'r')\npl.xlabel('$x_1$')\npl.ylabel('$x_2$')\n\npl.subplot(223)\npl.plot(t, x2, 'r')\npl.plot(t, y2, 'g')\npl.xlabel('$t$')\npl.ylabel('$x_2, y_2$')\n\npl.subplot(224)\npl.plot(y1, y2, 'g')\npl.xlabel('$y_2$')\npl.ylabel('$y_2$')\n\npl.show()\n", "id": "12446918", "language": "Python", "matching_score": 3.6223762035369873, "max_stars_count": 1, "path": "pydelay/examples/fhn.py" }, { "content": "import numpy as np\nimport pylab as pl\n\n# import the solver\nfrom pydelay import dde23\n\neqns = { \n 'x': '-x + k*x(t-10) + A* f(w,t)'\n }\n\nparams = { \n 'k': 0.1,\n 'w': 2.0,\n 'A': 0.5\n }\n\n# We can define a c function to be used in the equations\nmycode = \"\"\"\n double f(double w, double t) {\n return sin(w*t);\n }\n \"\"\"\n\n# initalise the solver\ndde = dde23(eqns=eqns, params=params, supportcode=mycode)\n\n# set the simulation parameters\ndde.set_sim_params(tfinal=40)\n\n# we can define the history as a python function\ndef myhist(t):\n return 0.01*t**2\n\ndde.hist_from_funcs({'x': myhist})\n\n# run the simulation\ndde.run()\n\nsol = dde.sample(0.01)\nt = sol['t']\nx = sol['x']\n\npl.plot(t, x)\npl.xlim((0,40))\npl.xlabel('$t$')\npl.ylabel('$x$')\npl.ylim((x.min(), x.max()))\npl.show()\n", "id": "5976652", "language": "Python", "matching_score": 3.21236252784729, "max_stars_count": 1, "path": "pydelay/examples/ccode_example.py" }, { "content": "# import pydelay and numpy and pylab\nimport numpy as np\nimport pylab as pl\nfrom pydelay import dde23\n\n# define the equations\neqns = { \n 'x' : '0.25 * x(t-tau) / (1.0 + pow(x(t-tau),p)) -0.1*x' \n }\n\n#define the parameters\nparams = {\n 'tau': 15,\n 'p' : 10\n }\n\n# Initialise the solver\ndde = dde23(eqns=eqns, params=params)\n\n# set the 
simulation parameters \n# (solve from t=0 to t=1000 and limit the maximum step size to 1.0)\ndde.set_sim_params(tfinal=1000, dtmax=1.0)\n\n# set the history of to the constant function 0.5 (using a python lambda function)\nhistfunc = {\n 'x': lambda t: 0.5 \n } \ndde.hist_from_funcs(histfunc, 51)\n\n# run the simulator\ndde.run()\n\n# Make a plot of x(t) vs x(t-tau):\n# Sample the solution twice with a stepsize of dt=0.1:\n\n# once in the interval [515, 1000]\nsol1 = dde.sample(515, 1000, 0.1)\nx1 = sol1['x']\n\n# and once between [500, 1000-15]\nsol2 = dde.sample(500, 1000-15, 0.1)\nx2 = sol2['x']\n\npl.plot(x1, x2)\npl.xlabel('$x(t)$')\npl.ylabel('$x(t - 15)$')\npl.show()\n", "id": "4744192", "language": "Python", "matching_score": 3.867276191711426, "max_stars_count": 1, "path": "pydelay/doc/pyplots/mackey-glass.py" }, { "content": "import numpy as np\nimport pylab as pl\nfrom pydelay import dde23\n\neqns = { 'x' : '0.25 * x(t-tau) / (1.0 + pow(x(t-tau),10.0)) -0.1*x' }\n\ndde = dde23(eqns=eqns, params={'tau': 15})\ndde.set_sim_params(tfinal=1000, dtmax=1.0, AbsTol=10**-6, RelTol=10**-3)\n\nhistfunc = {'x': lambda t: 0.5 } \ndde.hist_from_funcs(histfunc, 51)\ndde.run()\n\nsol1 = dde.sample(515, 1000, 0.1)\nx1 = sol1['x']\nsol2 = dde.sample(500, 1000-15, 0.1)\nx2 = sol2['x']\n\npl.plot(x1, x2)\npl.xlabel('$x(t)$')\npl.ylabel('$x(t-15)$')\n\npl.show()\n", "id": "4555678", "language": "Python", "matching_score": 0.14955143630504608, "max_stars_count": 1, "path": "pydelay/examples/mackey-glass.py" }, { "content": "# Copyright (C) 2009 <NAME>\n\nimport re\nimport numbers\nimport numpy as np\n\ndef gen_disconts(t0, t1, delays, initdisconts=None, order=3, rounddigits=5):\n \"\"\"Generate a list of all possible discontinuities in the range [t0, t1]\n up to 'order' with initial discontinuities given by 'initdisconts'.\n The adaptive step size methods should step on the discontinuity points.\n\n 'delays' can be a dictionary or a list of numbers.\n\n >>> gen_disconts(0, 100, [2.0, 4.0], [-10, 0], 3)\n array([ 0., 2., 4., 6., 8., 10., 12.])\n\n >>> gen_disconts(0, 100, [3.0, 5.0], [0], 2)\n array([ 0., 3., 5., 6., 8., 10.])\n \"\"\"\n\n delays = np.unique(np.array(delays))\n\n if initdisconts == None:\n newdis = delays.copy()\n order -= 1\n else:\n initdisconts = np.unique(np.array(initdisconts))\n newdis = initdisconts\n\n alldis = np.array([round(dis,rounddigits) for dis in newdis])\n alldis = alldis[np.where((alldis>=t0) & (alldis<=t1))]\n\n for o in range(order):\n tempdis = newdis\n l = tempdis.size\n newdis = np.empty([ l * len(delays),])\n i=0\n for delay in delays:\n newdis[i:i+l] = tempdis + delay\n i += l\n\n newdis = np.unique(newdis)\n\n # rounding via numpy, very fast, but a bit different\n #x = np.round(newdis,rounddigits)\n # rounding, the old slow way to not change known behavior\n x = np.array([round(dis,rounddigits) for dis in newdis])\n\n x = x[np.where((x>=t0) & (x<=t1))]\n\n alldis = np.concatenate((alldis, x))\n\n # unique() already sorts!\n alldis = np.unique(alldis)\n\n return alldis\n\ndef _symbols_allowedQ(eqn):\n \"\"\"Takes an eqn string and returns True if all symbols\n are allowed and False otherwise\n \n >>> map(_symbols_allowedQ, ['2.0*sin(x)* 3/2 + 1-2', 'a**b', 'a^b', 'a{b', 'a}b'])\n [True, False, False, False, False]\n >>> map(_symbols_allowedQ, ['a[b', 'a]b', 'a&c', 'a#b', 'a!b', 'a@b', 'a$b', 'a%b'])\n [False, False, False, False, False, False, False, False]\n >>> map(_symbols_allowedQ, ['a?b', 'a;b', 'a>b', '|', \"\\\\\\\\\"])\n [False, False, False, False, 
False]\n \"\"\"\n ForbiddenPattern =\\\n r'(\\*\\*|\\^|\\{|\\}|\\[|\\]|\\&|\\#|\\!|\\@|\\$|\\%|\\?|\\;|\\>|\\<|\\||\\\\)'\n res = re.search(ForbiddenPattern, eqn)\n if res:\n return False\n else:\n return True\n\ndef _parenthesis_balancedQ(eqn):\n \"\"\"Takes an eqn string and return True if parenthesis\n are balanced and False otherwise\n\n >>> map(_parenthesis_balancedQ,['(fdjd)*d((2-1)+x*2)', 'fs*(1-(x*2*(a+b))', 'dfs * (x-2) + b)'])\n [True, False, False]\n \"\"\"\n # 'opened_parenthesis' is increased, with '(' and decreased with ')'\n # and should be zero at the end (and never negative)\n opened_parenthesis = 0\n for ch in eqn:\n if ch == '(':\n opened_parenthesis += 1\n elif ch in ')':\n if opened_parenthesis == 0:\n return False\n else:\n opened_parenthesis -= 1\n if opened_parenthesis == 0:\n return True\n else:\n return False\n\ndef assure_array(array_candidate):\n \"\"\"If array_candidate is a tuple, list or array cast it to a numpy array \n and return it, else assert an error\"\"\"\n assert isinstance(array_candidate, (tuple, list, np.ndarray)),\\\n \"Error: this should be a list, tuple or array\"\n return np.array(array_candidate)\n\ndef isrealnum(real_candidate):\n \"\"\"\n >>> map(isrealnum, [0.1, 0.2, 0.1+0.2j, \"a\", (1.0, 2.0)])\n [True, True, False, False, False]\n \"\"\"\n if not isinstance(real_candidate, numbers.Number):\n return False\n if not hasattr(real_candidate, 'imag'):\n return True\n return real_candidate.imag == 0\n\ndef isnum(num_candidate):\n \"\"\"\n >>> map(isnum, [0.1, 0.2, 0.1+0.2j, \"a\", (1.0, 2.0)])\n [True, True, True, False, False]\n \"\"\"\n return isinstance(num_candidate, numbers.Number)\n\ngwn_code = \"\"\"\ndouble ra01()\n{ \n return(double(rand())/RAND_MAX);\n}\n\ndouble gwn()\n{\n static int iset=0;\n static double gset;\n\n double fac,rsq,v1,v2;\n\n if(iset==0) \n {\n do \n {\n v1=2.0*ra01()-1.;\n v2=2.0*ra01()-1.;\n rsq=v1*v1+v2*v2;\n } \n while(rsq>1.0 || rsq==0);\n\n fac=sqrt(-2.0*log(rsq)/rsq);\n gset=v1*fac;\n iset=1;\n return v2*fac;\n } \n else \n {\n iset=0;\n return gset;\n }\n}\n\"\"\"\n\nif __name__== '__main__':\n import doctest\n doctest.testmod()\n", "id": "10106611", "language": "Python", "matching_score": 0.6436701416969299, "max_stars_count": 1, "path": "pydelay/helper.py" }, { "content": "'''\nDefines the set of symbols used in text input to the model.\n\nThe default is a set of ASCII characters that works well for English or text that has been run\nthrough Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.\n'''\nfrom . import cmudict\n\n_pad = '_'\n_eos = '~'\n_characters = 'AĄBCČDEĘĖFGHIĮJKLMNOPQRSŠTUŲŪVWXYZŽaąbcčdeęėfghiįjklmnopqrsštuųūvwxyzž!\\'(),-.:;? 2345'\n\n# Export all symbols:\nsymbols = [_pad, _eos] + list(_characters)\n", "id": "9704511", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tacotron/utils/symbols.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom ctypes import *\nfrom platform import uname, architecture\nfrom os.path import join, dirname\nfrom struct import unpack\n\n_supported_os = ['Windows', 'Linux']\n_supported_architecture = ['64bit', '32bit']\n\n_os = uname()[0]\n_architecture = architecture()[0]\n\nif _os not in _supported_os or _architecture not in _supported_architecture:\n msgs = []\n if _os not in _supported_os:\n msgs.append('%s OS not supported. Must be one of: %s. ' % (_os, str(_supported_os)))\n if _architecture not in _supported_architecture:\n msgs.append('%s CPU architecture not supported. Must be one of: %s.' 
% (_os, str(_supported_architecture)))\n \n raise Exception(' '.join(msgs))\n\nfolder = ''\nprefix = ''\nif _os == \"Windows\":\n if _architecture == '64bit':\n folder = 'Win64_x64'\n if _architecture == '32bit':\n folder = 'Win32_x86'\n lib_ext = \".dll\"\nelif _os == \"Linux\":\n if _architecture == '64bit':\n folder = 'Linux_x86_64'\n if _architecture == '32bit':\n folder = 'Linux_x86'\n lib_ext = \".so\"\n prefix = 'lib'\n\n_native_encoding = 'windows-1257'\n\n_lib_path = join(dirname(__file__), folder, prefix + \"PhonologyEngine\" + lib_ext)\n_PhonologyEngineLibrary = CDLL(_lib_path)\n\n_PhonologyEngineInit = _PhonologyEngineLibrary.PhonologyEngineInit\n\n_PhonologyEngineNozmalizeText = _PhonologyEngineLibrary.PhonologyEngineNormalizeText#(char * szText, NormalizedTextHandle * pHandle);\n_PhonologyEngineNozmalizeText.argtypes = [c_char_p, POINTER(c_void_p)]\n\n_PhonologyEngineNormalizedTextFree = _PhonologyEngineLibrary.PhonologyEngineNormalizedTextFree#(NormalizedTextHandle * pHandle);\n_PhonologyEngineNormalizedTextFree.argtypes = [POINTER(c_void_p)]\n\n_PhonologyEngineNormalizedTextGetPhraseCount = _PhonologyEngineLibrary.PhonologyEngineNormalizedTextGetPhraseCount#(NormalizedTextHandle handle, int * pValue);\n_PhonologyEngineNormalizedTextGetPhraseCount.argtypes = [c_void_p, POINTER(c_int)]\n\n_PhonologyEngineNormalizedTextGetPhrase = _PhonologyEngineLibrary.PhonologyEngineNormalizedTextGetPhrase#(NormalizedTextHandle handle, int index, char ** pSzValue);\n_PhonologyEngineNormalizedTextGetPhrase.argtypes = [c_void_p, c_int, POINTER(c_char_p)]\n\n_PhonologyEngineNormalizedTextGetPhraseLetterMap = _PhonologyEngineLibrary.PhonologyEngineNormalizedTextGetPhraseLetterMap#(NormalizedTextHandle handle, int index, int ** pArValue, int * pCount)\n_PhonologyEngineNormalizedTextGetPhraseLetterMap.argtypes = [c_void_p, c_int, POINTER(POINTER(c_int)), POINTER(c_int)]\n\n_PhonologyEngineProcessPhrase = _PhonologyEngineLibrary.PhonologyEngineProcessPhrase#(char * szNormalizedText, PhonologyEngineOutputHandle * pHandle);\n_PhonologyEngineProcessPhrase.argtypes = [c_char_p, POINTER(c_void_p)]\n\n_PhonologyEngineOutputFree = _PhonologyEngineLibrary.PhonologyEngineOutputFree#(PhonologyEngineOutputHandle * pHandle);\n_PhonologyEngineOutputFree.argtypes = [POINTER(c_void_p)]\n\n_PhonologyEngineOutputGetWordCount = _PhonologyEngineLibrary.PhonologyEngineOutputGetWordCount#(PhonologyEngineOutputHandle hOutput, int * pValue);\n_PhonologyEngineOutputGetWordCount.argtypes = [c_void_p, POINTER(c_int)]\n\n_PhonologyEngineOutputGetWord = _PhonologyEngineLibrary.PhonologyEngineOutputGetWord#(PhonologyEngineOutputHandle hOutput, int wordIndex, char ** pszValue);\n_PhonologyEngineOutputGetWord.argtypes = [c_void_p, c_int, POINTER(c_char_p)]\n\n_PhonologyEngineOutputGetWordSyllables = _PhonologyEngineLibrary.PhonologyEngineOutputGetWordSyllables#(PhonologyEngineOutputHandle hOutput, int wordIndex, char ** pszValue);\n_PhonologyEngineOutputGetWordSyllables.argtypes = [c_void_p, c_int, POINTER(c_char_p)]\n\n_PhonologyEngineOutputGetWordStressOptionCount = _PhonologyEngineLibrary.PhonologyEngineOutputGetWordStressOptionCount#(PhonologyEngineOutputHandle hOutput, int wordIndex, int * pValue);\n_PhonologyEngineOutputGetWordStressOptionCount.argtypes = [c_void_p, c_int, POINTER(c_int)]\n\n_PhonologyEngineOutputGetWordStressOptionSelected = _PhonologyEngineLibrary.PhonologyEngineOutputGetWordStressOptionSelected#(PhonologyEngineOutputHandle hOutput, int wordIndex, int * 
pValue);\n_PhonologyEngineOutputGetWordStressOptionSelected.argtypes = [c_void_p, c_int, POINTER(c_int)]\n\n_PhonologyEngineOutputGetWordStressOption = _PhonologyEngineLibrary.PhonologyEngineOutputGetWordStressOption#(PhonologyEngineOutputHandle hOutput, int wordIndex, int optionIndex, int * pLetterIndex, int * pStressType, int * pVocabulary, int * pGrammarRule)\n_PhonologyEngineOutputGetWordStressOption.argtypes = [c_void_p, c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)]\n\ndef _check(res):\n if res != 0:\n raise Exception(\"_PhonologyEngineInit failed: %d\" % res)\n\n_check( _PhonologyEngineInit() )\n\ndef phonology_engine_normalize_text(text):\n handle = c_void_p()\n cs = c_char_p(text.encode(_native_encoding))\n\n _check( _PhonologyEngineNozmalizeText( cs, byref(handle) ) )\n\n return handle\n\ndef phonology_engine_normalized_text_free(handle):\n _check( _PhonologyEngineNormalizedTextFree( byref(handle) ) )\n\ndef phonology_engine_normalized_text_get_phrase_count(handle):\n value = c_int(0)\n\n _check( _PhonologyEngineNormalizedTextGetPhraseCount( handle, byref(value) ) )\n\n return value.value\n\ndef phonology_engine_normalized_text_get_phrase(handle, index):\n cs = c_char_p()\n\n _check( _PhonologyEngineNormalizedTextGetPhrase( handle, c_int(index), byref(cs) ) )\n\n if not cs.value:\n return ''\n return cs.value.decode(_native_encoding)\n\ndef phonology_engine_normalized_text_get_phrase_letter_map(handle, index):\n ci = POINTER(c_int)()\n c = c_int(0)\n\n _check( _PhonologyEngineNormalizedTextGetPhraseLetterMap( handle, c_int(index), byref(ci), byref(c) ) )\n\n return [ci[i] for i in range(c.value)]\n\ndef phonology_engine_process_phrase(text):\n\n handle = c_void_p()\n cs = c_char_p(text.encode(_native_encoding))\n\n _check( _PhonologyEngineProcessPhrase( cs, byref(handle) ) )\n\n return handle\n\ndef phonology_engine_output_free(handle):\n _check( _PhonologyEngineOutputFree( byref(handle) ) )\n\ndef phonology_engine_output_get_word_count(handle):\n value = c_int(0)\n _check( _PhonologyEngineOutputGetWordCount(handle, byref(value)))\n return value.value\n\ndef phonology_engine_output_get_word(handle, word_index):\n cs = c_char_p()\n _check( _PhonologyEngineOutputGetWord(handle, c_int(word_index), byref(cs)) ) \n return cs.value.decode(_native_encoding)\n\ndef phonology_engine_output_get_word_syllables(handle, word_index):\n cs = c_char_p()\n\n _check( _PhonologyEngineOutputGetWordSyllables(handle, c_int(word_index), byref(cs)) ) \n\n values = []\n if cs.value:\n values = [unpack(\"<b\", v)[0] if not isinstance(v, int) else v for v in cs.value.split(b'\\n')[0]]\n\n indeces = []\n \n if len(values) > 1 and values[-1] == 2:\n values = values[0:-1]\n\n for i, v in enumerate(values):\n if v == 2:\n indeces.append(i)\n\n return indeces\n\ndef _get_case_name(case):\n if case >= 7:\n case -= 7\n\n if case == 0:\n return u'Vardininkas'#, 'Nominativus'\n elif case == 1:\n return u'Kilmininkas'#, 'Genitivus'\n elif case == 2:\n return u'Naudininkas'#, 'Dativus'\n elif case == 3:\n return u'Galininkas'#, 'Accusativus'\n elif case == 4:\n return u'Įnagininkas'#, 'Instrumentalis'\n elif case == 5:\n return u'Vietininkas'#, 'Locativus'\n elif case == 6:\n return u'Šauksmininkas'#, 'Vocativus'\n else:\n return 'UNKNOWN'\n\ndef phonology_engine_output_decode_option(option):\n stressed_letter_index, stress_type, vocabulary, value = option\n\n if not isinstance(value, int):\n raise Exception(\"Invalide value type, must be int\")\n\n result = {}\n if vocabulary == 0:\n 
result['rule'] = u'Veiksmazodžių kamienas ir galune (taisytina)'\n if vocabulary == 1:\n result['rule'] = u'Nekaitomas žodis'\n if vocabulary == 2:\n result['rule'] = u'Linksnis ir kamieno tipas'\n\n stem_type = value >> 8\n case = value - (stem_type << 8)\n\n if case >= 0 and case <= 6:\n result['number'] = 'vienaskaita'\n elif case >= 0 and case <= 12:\n result['number'] = 'daugiskaita'\n\n result['stem_type'] = stem_type\n result['grammatical_case'] = _get_case_name(case)\n result['stressed_letter_index'] = stressed_letter_index\n result['stress_type'] = stress_type\n \n return result\n\ndef phonology_engine_output_get_word_stress_options(handle, word_index):\n count, selected_index = c_int(0), c_int(0)\n\n _check( _PhonologyEngineOutputGetWordStressOptionCount(handle, c_int(word_index), byref(count)) ) \n _check( _PhonologyEngineOutputGetWordStressOptionSelected(handle, c_int(word_index), byref(selected_index)) ) \n\n count = count.value\n\n selected_index = selected_index.value if count > 0 else None\n\n options = {'selected_index': selected_index, 'options': [], 'decoded_options': []}\n\n for i in range(count):\n letter_index, stress_type, vocabulary, grammar_rule = c_int(0), c_int(0), c_int(0), c_int(0)\n \n _check( _PhonologyEngineOutputGetWordStressOption(handle, c_int(word_index), i, byref(letter_index), byref(stress_type), byref(vocabulary) , byref(grammar_rule)) )\n\n letter_index, stress_type, vocabulary, grammar_rule = letter_index.value, stress_type.value, vocabulary.value, grammar_rule.value\n\n option = (letter_index, stress_type, vocabulary, grammar_rule)\n options['options'].append(option)\n\n decoded_option = phonology_engine_output_decode_option(option)\n options['decoded_options'].append(decoded_option)\n\n return options", "id": "10065844", "language": "Python", "matching_score": 3.3995163440704346, "max_stars_count": 15, "path": "phonology_engine/pe_native.py" }, { "content": "from __future__ import with_statement\nfrom . 
import pe_native\n\n_syllable_chars = '-'\n\n_numeric_stress_map = {\n 0: '0',\n 1: '1',\n 2: '2',\n 3: '1'\n}\n\n_utf8_stress_map = {\n 0: u'\\u0300', # grave\n 1: u'\\u0301', # acute\n 2: u'\\u0303', # tilde\n 3: u'\\u0301', # acute\n}\n\n_ascii_stress_map = {\n 0: \"`\", # grave\n 1: \"^\", # acute - no printable acute accent in ascii table only in extended ASCII:239\n 2: \"~\", # tilde\n 3: \"^\", # acute\n}\n\n_stress_ascii_chars = _ascii_stress_map.values()\n\nclass PhonologyEngineNormalizedPhrases:\n def __init__(self, handle, remove_stress_chars=True, remove_syllable_chars=True):\n self.handle = handle\n\n self.current = 0\n self.max = self.get_phrase_count() - 1\n\n self.remove_stress_chars = remove_stress_chars\n self.remove_syllable_chars = remove_syllable_chars\n\n def __del__(self):\n pe_native.phonology_engine_normalized_text_free(self.handle)\n \n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n del self\n\n def __iter__(self):\n return self\n\n def __next__(self): # Python 3\n return self.next()\n\n def next(self): # Python 2\n if self.current > self.max:\n raise StopIteration\n else:\n value = self.get_phrase(self.current)\n letter_map = self.get_phrase_letter_map(self.current)\n\n if self.remove_stress_chars:\n remove_indeces = []\n for i, c in enumerate(value):\n if c in _stress_ascii_chars:\n remove_indeces.append(i)\n\n if self.remove_syllable_chars:\n for i, c in enumerate(value):\n if c in _syllable_chars:\n remove_indeces.append(i)\n\n remove_indeces.sort(reverse=True)\n\n for i in remove_indeces:\n value = value[:i] + value[i+1:]\n letter_map = letter_map[:i] + letter_map[i+1:]\n\n self.current += 1\n return value, letter_map\n \n def get_phrase_count(self):\n return pe_native.phonology_engine_normalized_text_get_phrase_count(self.handle)\n \n def get_phrase(self, index):\n return pe_native.phonology_engine_normalized_text_get_phrase(self.handle, index)\n \n def get_phrase_letter_map(self, index):\n return pe_native.phonology_engine_normalized_text_get_phrase_letter_map(self.handle, index)\n\nclass PhonologyEngineOutput:\n def __init__(self, handle):\n self.handle = handle\n\n def __del__(self):\n pe_native.phonology_engine_output_free(self.handle)\n \n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n del self\n\n def get_word_count(self):\n return pe_native.phonology_engine_output_get_word_count(self.handle)\n\n def get_word(self, index, include_syllables=False):\n if index >= self.get_word_count():\n raise Exception('Value out of bounds')\n\n word = pe_native.phonology_engine_output_get_word(self.handle, index)\n syllables = self.get_word_syllables(index)\n \n if include_syllables and syllables:\n res = []\n for i in range(len(word)):\n val = word[i]\n for j in syllables:\n if j == 0:\n continue\n if j == i:\n val = '-' + val\n res.append(val)\n return ''.join(res)\n else:\n return word\n\n\n def get_word_syllables(self, index):\n if index >= self.get_word_count():\n raise Exception('Value out of bounds')\n\n return pe_native.phonology_engine_output_get_word_syllables(self.handle, index)\n\n def get_word_stress_options(self, index):\n if index >= self.get_word_count():\n raise Exception('Value out of bounds')\n\n return pe_native.phonology_engine_output_get_word_stress_options(self.handle, index)\n\n def get_word_with_stress_and_syllables(self, word_index, stress_map, stress_option_index=None, only_multiple=False):\n stress_options = 
self.get_word_stress_options(word_index)['options']\n if stress_option_index != None:\n stress_options = [stress_options[stress_option_index]] \n\n res = []\n \n word = self.get_word(word_index, include_syllables=False)\n syllable_indeces = self.get_word_syllables(word_index)\n\n if not syllable_indeces:\n syllable_indeces = [0]\n syllable_indeces.append(len(word))\n stress_count = 0\n\n for i in range(len(word)):\n letter = word[i]\n stresses = set([])\n for oi in range(len(stress_options)):\n stress_option = stress_options[oi]\n\n if stress_option and i == stress_option[0]:\n stresses.add(stress_map[stress_option[1]])\n\n stress_count += len(stresses)\n res.append(letter + ''.join(stresses))\n \n if only_multiple and stress_count <= 1:\n res = word\n\n syllables = []\n for i,j in zip(syllable_indeces[:-1], syllable_indeces[1:]):\n syllables.append(''.join(res[i:j]))\n \n return syllables\n\n def get_word_with_stress(self, word_index, stress_map, stress_option_index=None, include_syllables=True):\n if not stress_option_index:\n stress_option_index = self.get_word_stress_options(word_index)['selected_index']\n\n res = self.get_word_with_stress_and_syllables(word_index, stress_map, stress_option_index)\n\n glue = '-' if include_syllables else ''\n\n return glue.join(res)\n\n def get_word_with_all_numeric_stresses(self, word_index, include_syllables=True):\n res = self.get_word_with_stress_and_syllables(word_index, _numeric_stress_map, None)\n\n glue = '-' if include_syllables else ''\n\n return glue.join(res)\n\n def get_word_with_only_multiple_numeric_stresses(self, word_index, include_syllables=True):\n res = self.get_word_with_stress_and_syllables(word_index, _numeric_stress_map, None, True)\n\n glue = '-' if include_syllables else ''\n\n return glue.join(res)\n \n def get_word_with_numeric_stress(self, word_index, stress_option_index=None, include_syllables=True):\n return self.get_word_with_stress(word_index, _numeric_stress_map, stress_option_index, include_syllables)\n\n def get_word_with_utf8_stress(self, word_index, stress_option_index=None, include_syllables=True):\n return self.get_word_with_stress(word_index, _utf8_stress_map, stress_option_index, include_syllables)\n\n def get_word_with_ascii_stress(self, word_index, stress_option_index=None, include_syllables=True):\n return self.get_word_with_stress(word_index, _ascii_stress_map, stress_option_index, include_syllables)\n\n", "id": "4199425", "language": "Python", "matching_score": 3.437359571456909, "max_stars_count": 15, "path": "phonology_engine/pe_output.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom os.path import join, dirname, abspath\nfrom .pe_output import PhonologyEngineOutput, PhonologyEngineNormalizedPhrases\nfrom .pe_output import _syllable_chars, _numeric_stress_map, _utf8_stress_map, _ascii_stress_map\nfrom .pe_native import phonology_engine_process_phrase, phonology_engine_normalize_text\nimport re\n\n_phrase_separators = u'.?!;:\\r\\n,'\n_truncated_chars = u'„“\"\\''\n_letter_pattern = u'[A-Za-zĄ-Žą-ž]'\n_max_prase_length = 200\n_word_format_symbols = {\n None:'',\n 'word': '',\n 'word_with_syllables': _syllable_chars,\n 'word_with_all_numeric_stresses': [_syllable_chars] + list(_numeric_stress_map.values()),\n 'word_with_only_multiple_numeric_stresses': _numeric_stress_map.values(),\n 'number_stressed_word': _numeric_stress_map.values(),\n 'utf8_stressed_word': _utf8_stress_map.values(),\n 'ascii_stressed_word': _ascii_stress_map.values()\n}\n\nclass PhonologyEngine:\n def __init__(self):\n def 
_collapsor(processed_phrase, phrase, normalized_phrase, letter_map, word_format=None):\n if word_format:\n return processed_phrase[word_format]\n else:\n return processed_phrase\n\n self.collapsor = _collapsor\n self.phrase_separators = _phrase_separators\n\n def _process_phrase(self, phrase, include_syllables):\n if len(phrase) > _max_prase_length:\n raise Exception('Phrase \"%s\" length exceeds %d char limit' % (phrase, _max_prase_length))\n \n handle = phonology_engine_process_phrase(phrase)\n\n offset = 0\n \n with PhonologyEngineOutput(handle) as output:\n res = []\n for i in range(output.get_word_count()):\n word = output.get_word(i, include_syllables=False)\n if not re.search(_letter_pattern, word):\n continue\n\n try:\n start_index = phrase.index(word, offset)\n word_span = start_index, start_index + len(word)\n offset = word_span[1]\n except ValueError:\n word_span = None\n\n res.append(\n {\n 'word': word,\n 'word_with_syllables': output.get_word(i, include_syllables=True),\n 'number_stressed_word': output.get_word_with_numeric_stress(i, include_syllables=include_syllables),\n 'utf8_stressed_word': output.get_word_with_utf8_stress(i, include_syllables=include_syllables),\n 'ascii_stressed_word': output.get_word_with_ascii_stress(i, include_syllables=include_syllables),\n 'word_with_all_numeric_stresses': output.get_word_with_all_numeric_stresses(i, include_syllables=include_syllables),\n 'word_with_only_multiple_numeric_stresses': output.get_word_with_only_multiple_numeric_stresses(i, include_syllables=include_syllables),\n 'syllables': output.get_word_syllables(i),\n 'stress_options': output.get_word_stress_options(i),\n 'word_span': word_span\n }\n )\n \n return res\n\n def _process(self, text, separators, normalize=True, include_syllables=True, normalize_only=False):\n p = (r'[^' + re.escape(separators) + r']+') if separators else r'^.*$'\n pattern = re.compile(p)\n if len(text.strip()) == 0:\n yield text\n\n for m in pattern.finditer(text):\n phrase = m.group()\n offset = m.span()[0]\n\n if normalize:\n handle = phonology_engine_normalize_text(phrase)\n with PhonologyEngineNormalizedPhrases(handle) as normalized_phrases:\n if normalize_only:\n for normalized_phrase, letter_map in normalized_phrases:\n yield normalized_phrase, phrase, normalized_phrase, [v + offset for v in letter_map]\n else:\n for normalized_phrase, letter_map in normalized_phrases:\n processed_phrase = self._process_phrase(normalized_phrase, include_syllables)\n yield processed_phrase, phrase, normalized_phrase, [v + offset for v in letter_map]\n else:\n processed_phrase = self._process_phrase(phrase, include_syllables)\n yield processed_phrase, phrase, phrase, [v + offset for v in range(len(phrase))]\n\n def get_collapse_formats(self):\n return _word_format_symbols.keys()\n\n def process(self, s, include_syllables=False):\n return self._process(s, separators=self.phrase_separators, normalize=True, include_syllables=include_syllables, normalize_only=False)\n\n def process_and_collapse(self, s, word_format='word', normalize=True, include_syllables=False):\n processed = self._process(s, separators=self.phrase_separators, normalize=normalize, include_syllables=include_syllables, normalize_only=False)\n return self.collapse(s, processed, word_format)\n\n def collapse(self, original_text, output, word_format='word'):\n if word_format not in _word_format_symbols:\n raise Exception('Invalide word format \"%s\". Can be one of: %s.' 
% (word_format, str(_word_format_symbols.keys())))\n\n processed_words = []\n for processed_phrase, _, _, _ in output:\n if isinstance(processed_phrase, list):\n for word_details in processed_phrase:\n if isinstance(word_details, dict):\n processed_words.append(word_details[word_format])\n else:\n processed_words.append(word_details)\n else:\n processed_words.append(processed_phrase)\n\n return ' '.join(processed_words)\n\n def normalize(self, text):\n return self._process(text, separators=self.phrase_separators, normalize=True, include_syllables=False, normalize_only=True)\n\n def normalize_and_collapse(self, text):\n return self.collapse(text, self._process(text, separators=self.phrase_separators, normalize=True, include_syllables=False, normalize_only=True))\n\n\n", "id": "12815311", "language": "Python", "matching_score": 3.384077548980713, "max_stars_count": 15, "path": "phonology_engine/phonology_engine.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom nose.tools import assert_equal\nfrom phonology_engine import PhonologyEngine\n\ndef test_process_1():\n pe = PhonologyEngine()\n pe.process('Laba diena')\n\ndef test_process_2():\n pe = PhonologyEngine()\n pe.process('Laba diena, kaip laikais?')\n\ndef test_process_and_collapse_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse('Laba diena, kaip laikais?')\n assert_equal(res, u'LABA DIENA KAIP LAIKAIS')\n\ndef test_process_and_collapse_word_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: laba diena. Kur buvai?', 'word')\n assert_equal(res, u'IŠTIKIMA SAVO DVASINIAM IR DOROVINIAM PAVELDUI SĄJUNGA REMIASI NEDALOMOMIS IR VISUOTINĖMIS VERTYBĖMIS LABA DIENA KUR BUVAI')\n\ndef test_process_and_collapse_word_with_syllables_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: laba diena. Kur buvai?', 'word_with_syllables')\n assert_equal(res, u'I-ŠTI-KI-MA SA-VO DVA-SI-NIAM IR DO-RO-VI-NIAM PA-VEL-DUI SĄ-JUN-GA RE-MIA-SI NEDALOMOMIS IR VI-SUO-TI-NĖ-MIS VER-TY-BĖ-MIS LA-BA DIE-NA KUR BU-VAI')\n\ndef test_process_and_collapse_word_with_all_numeric_stresses_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: Laba diena. Kur buvai?', 'word_with_all_numeric_stresses')\n assert_equal(res, u'I0ŠTIKIMA SA0VO DVA2SINIAM IR DORO1VINIAM PA2VELDUI SĄ1JUNGA RE2MIASI NEDALOMOMIS IR VISU1OTINĖMIS VERTY1BĖMIS LA2BA0 DIENA0 KUR2 BUVAI2')\n\ndef test_process_and_collapse_word_with_only_multiple_numeric_stresses_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: Laba diena. Kur buvai?', 'word_with_only_multiple_numeric_stresses')\n assert_equal(res, u'IŠTIKIMA SAVO DVASINIAM IR DOROVINIAM PAVELDUI SĄJUNGA REMIASI NEDALOMOMIS IR VISUOTINĖMIS VERTYBĖMIS LA2BA0 DIENA KUR BUVAI')\n\ndef test_process_and_collapse_number_stressed_word_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: laba diena. 
Kur buvai?', 'number_stressed_word')\n assert_equal(res, u'I0ŠTIKIMA SA0VO DVA2SINIAM IR DORO1VINIAM PA2VELDUI SĄ1JUNGA RE2MIASI NEDALOMOMIS IR VISU1OTINĖMIS VERTY1BĖMIS LA2BA DIENA0 KUR2 BUVAI2')\n\ndef test_process_and_collapse_utf8_stressed_word_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: laba diena. Kur buvai?', 'utf8_stressed_word')\n assert_equal(res, u'ÌŠTIKIMA SÀVO DVÃSINIAM IR DORÓVINIAM PÃVELDUI SĄ́JUNGA RẼMIASI NEDALOMOMIS IR VISÚOTINĖMIS VERTÝBĖMIS LÃBA DIENÀ KUR̃ BUVAĨ')\n\ndef test_process_and_collapse_ascii_stressed_word_1():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: laba diena. Kur buvai?', 'ascii_stressed_word')\n assert_equal(res, u'I`ŠTIKIMA SA`VO DVA~SINIAM IR DORO^VINIAM PA~VELDUI SĄ^JUNGA RE~MIASI NEDALOMOMIS IR VISU^OTINĖMIS VERTY^BĖMIS LA~BA DIENA` KUR~ BUVAI~')\n\ndef test_normalize_text_1():\n pe = PhonologyEngine()\n pe.normalize(u'1 žmogus. Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: laba diena. Kur buvai?')\n\ndef test_normalize_and_collapse_text_1():\n pe = PhonologyEngine()\n res = pe.normalize_and_collapse(u'1 žmogus. Ištikima savo dvasiniam ir doroviniam paveldui Sąjunga remiasi nedalomomis ir visuotinėmis vertybėmis: laba diena. Kur buvai?')\n assert_equal(res, u'VIENAS ŽMOGUS IŠTIKIMA SAVO DVASINIAM IR DOROVINIAM PAVELDUI SĄJUNGA REMIASI NEDALOMOMIS IR VISUOTINĖMIS VERTYBĖMIS LABA DIENA KUR BUVAI')\n\ndef test_normalize_and_collapse_text_2():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'31 kačiukas perbėgo kelią.', 'ascii_stressed_word')\n assert_equal(res, u'TRI`SDEŠIMT VI^ENAS KAČIU`KAS PE^RBĖGO KE~LIĄ')\n\ndef test_normalize_and_collapse_text_3():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Kainuos šie telefonai „vos“ nuo 1400 eurų.', 'ascii_stressed_word')\n assert_equal(res, u'KAINUO~S ŠIE~ TELEFO`NAI VO~S NUO TŪ^KSTANTIS KETURI` ŠIMTAI~ EU~RŲ')\n\ndef test_normalize_and_collapse_text_4():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'„vos“', 'ascii_stressed_word')\n assert_equal(res, u'VO~S')\n\ndef test_normalize_and_collapse_text_5():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'„123“', 'ascii_stressed_word')\n assert_equal(res, u'ŠIM~TAS DVI`DEŠIMT TRY~S')\n\ndef test_normalize_and_collapse_text_6():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'O pirmasis pasaulyje telefonas perlenkiamu ekranu - „Royole FlexPai“ - yra ne prototipinėje fazėje.', 'ascii_stressed_word')\n assert_equal(res, u'O PIRMA`SIS PASA^ULYJE TELEFO`NAS PE^RLENKIAMU EKRANU` ROYOLE FLEKSPAI YRA` NE PROTOTIPINĖJE FA~ZĖJE')\n\ndef test_normalize_and_collapse_text_3():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'Kainuos šie telefonai „vos“ nuo 1400 eurų.', 'ascii_stressed_word')\n assert_equal(res, u'KAINUO~S ŠIE~ TELEFO`NAI VO~S NUO TŪ^KSTANTIS KETURI` ŠIMTAI~ EU~RŲ')\n\ndef test_normalize_and_collapse_text_4():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'„vos“', 'ascii_stressed_word')\n assert_equal(res, u'VO~S')\n\ndef test_normalize_and_collapse_text_5():\n pe = PhonologyEngine()\n res = pe.process_and_collapse(u'„123“', 'ascii_stressed_word')\n assert_equal(res, u'ŠIM~TAS DVI`DEŠIMT TRY~S')\n\ndef test_normalize_and_collapse_abbr_1():\n pe = PhonologyEngine()\n pe.phrase_separators 
= ''\n res = pe.normalize_and_collapse(u'proc.')\n assert_equal(res, u'PROCENTAS')\n\ndef test_normalize_and_collapse_text_roman_num_1():\n pe = PhonologyEngine()\n pe.phrase_separators = ''\n res = pe.normalize_and_collapse(u'IV.')\n assert_equal(res, u'KETVIRTAS')\n\ndef test_word_span_consistency():\n pe = PhonologyEngine()\n t = u'Pradėkime nuo tų pirmųjų. Kinijos bendrovės „Royole“ pavadinimas Lietuvoje yra mažai kam girdėtas - ši įmonės negamina mūsų šalyje populiarių išmaniųjų telefonų. Na, ir telefonai atskirai nėra šio gamintojo arkliukas - jo specializacija yra lankstūs ekranai. Tuos ekranus galite klijuoti kur norite - ant rankinių, ant kepuraičių, ant marškinėlių. Jie ne ką storesni už popieriaus lapą.'\n liepa_processed_data = pe.process(t)\n for word_details, a, b, letter_map in liepa_processed_data:\n for word_detail in word_details: \n span = word_detail['word_span']\n normalized = (len ( set( letter_map[span[0]:span[1]] ) ) == 1) and (span[1] - span[0] > 1)\n source_span = letter_map[span[0]], letter_map[span[1] - 1] + 1\n word = word_detail['ascii_stressed_word']\n orig_word = t[source_span[0]:source_span[1]]\n\n if not set(orig_word).intersection(set('^`~')):\n continue\n \n _ = t[max(0, source_span[0] - 2):min(len(t), source_span[1] + 2)]\n\n assert_equal(orig_word.lower(), word.replace('`', '').replace('^', '').replace('~', '').lower())\n\ndef test_stress_3():\n pe = PhonologyEngine()\n t = u'Bezdonių'\n liepa_processed_data = pe.process(t)\n for word_details, _, _, _ in liepa_processed_data:\n for _ in word_details:\n pass", "id": "8077204", "language": "Python", "matching_score": 2.3803789615631104, "max_stars_count": 15, "path": "phonology_engine/tests/tests.py" }, { "content": "# -*- coding: utf-8 -*-\nfrom nose.tools import assert_equal\nfrom phonology_engine import PhonologyEngine\n\ndef test_process_1():\n pe = PhonologyEngine()\n text = u'1 katinas važiavo viešuoju transportu. Jis išlipo Siesikų stotelėje lygiai po 2 minučių.'\n\n for elem in pe.process(text):\n if 'span_source' in elem and len(elem['word']) == elem['span_source']:\n assert_equal(elem['word'].lower(), elem['span_source'][1] - elem['span_source'][0])\n\n\n", "id": "11286024", "language": "Python", "matching_score": 1.457999587059021, "max_stars_count": 15, "path": "phonology_engine/tests/test_word_span.py" }, { "content": "from .phonology_engine import PhonologyEngine \nfrom . import pe_output\nfrom . 
import pe_native", "id": "5709183", "language": "Python", "matching_score": 1.4642467498779297, "max_stars_count": 15, "path": "phonology_engine/__init__.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\ntry:\n from os import path\n from setuptools import setup, find_packages\n from setuptools.dist import Distribution\nexcept ImportError:\n from distutils.core import setup\n\nfrom phonology_engine.version import VERSION\n__version__ = VERSION\n\ntry:\n if sys.version_info[:2] <= (2, 7):\n readme = open(\"README.md\")\n else:\n readme = open(\"README.md\", encoding=\"utf8\")\n long_description = str(readme.read())\nfinally:\n readme.close()\n\nsetup(\n name='phonology_engine',\n author='<NAME>',\n version=VERSION,\n author_email='<EMAIL>',\n description=\"Module to get stress and syllables for words in a given sentence in Lithuanian language.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/aleksas/phonology_engine',\n license='BSD',\n packages=['phonology_engine', 'phonology_engine.tests'],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n ],\n keywords=['phonology_engine', 'phonology', 'pronunciation', 'stress', 'syllable', 'accent', 'hyphenation'],\n package_data={\n 'phonology_engine': [\n 'Linux_x86_64/libPhonologyEngine.so',\n 'Linux_x86/libPhonologyEngine.so',\n 'Win32_x86/PhonologyEngine.dll',\n 'Win64_x64/PhonologyEngine.dll'],\n }\n)\n", "id": "3315253", "language": "Python", "matching_score": 3.4100639820098877, "max_stars_count": 15, "path": "setup.py" }, { "content": "import setuptools\nimport re, ast\n\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n\nwith open('re_map/__init__.py', 'rb') as f:\n\tversion = str(ast.literal_eval(_version_re.search(\n\t\tf.read().decode('utf-8')).group(1)))\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"re_map\",\n version=version,\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n description=\"Apply multiple regex patterns and keep change index map.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/aleksas/re-map\",\n packages=setuptools.find_packages(),\n #ext_modules = cythonize([\"re_map/fast.pyx\"]),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=2.7'\n)\n", "id": "5954280", "language": "Python", "matching_score": 1.0436378717422485, "max_stars_count": 2, "path": "setup.py" }, { "content": "from distutils.core import setup\nsetup(name='pydelay',\n version='0.2.0',\n author='<NAME>',\n author_email='<EMAIL>',\n license='MIT',\n packages=['pydelay'],\n package_dir={'pydelay': 'pydelay'},\n package_data={'pydelay': ['doc/pyplots/*', \n 'doc/sphinxext/*', \n 'doc/Makefile', \n 'doc/conf.py', \n 'doc/README', \n 'doc/index.rst', \n 'doc/pydelay.pdf', \n 'examples/*.py']},\n )\n\n", "id": "4064169", "language": "Python", "matching_score": 0.24829921126365662, "max_stars_count": 1, "path": "setup.py" }, { "content": "from ftplib import FTP, error_perm\n\nftp = FTP('f', 'alex', 'Aoiujkl')\n\ndef ftp_mkdirs(path): \n\n def mkdirs_(currentDir):\n if 
currentDir != \"\":\n try:\n ftp.cwd(currentDir)\n except error_perm:\n mkdirs_('/'.join(currentDir.split('/')[:-1]))\n ftp.mkd(currentDir)\n ftp.cwd(currentDir)\n\n pwd = ftp.pwd()\n path = '/'.join([pwd.rstrip('/'), path.lstrip('/')])\n\n mkdirs_(path)\n\nftp_mkdirs(\"user-backups/alex/backups/as/as\")\nftp_mkdirs(\"user-backups/alex/backups/as/asd\")", "id": "3385254", "language": "Python", "matching_score": 1.0472798347473145, "max_stars_count": 1, "path": "intergation_ftp_backup/ftp_backup_intrgration/doctype/ftp_backup_settings/test.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport os\nfrom frappe import _\nfrom frappe.model.document import Document\nimport ftp, json\nfrom frappe.utils.backups import new_backup\nfrom frappe.utils.background_jobs import enqueue\nfrom six.moves.urllib.parse import urlparse, parse_qs\nfrom frappe.integrations.utils import make_post_request\nfrom rq.timeouts import JobTimeoutException\nfrom frappe.utils import (cint, split_emails,\n\tget_files_path, get_backups_path, get_url, encode)\nfrom six import text_type\nfrom ftplib import FTP, FTP_TLS\n\nignore_list = [\".DS_Store\"]\n\nclass FTPBackupSettings(Document):\n\tdef validate(self):\n\t\tif self.enabled and self.limit_no_of_backups and self.no_of_backups < 1:\n\t\t\tfrappe.throw(_('Number of DB backups cannot be less than 1'))\n\[email protected]()\ndef take_backup():\n\t\"\"\"Enqueue longjob for taking backup to ftp\"\"\"\n\tenqueue(\"intergation_ftp_backup.ftp_backup_intrgration.doctype.ftp_backup_settings.ftp_backup_settings.take_backup_to_ftp\", queue='long', timeout=1500)\n\tfrappe.msgprint(_(\"Queued for backup. 
It may take a few minutes to an hour.\"))\n\ndef take_backups_daily():\n\ttake_backups_if(\"Daily\")\n\ndef take_backups_weekly():\n\ttake_backups_if(\"Weekly\")\n\ndef take_backups_if(freq):\n\tif frappe.db.get_value(\"FTP Backup Settings\", None, \"backup_frequency\") == freq:\n\t\ttake_backup_to_ftp()\n\ndef take_backup_to_ftp(retry_count=0, upload_db_backup=True):\n\tdid_not_upload, error_log = [], []\n\ttry:\n\t\tif cint(frappe.db.get_value(\"FTP Backup Settings\", None, \"enabled\")):\n\t\t\tdid_not_upload, error_log = backup_to_ftp(upload_db_backup)\n\t\t\tif did_not_upload: raise Exception\n\n\t\t\tsend_email(True, \"FTP\")\n\texcept JobTimeoutException:\n\t\tif retry_count < 2:\n\t\t\targs = {\n\t\t\t\t\"retry_count\": retry_count + 1,\n\t\t\t\t\"upload_db_backup\": False #considering till worker timeout db backup is uploaded\n\t\t\t}\n\t\t\tenqueue(\"intergation_ftp_backup.ftp_backup_intrgration.doctype.ftp_backup_settings.ftp_backup_settings.take_backup_to_ftp\",\n\t\t\t\tqueue='long', timeout=1500, **args)\n\texcept Exception:\n\t\tif isinstance(error_log, str):\n\t\t\terror_message = error_log + \"\\n\" + frappe.get_traceback()\n\t\telse:\n\t\t\tfile_and_error = [\" - \".join(f) for f in zip(did_not_upload, error_log)]\n\t\t\terror_message = (\"\\n\".join(file_and_error) + \"\\n\" + frappe.get_traceback())\n\t\tfrappe.errprint(error_message)\n\t\tsend_email(False, \"FTP\", error_message)\n\ndef send_email(success, service_name, error_status=None):\n\tif success:\n\t\tif frappe.db.get_value(\"FTP Backup Settings\", None, \"send_email_for_successful_backup\") == '0':\n\t\t\treturn\n\n\t\tsubject = \"Backup Upload Successful\"\n\t\tmessage =\"\"\"<h3>Backup Uploaded Successfully</h3><p>Hi there, this is just to inform you\n\t\tthat your backup was successfully uploaded to your %s account. So relax!</p>\n\t\t\"\"\" % service_name\n\n\telse:\n\t\tsubject = \"[Warning] Backup Upload Failed\"\n\t\tmessage =\"\"\"<h3>Backup Upload Failed</h3><p>Oops, your automated backup to %s\n\t\tfailed.</p>\n\t\t<p>Error message: <br>\n\t\t<pre><code>%s</code></pre>\n\t\t</p>\n\t\t<p>Please contact your system manager for more information.</p>\n\t\t\"\"\" % (service_name, error_status)\n\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\trecipients = split_emails(frappe.db.get_value(\"FTP Backup Settings\", None, \"send_notifications_to\"))\n\tfrappe.sendmail(recipients=recipients, subject=subject, message=message)\n\ndef backup_to_ftp(upload_db_backup=True):\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\t# upload database\n\tftp_settings, use_tls = get_ftp_settings()\n\n\tif not ftp_settings['host']:\n\t\treturn 'Failed backup upload', 'No FTP host! Please enter valid host for FTP.'\n\n\tif not ftp_settings['username']:\n\t\treturn 'Failed backup upload', 'No FTP username! 
Please enter valid username for FTP.'\n\n\tftp_client = FTP_TLS(**ftp_settings) if use_tls else FTP(**ftp_settings)\n\n\ttry:\n\t\tif upload_db_backup:\n\t\t\tbackup = new_backup(ignore_files=True)\n\t\t\tfilename = os.path.join(get_backups_path(), os.path.basename(backup.backup_path_db))\n\t\t\tupload_file_to_ftp(filename, \"/database\", ftp_client)\n\n\t\t\t# delete older databases\n\t\t\tif ftp_settings['no_of_backups']:\n\t\t\t\tdelete_older_backups(ftp_client, \"/database\", ftp_settings['no_of_backups'])\n\n\t\t# upload files to files folder\n\t\tdid_not_upload = []\n\t\terror_log = []\n\n\t\tif ftp_settings['file_backup']:\n\t\t\tupload_from_folder(get_files_path(), 0, \"/files\", ftp_client, did_not_upload, error_log)\n\t\t\tupload_from_folder(get_files_path(is_private=1), 1, \"/private/files\", ftp_client, did_not_upload, error_log)\n\n\t\treturn did_not_upload, list(set(error_log))\n\n\tfinally:\n\t\tftp_client.quit()\n\ndef upload_from_folder(path, is_private, ftp_folder, ftp_client, did_not_upload, error_log):\n\tif not os.path.exists(path):\n\t\treturn\n\n\tif is_fresh_upload():\n\t\tresponse = get_uploaded_files_meta(ftp_folder, ftp_client)\n\telse:\n\t\tresponse = frappe._dict({\"entries\": []})\n\n\tpath = text_type(path)\n\n\tfor f in frappe.get_all(\"File\", filters={\"is_folder\": 0, \"is_private\": is_private,\n\t\t\"uploaded_to_ftp\": 0}, fields=['file_url', 'name', 'file_name']):\n\t\tif is_private:\n\t\t\tfilename = f.file_url.replace('/private/files/', '')\n\t\telse:\n\t\t\tif not f.file_url:\n\t\t\t\tf.file_url = '/files/' + f.file_name\n\t\t\tfilename = f.file_url.replace('/files/', '')\n\t\tfilepath = os.path.join(path, filename)\n\n\t\tif filename in ignore_list:\n\t\t\tcontinue\n\n\t\tfound = False\n\t\tfor file_metadata in response.entries:\n\t\t\ttry:\n\t\t\t\tif (os.path.basename(filepath) == file_metadata.name\n\t\t\t\t\tand os.stat(encode(filepath)).st_size == int(file_metadata.size)):\n\t\t\t\t\tfound = True\n\t\t\t\t\tupdate_file_ftp_status(f.name)\n\t\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\terror_log.append(frappe.get_traceback())\n\n\t\tif not found:\n\t\t\ttry:\n\t\t\t\tupload_file_to_ftp(filepath, ftp_folder, ftp_client)\n\t\t\t\tupdate_file_ftp_status(f.name)\n\t\t\texcept Exception:\n\t\t\t\tdid_not_upload.append(filepath)\n\t\t\t\terror_log.append(frappe.get_traceback())\n\ndef upload_file_to_ftp(filename, folder, ftp_client):\n\t\"\"\"upload files with chunk of 15 mb to reduce session append calls\"\"\"\n\tif not os.path.exists(filename):\n\t\treturn\n\n\tcreate_folder_if_not_exists(folder, ftp_client)\n\tchunk_size = 15 * 1024 * 1024\n\tfile_size = os.path.getsize(encode(filename))\n\tmode = (ftp.files.WriteMode.overwrite)\n\n\tf = open(encode(filename), 'rb')\n\tpath = \"{0}/{1}\".format(folder, os.path.basename(filename))\n\n\ttry:\n\t\tif file_size <= chunk_size:\n\t\t\tftp_client.files_upload(f.read(), path, mode)\n\t\telse:\n\t\t\tupload_session_start_result = ftp_client.files_upload_session_start(f.read(chunk_size))\n\t\t\tcursor = ftp.files.UploadSessionCursor(session_id=upload_session_start_result.session_id, offset=f.tell())\n\t\t\tcommit = ftp.files.CommitInfo(path=path, mode=mode)\n\n\t\t\twhile f.tell() < file_size:\n\t\t\t\tif ((file_size - f.tell()) <= chunk_size):\n\t\t\t\t\tftp_client.files_upload_session_finish(f.read(chunk_size), cursor, commit)\n\t\t\t\telse:\n\t\t\t\t\tftp_client.files_upload_session_append(f.read(chunk_size), cursor.session_id,cursor.offset)\n\t\t\t\t\tcursor.offset = f.tell()\n\texcept 
ftp.exceptions.ApiError as e:\n\t\tif isinstance(e.error, ftp.files.UploadError):\n\t\t\terror = \"File Path: {path}\\n\".format(path=path)\n\t\t\terror += frappe.get_traceback()\n\t\t\tfrappe.log_error(error)\n\t\telse:\n\t\t\traise\n\ndef create_folder_if_not_exists(folder, ftp_client):\n\ttry:\n\t\tftp_client.files_get_metadata(folder)\n\texcept ftp.exceptions.ApiError as e:\n\t\t# folder not found\n\t\tif isinstance(e.error, ftp.files.GetMetadataError):\n\t\t\tftp_client.files_create_folder(folder)\n\t\telse:\n\t\t\traise\n\ndef update_file_ftp_status(file_name):\n\tfrappe.db.set_value(\"File\", file_name, 'uploaded_to_ftp', 1, update_modified=False)\n\ndef is_fresh_upload():\n\tfile_name = frappe.db.get_value(\"File\", {'uploaded_to_ftp': 1}, 'name')\n\treturn not file_name\n\ndef get_uploaded_files_meta(ftp_folder, ftp_client):\n\ttry:\n\t\treturn ftp_client.files_list_folder(ftp_folder)\n\texcept ftp.exceptions.ApiError as e:\n\t\t# folder not found\n\t\tif isinstance(e.error, ftp.files.ListFolderError):\n\t\t\treturn frappe._dict({\"entries\": []})\n\t\telse:\n\t\t\traise\n\ndef get_ftp_settings():\n\tsettings = frappe.get_doc(\"FTP Backup Settings\")\n\n\tapp_details = {\n\t\t\"host\": settings.ftp_host,\n\t\t\"user\": 'anonymous' if settings.ftp_authentication == 'Anonymous' else settings.ftp_username,\n\t\t\"password\": '' if settings.ftp_authentication == 'Anonymous' else settings.ftp_password,\n\t}\n\n\treturn app_details, settings.ftp_tls\n\ndef delete_older_backups(ftp_client, folder_path, to_keep):\n\tres = ftp_client.files_list_folder(path=folder_path)\n\tfiles = []\n\tfor f in res.entries:\n\t\tif isinstance(f, ftp.files.FileMetadata) and 'sql' in f.name:\n\t\t\tfiles.append(f)\n\n\tif len(files) <= to_keep:\n\t\treturn\n\n\tfiles.sort(key=lambda item:item.client_modified, reverse=True)\n\tfor f in files[to_keep:]:\n\t\tftp_client.files_delete(os.path.join(folder_path, f.name))\n\n@frappe.whitelist()\ndef get_ftp_authorize_url():\n\tapp_details = get_ftp_settings(redirect_uri=True)\n\tftp_oauth_flow = ftp.FTPOAuth2Flow(\n\t\tapp_details[\"app_key\"],\n\t\tapp_details[\"app_secret\"],\n\t\tapp_details[\"redirect_uri\"],\n\t\t{},\n\t\t\"ftp-auth-csrf-token\"\n\t)\n\n\tauth_url = ftp_oauth_flow.start()\n\n\treturn {\n\t\t\"auth_url\": auth_url,\n\t\t\"args\": parse_qs(urlparse(auth_url).query)\n\t}\n\n@frappe.whitelist()\ndef ftp_auth_finish(return_access_token=False):\n\tapp_details = get_ftp_settings(redirect_uri=True)\n\tcallback = frappe.form_dict\n\tclose = '<p class=\"text-muted\">' + _('Please close this window') + '</p>'\n\n\tftp_oauth_flow = ftp.FTPOAuth2Flow(\n\t\tapp_details[\"app_key\"],\n\t\tapp_details[\"app_secret\"],\n\t\tapp_details[\"redirect_uri\"],\n\t\t{\n\t\t\t'ftp-auth-csrf-token': callback.state\n\t\t},\n\t\t\"ftp-auth-csrf-token\"\n\t)\n\n\tif callback.state or callback.code:\n\t\ttoken = ftp_oauth_flow.finish({'state': callback.state, 'code': callback.code})\n\t\tif return_access_token and token.access_token:\n\t\t\treturn token.access_token, callback.state\n\n\t\tset_ftp_access_token(token.access_token)\n\telse:\n\t\tfrappe.respond_as_web_page(_(\"FTP Setup\"),\n\t\t\t_(\"Illegal Access Token. 
Please try again\") + close,\n\t\t\tindicator_color='red',\n\t\t\thttp_status_code=frappe.AuthenticationError.http_status_code)\n\n\tfrappe.respond_as_web_page(_(\"FTP Setup\"),\n\t\t_(\"FTP access is approved!\") + close,\n\t\tindicator_color='green')\n\[email protected](allow_guest=True)\ndef set_ftp_access_token(access_token):\n\tfrappe.db.set_value(\"FTP Backup Settings\", None, 'ftp_access_token', access_token)\n\tfrappe.db.commit()\n", "id": "4558449", "language": "Python", "matching_score": 7.497847557067871, "max_stars_count": 1, "path": "intergation_ftp_backup/ftp_backup_intrgration/doctype/ftp_backup_settings/ftp_backup_settings.bak.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport os\nimport json\nfrom frappe import _\nfrom frappe.model.document import Document\nfrom ftplib import FTP, FTP_TLS, error_perm\nfrom frappe.utils.backups import new_backup\nfrom frappe.utils.background_jobs import enqueue\nfrom six.moves.urllib.parse import urlparse, parse_qs\nfrom frappe.integrations.utils import make_post_request\nfrom rq.timeouts import JobTimeoutException\nfrom frappe.utils import (cint, split_emails,\n\tget_files_path, get_backups_path, get_url, encode)\nfrom six import text_type\nfrom ftplib import FTP, FTP_TLS\nfrom dateutil import parser\n\nignore_list = [\".DS_Store\"]\n\nclass FTPBackupSettings(Document):\n\tdef validate(self):\n\t\tif self.enabled and self.limit_no_of_backups and self.no_of_backups < 1:\n\t\t\tfrappe.throw(_('Number of DB backups cannot be less than 1'))\n\[email protected]()\ndef take_backup():\n\t\"\"\"Enqueue longjob for taking backup to ftp\"\"\"\n\tenqueue(\"intergation_ftp_backup.ftp_backup_intrgration.doctype.ftp_backup_settings.ftp_backup_settings.take_backup_to_ftp\", queue='long', timeout=1500)\n\tfrappe.msgprint(_(\"Queued for backup. 
It may take a few minutes to an hour.\"))\n\ndef take_backups_daily():\n\ttake_backups_if(\"Daily\")\n\ndef take_backups_weekly():\n\ttake_backups_if(\"Weekly\")\n\ndef take_backups_if(freq):\n\tif frappe.db.get_value(\"FTP Backup Settings\", None, \"backup_frequency\") == freq:\n\t\ttake_backup_to_ftp()\n\ndef take_backup_to_ftp(retry_count=0, upload_db_backup=True):\n\tdid_not_upload, error_log = [], []\n\ttry:\n\t\tif cint(frappe.db.get_value(\"FTP Backup Settings\", None, \"enabled\")):\n\t\t\tdid_not_upload, error_log = backup_to_ftp(upload_db_backup)\n\t\t\tif did_not_upload: raise Exception\n\n\t\t\tsend_email(True, \"FTP\")\n\texcept JobTimeoutException:\n\t\tif retry_count < 2:\n\t\t\targs = {\n\t\t\t\t\"retry_count\": retry_count + 1,\n\t\t\t\t\"upload_db_backup\": False #considering till worker timeout db backup is uploaded\n\t\t\t}\n\t\t\tenqueue(\"intergation_ftp_backup.ftp_backup_intrgration.doctype.ftp_backup_settings.ftp_backup_settings.take_backup_to_ftp\",\n\t\t\t\tqueue='long', timeout=1500, **args)\n\texcept Exception:\n\t\tif isinstance(error_log, str):\n\t\t\terror_message = error_log + \"\\n\" + frappe.get_traceback()\n\t\telse:\n\t\t\tfile_and_error = [\" - \".join(f) for f in zip(did_not_upload, error_log)]\n\t\t\terror_message = (\"\\n\".join(file_and_error) + \"\\n\" + frappe.get_traceback())\n\t\tfrappe.errprint(error_message)\n\t\tsend_email(False, \"FTP\", error_message)\n\ndef send_email(success, service_name, error_status=None):\n\tif success:\n\t\tif frappe.db.get_value(\"FTP Backup Settings\", None, \"send_email_for_successful_backup\") == '0':\n\t\t\treturn\n\n\t\tsubject = \"Backup Upload Successful\"\n\t\tmessage =\"\"\"<h3>Backup Uploaded Successfully</h3><p>Hi there, this is just to inform you\n\t\tthat your backup was successfully uploaded to your %s account. So relax!</p>\n\t\t\"\"\" % service_name\n\n\telse:\n\t\tsubject = \"[Warning] Backup Upload Failed\"\n\t\tmessage =\"\"\"<h3>Backup Upload Failed</h3><p>Oops, your automated backup to %s\n\t\tfailed.</p>\n\t\t<p>Error message: <br>\n\t\t<pre><code>%s</code></pre>\n\t\t</p>\n\t\t<p>Please contact your system manager for more information.</p>\n\t\t\"\"\" % (service_name, error_status)\n\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\trecipients = split_emails(frappe.db.get_value(\"FTP Backup Settings\", None, \"send_notifications_to\"))\n\tfrappe.sendmail(recipients=recipients, subject=subject, message=message)\n\ndef combine_path(root_dir, dst_dir):\n\treturn '/'.join([] + root_dir.rstrip('/').split('/') + dst_dir.strip('/').split('/'))\n\ndef backup_to_ftp(upload_db_backup=True):\n\tif not frappe.db:\n\t\tfrappe.connect()\n\n\t# upload database\n\tftp_settings, use_tls, root_directory, file_backup, limit_no_of_backups, no_of_backups = get_ftp_settings()\n\n\tif not ftp_settings['host']:\n\t\treturn 'Failed backup upload', 'No FTP host! Please enter valid host for FTP.'\n\n\tif not ftp_settings['user']:\n\t\treturn 'Failed backup upload', 'No FTP username! Please enter valid username for FTP.'\n\n\tif not root_directory:\n\t\treturn 'Failed backup upload', 'No FTP username! 
Please enter valid username for FTP.'\n\n\tftp_client = FTP_TLS(**ftp_settings) if use_tls else FTP(**ftp_settings)\n\n\ttry:\n\t\tif upload_db_backup:\n\t\t\tbackup = new_backup(ignore_files=True)\n\t\t\tfilename = os.path.join(get_backups_path(), os.path.basename(backup.backup_path_db))\n\t\t\tupload_file_to_ftp(filename, combine_path(root_directory, \"/database\"), ftp_client)\n\n\t\t\t# delete older databases\n\t\t\tif limit_no_of_backups:\n\t\t\t\tdelete_older_backups(ftp_client, combine_path(root_directory, \"/database\"), no_of_backups)\n\n\t\t# upload files to files folder\n\t\tdid_not_upload = []\n\t\terror_log = []\n\n\t\tif file_backup:\n\t\t\tupload_from_folder(get_files_path(), 0, combine_path(root_directory, \"/files\"), ftp_client, did_not_upload, error_log)\n\t\t\tupload_from_folder(get_files_path(is_private=1), 1, combine_path(root_directory, \"/private/files\"), ftp_client, did_not_upload, error_log)\n\n\t\treturn did_not_upload, list(set(error_log))\n\n\tfinally:\n\t\tftp_client.quit()\n\ndef upload_from_folder(path, is_private, ftp_folder, ftp_client, did_not_upload, error_log):\n\tif not os.path.exists(path):\n\t\treturn\n\n\tif is_fresh_upload():\n\t\tresponse = get_uploaded_files_meta(ftp_folder, ftp_client)\n\telse:\n\t\tresponse = frappe._dict({\"entries\": []})\n\n\tpath = text_type(path)\n\n\tfor f in frappe.get_all(\"File\", filters={\"is_folder\": 0, \"is_private\": is_private,\n\t\t\"uploaded_to_dropbox\": 0}, fields=['file_url', 'name', 'file_name']):\n\t\tif is_private:\n\t\t\tfilename = f.file_url.replace('/private/files/', '')\n\t\telse:\n\t\t\tif not f.file_url:\n\t\t\t\tf.file_url = '/files/' + f.file_name\n\t\t\tfilename = f.file_url.replace('/files/', '')\n\t\tfilepath = os.path.join(path, filename)\n\n\t\tif filename in ignore_list:\n\t\t\tcontinue\n\n\t\tfound = False\n\t\tfor file_metadata in response.entries:\n\t\t\ttry:\n\t\t\t\tif (os.path.basename(filepath) == file_metadata.name\n\t\t\t\t\tand os.stat(encode(filepath)).st_size == int(file_metadata.size)):\n\t\t\t\t\tfound = True\n\t\t\t\t\tupdate_file_ftp_status(f.name)\n\t\t\t\t\tbreak\n\t\t\texcept Exception:\n\t\t\t\terror_log.append(frappe.get_traceback())\n\n\t\tif not found:\n\t\t\ttry:\n\t\t\t\tupload_file_to_ftp(filepath, ftp_folder, ftp_client)\n\t\t\t\tupdate_file_ftp_status(f.name)\n\t\t\texcept Exception:\n\t\t\t\tdid_not_upload.append(filepath)\n\t\t\t\terror_log.append(frappe.get_traceback())\n\ndef upload_file_to_ftp(filename, folder, ftp_client):\n\t\"\"\"upload files with chunk of 15 mb to reduce session append calls\"\"\"\n\tif not os.path.exists(filename):\n\t\treturn\n\n\twith open(encode(filename), 'rb') as f:\n\t\tpath = \"{0}/{1}\".format(folder, os.path.basename(filename))\n\n\t\ttry:\n\t\t\tcreate_folder_if_not_exists(ftp_client, folder)\n\t\t\tpwd = ftp_client.pwd()\n\t\t\tftp_client.cwd(folder)\n\n\t\t\tftp_client.storbinary('STOR %s' % os.path.basename(filename), f)\n\t\t\t\n\t\t\tftp_client.cwd(pwd)\n\t\texcept Exception:\n\t\t\terror = \"File Path: {path}\\n\".format(path=path)\n\t\t\terror += frappe.get_traceback()\n\t\t\tfrappe.log_error(error)\n\t\t\tprint (error)\n\t\t\ndef create_folder_if_not_exists(ftp_client, path):\n\tdef _mkdirs_(currentDir):\n\t\tif currentDir != \"\":\n\t\t\ttry:\n\t\t\t\tftp_client.cwd(currentDir)\n\t\t\texcept error_perm:\n\t\t\t\t_mkdirs_('/'.join(currentDir.split('/')[:-1]))\n\t\t\t\tftp_client.mkd(currentDir)\n\t\t\t\tftp_client.cwd(currentDir)\n\n\tpwd = ftp_client.pwd()\n\tpath = '/'.join([pwd.rstrip('/'), 
path.lstrip('/')])\n\t_mkdirs_(path)\n\tftp_client.cwd(pwd)\n\ndef update_file_ftp_status(file_name):\n\tfrappe.db.set_value(\"File\", file_name, 'uploaded_to_dropbox', 1, update_modified=False)\n\ndef is_fresh_upload():\n\tfile_name = frappe.db.get_value(\"File\", {'uploaded_to_dropbox': 1}, 'name')\n\treturn not file_name\n\ndef get_uploaded_files_meta(ftp_folder, ftp_client):\n\ttry:\n\t\treturn {'entries': ftp_client.nlst(ftp_folder) }\n\texcept error_perm as e:\n\t\tif str(e) == \"550 No files found\":\n\t\t\treturn {'entries': [] }\n\t\telse:\n\t\t\traise\n\ndef decorate_files(ftp_client, filenames):\n latest_time = None\n\n for name in filenames:\n time = ftp_client.voidcmd(\"MDTM \" + name)\n if (latest_time is None) or (time > latest_time):\n latest_time = time\n \n yield name, latest_time\n\ndef get_ftp_settings():\n\tprint ('get_ftp_settings')\n\tsettings = frappe.get_doc(\"FTP Backup Settings\")\n\n\tapp_details = {\n\t\t\"host\": settings.ftp_host,\n\t\t\"user\": 'anonymous' if settings.ftp_authentication == 'Anonymous' else settings.ftp_username,\n\t\t\"passwd\": '' if settings.ftp_authentication == 'Anonymous' else settings.get_password(fieldname=\"ftp_password\", raise_exception=False)\n\t}\n\n\treturn app_details, settings.ftp_tls, settings.ftp_root_directory, settings.file_backup, settings.limit_no_of_backups, settings.no_of_backups \n\ndef delete_older_backups(ftp_client, folder_path, to_keep):\n\tprint ('delete_older_backups')\n\tres = get_uploaded_files_meta(folder_path, ftp_client, )\n\tfiles = []\n\tfor ft in decorate_files(ftp_client, res['entries']):\n\t\tfiles.append(ft)\n\n\tif len(files) <= to_keep:\n\t\treturn\n\n\tfiles.sort(key=lambda item:item[1], reverse=True)\n\tfor f, _ in files[to_keep:]:\n\t\tprint ('delete', f)\n\t\tftp_client.delete(f)\n\n", "id": "10539845", "language": "Python", "matching_score": 1.9760267734527588, "max_stars_count": 1, "path": "intergation_ftp_backup/ftp_backup_intrgration/doctype/ftp_backup_settings/ftp_backup_settings.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, <NAME> and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport frappe.utils\nfrom frappe.model.document import Document\nfrom frappe import _\nfrom hr_extension.hr_extension.doctype.regular_work_summary.regular_work_summary import get_user_emails_from_group\nfrom calendar import day_name, month_name, monthrange\nfrom datetime import datetime, timedelta\n\nclass RegularWorkSummaryGroup(Document):\n\tdef validate(self):\n\t\tif self.users:\n\t\t\tif not frappe.flags.in_test and not is_incoming_account_enabled():\n\t\t\t\tfrappe.throw(_('Please enable default incoming account before creating Regular Work Summary Group'))\n\ndef trigger_emails():\n\t'''Send emails to Employees at the given hour asking\n\t\t\tthem what did they work on today'''\n\tgroups = frappe.get_all(\"Regular Work Summary Group\")\n\tfor d in groups:\n\t\tgroup_doc = frappe.get_doc(\"Regular Work Summary Group\", d)\n\n\t\tif (is_current_hour(group_doc)\n\t\t\tand is_current_day(group_doc)\n\t\t\tand is_current_month(group_doc)\n\t\t\tand not is_holiday_today(group_doc.holiday_list)\n\t\t\tand group_doc.enabled):\n\t\t\temails = get_user_emails_from_group(group_doc, 'Reminder')\n\t\t\t# find emails relating to a company\n\t\t\tif emails:\n\t\t\t\tregular_work_summary = frappe.get_doc(\n\t\t\t\t\tdict(doctype='Regular Work Summary', 
regular_work_summary_group=group_doc.name)\n\t\t\t\t).insert()\n\t\t\t\tregular_work_summary.send_mails(group_doc, emails)\n\ndef is_current_hour(group_doc):\n\thour = group_doc.send_reminder_at\n\treturn frappe.utils.nowtime().split(':')[0] == hour.split(':')[0]\n\ndef is_current_day(group_doc, offset=0):\n\ttoday_offsetted = datetime.today() - timedelta(days=offset)\n\n\tif group_doc.send_reminder_frequency == 'Weekly':\n\t\t#ToDo: day_name may work incorrectly as it contains localized day name\n\t\t#offset should not be more than 5 days or more complex logic for weekly summayry scenario should be added\n\t\treturn day_name[today_offsetted.weekday()] == group_doc.send_reminder_week_day\n\telif group_doc.send_reminder_frequency in ['Monthly', 'Yearly']:\n\t\tmonth_day = int(group_doc.send_reminder_month_day)\n\t\ttoday = datetime.today()\n\t\tif month_day < 0:\n\t\t\tmonth_day += monthrange(today.year, today.month)[1] + 1\n\t\treturn today_offsetted.day == month_day\n\n\treturn True\n\ndef is_current_month(group_doc, offset=0):\n\ttoday_offsetted = datetime.today() - timedelta(days=offset)\n\t\n\tif group_doc.send_reminder_frequency == 'Yearly':\n\t\t#ToDo: month_name may work incorrectly as it contains localized month name\n\t\treturn month_name[today_offsetted.month] == group_doc.send_reminder_month\n\treturn True\n\ndef is_holiday_today(holiday_list):\n\tdate = frappe.utils.today()\n\tif holiday_list:\n\t\treturn frappe.get_all('Holiday List',\n\t\t\tdict(name=holiday_list, holiday_date=date)) and True or False\n\telse:\n\t\treturn False\n\ndef send_summary():\n\tfor d in frappe.get_all(\"Regular Work Summary Group\"):\n\t\tgroup_doc = frappe.get_doc(\"Regular Work Summary Group\", d)\n\n\t\tif group_doc.send_reminder_frequency == 'Daily':\n\t\t\toffset = 1\n\t\telse:\n\t\t\toffset = int(group_doc.send_summary_after_days)\n\n\t\tif (is_current_day(group_doc, offset)\n\t\t\tand is_current_month(group_doc, offset)\n\t\t\tand group_doc.enabled):\n\t\t\t\n\t\t\tfor d in frappe.get_all('Regular Work Summary', dict(egular_work_summary_group=group_doc.name, tatus='Open')):\n\t\t\t\tregular_work_summary = frappe.get_doc('Regular Work Summary', d.name)\n\t\t\t\tregular_work_summary.send_summary()\n\ndef is_incoming_account_enabled():\n\treturn frappe.db.get_value('Email Account', dict(enable_incoming=1, default_incoming=1))\n", "id": "8029023", "language": "Python", "matching_score": 3.6346120834350586, "max_stars_count": 0, "path": "hr_extension/hr_extension/doctype/regular_work_summary_group/regular_work_summary_group.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, <NAME> and Contributors\n# See license.txt\nfrom __future__ import unicode_literals\n\nimport os\nimport frappe\nimport unittest\nimport frappe.utils\n\n# test_records = frappe.get_test_records('Regular Work Summary')\n\nclass TestRegularWorkSummary(unittest.TestCase):\n\tdef test_email_trigger(self):\n\t\tself.setup_and_prepare_test()\n\t\tfor d in self.users:\n\t\t\t# check that email is sent to users\n\t\t\tif d.message:\n\t\t\t\tself.assertTrue(d.email in [d.recipient for d in self.emails\n\t\t\t\t\tif self.groups.subject in d.message])\n\n\tdef test_email_trigger_failed(self):\n\t\thour = '00:00'\n\t\tif frappe.utils.nowtime().split(':')[0] == '00':\n\t\t\thour = '01:00'\n\n\t\tself.setup_and_prepare_test(hour)\n\n\t\tfor d in self.users:\n\t\t\t# check that email is not sent to users\n\t\t\tself.assertFalse(d.email in [d.recipient for d in self.emails\n\t\t\t\tif self.groups.subject in d.message])\n\n\tdef 
test_incoming(self):\n\t\t# get test mail with message-id as in-reply-to\n\t\tself.setup_and_prepare_test()\n\t\twith open(os.path.join(os.path.dirname(__file__), \"test_data\", \"test-reply.raw\"), \"r\") as f:\n\t\t\tif not self.emails: return\n\t\t\ttest_mails = [f.read().replace('{{ sender }}',\n\t\t\tself.users[-1].email).replace('{{ message_id }}',\n\t\t\tself.emails[-1].message_id)]\n\n\t\t# pull the mail\n\t\temail_account = frappe.get_doc(\"Email Account\", \"_Test Email Account 1\")\n\t\temail_account.db_set('enable_incoming', 1)\n\t\temail_account.receive(test_mails=test_mails)\n\n\t\tregular_work_summary = frappe.get_doc('Regular Work Summary',\n\t\t\tfrappe.get_all('Regular Work Summary')[0].name)\n\n\t\targs = regular_work_summary.get_message_details()\n\n\t\tself.assertTrue('I built Regular Work Summary!' in args.get('replies')[0].content)\n\n\tdef setup_and_prepare_test(self, hour=None):\n\t\tfrappe.db.sql('delete from `tabRegular Work Summary`')\n\t\tfrappe.db.sql('delete from `tabEmail Queue`')\n\t\tfrappe.db.sql('delete from `tabEmail Queue Recipient`')\n\t\tfrappe.db.sql('delete from `tabCommunication`')\n\t\tfrappe.db.sql('delete from `tabRegular Work Summary Group`')\n\n\t\tself.users = frappe.get_all('User',\n\t\t\tfields=['email'],\n\t\t\tfilters=dict(email=('!=', '<EMAIL>')))\n\t\tself.setup_groups(hour)\n\n\t\tfrom erpnext.hr.doctype.regular_work_summary_group.regular_work_summary_group \\\n\t\t\timport trigger_emails\n\t\ttrigger_emails()\n\n\t\t# check if emails are created\n\n\t\tself.emails = frappe.db.sql(\"\"\"select r.recipient, q.message, q.message_id \\\n\t\t\tfrom `tabEmail Queue` as q, `tabEmail Queue Recipient` as r \\\n\t\t\twhere q.name = r.parent\"\"\", as_dict=1)\n\n\t\tfrappe.db.commit()\n\n\tdef setup_groups(self, hour=None):\n\t\t# setup email to trigger at this hour\n\t\tif not hour:\n\t\t\thour = frappe.utils.nowtime().split(':')[0]\n\t\t\thour = hour+':00'\n\n\t\tgroups = frappe.get_doc(dict(doctype=\"Regular Work Summary Group\",\n\t\t\tname=\"Regular Work Summary\",\n\t\t\tusers=self.users,\n\t\t\tsend_emails_at=hour,\n\t\t\tsubject=\"this is a subject for testing summary emails\",\n\t\t\tmessage='this is a message for testing summary emails'))\n\t\tgroups.insert()\n\n\t\tself.groups = groups\n\t\tself.groups.save()\n", "id": "6909385", "language": "Python", "matching_score": 4.361148357391357, "max_stars_count": 0, "path": "hr_extension/hr_extension/doctype/regular_work_summary/test_regular_work_summary.py" }, { "content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, <NAME> and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe import _\nfrom email_reply_parser import EmailReplyParser\nfrom erpnext.hr.doctype.employee.employee import is_holiday\nfrom frappe.utils import global_date_format\nfrom six import string_types\n\nclass RegularWorkSummary(Document):\n\tdef send_mails(self, rws_group, emails):\n\t\t'''Send emails to get regular work summary to all users \\\n\t\t\tin selected regular work summary group'''\n\n\t\tincoming_email_account = frappe.db.get_value('Email Account',\n\t\t\tdict(enable_incoming=1, default_incoming=1),\n\t\t\t\t'email_id')\n\n\t\tself.db_set('email_sent_to', 
'\\n'.join(emails))\n\t\tfrappe.sendmail(recipients=emails,\n\t\t\tmessage=rws_group.message,\n\t\t\tsubject=rws_group.subject,\n\t\t\treference_doctype=self.doctype,\n\t\t\treference_name=self.name,\n\t\t\treply_to=incoming_email_account)\n\n\tdef send_summary(self):\n\t\t'''Send summary of all replies. Called at midnight'''\n\t\targs = self.get_message_details()\n\t\temails = get_user_emails_from_group(self.regular_work_summary_group, 'Summary')\n\t\tfrappe.sendmail(recipients=emails,\n\t\t\ttemplate='daily_work_summary',\n\t\t\targs=args,\n\t\t\tsubject=_(self.regular_work_summary_group),\n\t\t\treference_doctype=self.doctype,\n\t\t\treference_name=self.name)\n\n\t\tself.db_set('status', 'Sent')\n\n\tdef get_message_details(self):\n\t\t'''Return args for template'''\n\t\tdws_group = frappe.get_doc('Regular Work Summary Group',\n\t\t\tself.regular_work_summary_group)\n\n\t\treplies = frappe.get_all('Communication',\n\t\t\tfields=['content', 'text_content', 'sender'],\n\t\t\tfilters=dict(reference_doctype=self.doctype,\n\t\t\t\treference_name=self.name,\n\t\t\t\tcommunication_type='Communication',\n\t\t\t\tsent_or_received='Received'),\n\t\t\torder_by='creation asc')\n\n\t\tdid_not_reply = self.email_sent_to.split()\n\n\t\tfor d in replies:\n\t\t\tuser = frappe.db.get_values(\"User\",\n\t\t\t\t{\"email\": d.sender},\n\t\t\t\t[\"full_name\", \"user_image\"],\n\t\t\t\tas_dict=True)\n\n\t\t\td.sender_name = user[0].full_name if user else d.sender\n\t\t\td.image = user[0].image if user and user[0].image else None\n\n\t\t\toriginal_image = d.image\n\t\t\t# make thumbnail image\n\t\t\ttry:\n\t\t\t\tif original_image:\n\t\t\t\t\tfile_name = frappe.get_list('File',\n\t\t\t\t\t\t{'file_url': original_image})\n\n\t\t\t\t\tif file_name:\n\t\t\t\t\t\tfile_name = file_name[0].name\n\t\t\t\t\t\tfile_doc = frappe.get_doc('File', file_name)\n\t\t\t\t\t\tthumbnail_image = file_doc.make_thumbnail(\n\t\t\t\t\t\t\tset_as_thumbnail=False,\n\t\t\t\t\t\t\twidth=100,\n\t\t\t\t\t\t\theight=100,\n\t\t\t\t\t\t\tcrop=True\n\t\t\t\t\t\t)\n\t\t\t\t\t\td.image = thumbnail_image\n\t\t\texcept:\n\t\t\t\td.image = original_image\n\n\t\t\tif d.sender in did_not_reply:\n\t\t\t\tdid_not_reply.remove(d.sender)\n\t\t\tif d.text_content:\n\t\t\t\td.content = frappe.utils.md_to_html(\n\t\t\t\t\tEmailReplyParser.parse_reply(d.text_content)\n\t\t\t\t)\n\n\t\tdid_not_reply = [(frappe.db.get_value(\"User\", {\"email\": email}, \"full_name\") or email)\n\t\t\tfor email in did_not_reply]\n\n\t\treturn dict(replies=replies,\n\t\t\toriginal_message=dws_group.message,\n\t\t\ttitle=_('Work Summary for {0}'.format(\n\t\t\t\tglobal_date_format(self.creation)\n\t\t\t)),\n\t\t\tdid_not_reply=', '.join(did_not_reply) or '',\n\t\t\tdid_not_reply_title=_('No replies from'))\n\ndef get_user_emails_from_group(group, receive='Both'):\n\t'''Returns list of email of enabled users from the given group\n\n\t:param group: Regular Work Summary Group `name`'''\n\tgroup_doc = group\n\tif isinstance(group_doc, string_types):\n\t\tgroup_doc = frappe.get_doc('Regular Work Summary Group', group)\n\n\temails = get_users_email(group_doc, receive)\n\n\treturn emails\n\ndef get_users_email(doc, receive):\n\treturn [d.email for d in doc.users\n\t\tif frappe.db.get_value(\"User\", d.user, \"enabled\") and (d.receive == receive or d.receive == 'Both')]\n", "id": "4697284", "language": "Python", "matching_score": 0.6690562963485718, "max_stars_count": 0, "path": "hr_extension/hr_extension/doctype/regular_work_summary/regular_work_summary.py" }, { "content": "from sys 
import path\npath.append('..')\n\nfrom unittest import TestCase, main\nfrom re_map import Processor\n\nclass IntersectingGroupTestCase(TestCase):\n def test_intersection_1(self):\n text = 'C AAA C'\n\n modifiers = [\n ( r'(AAA)', { 1: 'BBBBB' } ),\n ( r'(C BBBBB C)', { 1: 'DD' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'DD' )\n self.assertEqual( procesor.span_map, [ ((0, 7), (0, 2)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '0000000' )\n self.assertEqual( decorated_processed_text, '00' )\n\n def test_intersection_2(self):\n text = 'C AAA C'\n\n modifiers = [\n ( r'(AAA)', { 1: 'BBBBB' } ),\n ( r'(C BBBBB)', { 1: 'DD' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'DD C' )\n self.assertEqual( procesor.span_map, [ ((0, 5), (0, 2)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '00000 C' )\n self.assertEqual( decorated_processed_text, '00 C' )\n\n def test_intersection_3(self):\n text = 'C AAA C'\n\n modifiers = [\n ( r'(AAA)', { 1: 'BBBBB' } ),\n ( r'(BBBBB C)', { 1: 'DD' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'C DD' )\n self.assertEqual( procesor.span_map, [ ((2, 7), (2, 4)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, 'C 00000' )\n self.assertEqual( decorated_processed_text, 'C 00' )\n\n def test_intersection_4(self):\n text = 'C AAA C'\n\n modifiers = [\n ( r'(AAA)', { 1: 'BBEBB' } ),\n ( r'(BBEBB)', { 1: 'DD' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'C DD C' )\n self.assertEqual( procesor.span_map, [ ((2, 5), (2, 4)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, 'C 000 C' )\n self.assertEqual( decorated_processed_text, 'C 00 C' )\n\n def test_intersection_5(self):\n text = ' C AAA C '\n\n modifiers = [\n ( r' (AAA) C', { 1: 'BBEBB' } ),\n ( r'C (BBEBB)', { 1: 'DD' } ),\n ( r'(C D)D', { 1: 'FF' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' FFD C ' )\n self.assertEqual( procesor.span_map, [ ((1, 6), (1, 4)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 00000 C ' )\n self.assertEqual( decorated_processed_text, ' 000 C ' )\n\n def test_intersection_6(self):\n modifiers = [\n ( r' (AAA)B', { 1: 'CCC CCC'} ),\n ( r'(CCCB)', { 1: lambda x: x } ),\n ]\n\n text = ' AAAB'\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' CCC CCCB' )\n self.assertEqual( procesor.span_map, [((1, 5), (1, 9))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 0000' )\n self.assertEqual( decorated_processed_text, ' 00000000' 
)\n\n def test_intersection_7(self):\n modifiers = [\n ( r'B(AAA) ', { 1: 'CCC CCC'} ),\n ( r'(BCCC)', { 1: lambda x: x } ),\n ]\n\n text = 'BAAA '\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'BCCC CCC ' )\n self.assertEqual( procesor.span_map, [((0, 4), (0, 8))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '0000 ' )\n self.assertEqual( decorated_processed_text, '00000000 ' )\n\n def test_multiple_intersections_1(self):\n text = ' C AAA C D '\n\n modifiers = [\n ( r'(C) (AAA) (C) (D) ', { 1: 'GG', 2: 'BB', 3: 'GG', 4: 'DD' } ),\n ( r'( GG BB G)G', { 1: 'HJK' } ),\n ( r'(HJK)G', { 1: 'FF' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'FFG DD ' )\n self.assertEqual( procesor.span_map, [ ((0, 8), (0, 3)), ((9, 10), (4, 6)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '00000000 1 ' )\n self.assertEqual( decorated_processed_text, '000 11 ' )\n\n\n def test_new(self):\n modifiers = [\n ( r' (etc)\\.', { 1: 'et cetera'} ),\n ( r'([^ ]+)', { 1: lambda x: x } ),\n ]\n\n text = ' etc.'\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' et cetera.' )\n self.assertEqual( procesor.span_map, [ ((1, 5), (1, 11)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 0000' )\n self.assertEqual( decorated_processed_text, ' 0000000000' )\n\nif __name__ == '__main__':\n main()", "id": "6776618", "language": "Python", "matching_score": 3.5606563091278076, "max_stars_count": 2, "path": "tests/test_intersecting_groups.py" }, { "content": "from sys import path\npath.append('..')\n\nfrom unittest import TestCase, main\nfrom re_map import Processor\n\nclass MatchingGroupTestCase(TestCase):\n '''\n Tests perfect matching match group replacements.\n '''\n def test_matching_1(self):\n text = ' BBB AAA AAA BBB '\n\n modifiers = [\n ( r'(AAA)', { 1: 'BBB' } ),\n ( r'(BBB)', { 1: 'YYY' } ),\n ]\n\n ref_span_map = [\n ((1, 4), (1, 4)),\n ((5, 8), (5, 8)),\n ((9, 12), (9, 12)),\n ((13, 16), (13, 16))\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' YYY YYY YYY YYY ' )\n self.assertEqual( procesor.span_map, ref_span_map )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 111 222 333 ' )\n self.assertEqual( decorated_processed_text, ' 000 111 222 333 ' )\n\n def test_matching_2(self):\n text = ' AAA BBB CCC DDD '\n\n modifiers = [\n ( r'(AAA) (BBB) (CCC)', { 1: 'ZZZZ', 2: 'YYYYY', 3: 'XXXXXX' } ),\n ( r'((YYYYY)|(ZZZZ))', { 1: 'WWWWWW' } ),\n ( r'(WWWWWW)', { 1: 'QQQQQQQ' } ),\n ]\n\n ref_span_map = [\n ((1, 4), (1, 8)),\n ((5, 8), (9, 16)),\n ((9, 12), (17, 23))\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' QQQQQQQ QQQQQQQ XXXXXX DDD ' )\n self.assertEqual( procesor.span_map, ref_span_map )\n\n decorated_text, decorated_processed_text 
= procesor.decorate()\n\n self.assertEqual( decorated_processed_text, ' 0000000 1111111 222222 DDD ' )\n self.assertEqual( decorated_text, ' 000 111 222 DDD ' )\n\n def test_matching_3(self):\n text = 'AZA'\n\n modifiers = [\n ( r'(A)', { 1: 'BB' } ),\n ( r'(BB)', { 1: 'DD' } )\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'DDZDD' )\n self.assertEqual( procesor.span_map, [ ((0, 1), (0, 2)), ((2, 3), (3, 5)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '0Z1' )\n self.assertEqual( decorated_processed_text, '00Z11' )\n\n def test_matching_4(self):\n text = ' AAA '\n\n modifiers = [\n ( r'(AAA)', { 1: 'BBBBB' } ),\n ( r'(BBBBB)', { 1: 'CC' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' CC ' )\n self.assertEqual( procesor.span_map, [ ((1, 4), (1, 3)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 ' )\n self.assertEqual( decorated_processed_text, ' 00 ' )\n\n def test_matching_5(self):\n text = ' AAA D '\n\n modifiers = [\n ( r'(AAA) (D)', { 1: 'BBBBB', 2: 'EE' } ),\n ( r'(BBBBB)', { 1: 'CC' } ),\n ( r'(EE)', { 1: 'FFFF' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' CC FFFF ' )\n self.assertEqual( procesor.span_map, [ ((1, 4), (1, 3)), ((5, 6), (4, 8)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 1 ' )\n self.assertEqual( decorated_processed_text, ' 00 1111 ' )\n\n def test_matching_6(self):\n text = ' AAA D AAA D '\n\n modifiers = [\n ( r'(AAA) (D)', { 1: 'BBBBB', 2: 'EE' } ),\n ( r'(BBBBB)', { 1: 'CC' } ),\n ( r'(EE)', { 1: 'FFFF' } ),\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' CC FFFF CC FFFF ' )\n self.assertEqual( procesor.span_map, [ ((1, 4), (1, 3)), ((5, 6), (4, 8)), ((7, 10), (9, 11)), ((11, 12), (12, 16)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 1 222 3 ' )\n self.assertEqual( decorated_processed_text, ' 00 1111 22 3333 ' )\n\nif __name__ == '__main__':\n main()", "id": "6011159", "language": "Python", "matching_score": 3.2397284507751465, "max_stars_count": 2, "path": "tests/test_matching_groups.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom sys import path\npath.append('..')\n\nfrom unittest import TestCase, main\nfrom re_map import Processor\n\nclass OtherTestCase(TestCase):\n text_0 = ' AAA BBB CCC DDD '\n text_1 = ' AAA BBB AAA BBB '\n text_2 = ' BBB AAA AAA BBB '\n text_3 = ' AAA AAA AAA AAA '\n\n modifiers_0 = [\n ( r'(AAA)', { 1: 'ZZZ' } ),\n ( r'(BBB)', { 1: 'YYY' } ),\n ( r'(CCC)', { 1: 'XXX' } ),\n ( r'(DDD)', { 1: 'WWW' } )\n ]\n\n modifiers_1 = [\n ( r'(AAA)', { 1: 'BBB' } ),\n ]\n\n modifiers_3 = [\n ( r'(AAA) (BBB)', { 1: 'CCC', 2: 'CCC' } ),\n ( r'(DDD)', { 1: 'CCC' } ),\n ]\n\n modifiers_4 = [\n ( r'(AAA) (BBB) (CCC)', { 1: 'CCCC', 2: 'CCCC', 3: 'CCCC' } ),\n ( r'(DDD)', { 1: 'CCCC' } ),\n ]\n\n span_map = [\n ((1, 4), (1, 4)),\n ((5, 8), (5, 8)),\n 
((9, 12), (9, 12)),\n ((13, 16), (13, 16))\n ]\n\n span_map_1_1 = [\n ((1, 4), (1, 4)),\n ((9, 12), (9, 12))\n ]\n\n span_map_2 = [\n ((1, 4), (1, 4)),\n ((5, 8), (5, 8)),\n ((13, 16), (13, 16))\n ]\n\n span_map_3 = [\n ((1, 4), (1, 5)),\n ((5, 8), (6, 10)),\n ((9, 12), (11, 15)),\n ((13, 16), (16, 20))\n ]\n\n def test_0(self):\n with Processor(self.text_0) as procesor:\n for pattern, replacement_map in self.modifiers_0:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' ZZZ YYY XXX WWW ' )\n self.assertEqual( procesor.span_map, self.span_map )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 111 222 333 ' )\n self.assertEqual( decorated_processed_text, ' 000 111 222 333 ' )\n\n def test_1(self):\n with Processor(self.text_1) as procesor:\n for pattern, replacement_map in self.modifiers_0:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' ZZZ YYY ZZZ YYY ' )\n self.assertEqual( procesor.span_map, self.span_map )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 111 222 333 ' )\n self.assertEqual( decorated_processed_text, ' 000 111 222 333 ' )\n\n def test_2(self):\n with Processor(self.text_1) as procesor:\n for pattern, replacement_map in self.modifiers_1:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' BBB BBB BBB BBB ' )\n self.assertEqual( procesor.span_map, self.span_map_1_1 )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 BBB 111 BBB ' )\n self.assertEqual( decorated_processed_text, ' 000 BBB 111 BBB ' )\n\n def test_3(self):\n with Processor(self.text_3) as procesor:\n for pattern, replacement_map in self.modifiers_1:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' BBB BBB BBB BBB ' )\n self.assertEqual( procesor.span_map, self.span_map )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 111 222 333 ' )\n self.assertEqual( decorated_processed_text, ' 000 111 222 333 ' )\n\n def test_4(self):\n with Processor(self.text_2) as procesor:\n for pattern, replacement_map in self.modifiers_0:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' YYY ZZZ ZZZ YYY ' )\n self.assertEqual( procesor.span_map, self.span_map )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 111 222 333 ' )\n self.assertEqual( decorated_processed_text, ' 000 111 222 333 ' )\n\n def test_5(self):\n with Processor(self.text_0) as procesor:\n for pattern, replacement_map in self.modifiers_3:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' CCC CCC CCC CCC ' )\n self.assertEqual( procesor.span_map, self.span_map_2 )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 111 CCC 222 ' )\n self.assertEqual( decorated_processed_text, ' 000 111 CCC 222 ' )\n\n def test_6(self):\n with Processor(self.text_0) as procesor:\n for pattern, replacement_map in self.modifiers_4:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, ' CCCC CCCC CCCC CCCC ' )\n self.assertEqual( procesor.span_map, self.span_map_3 )\n\n decorated_text, decorated_processed_text = 
procesor.decorate()\n\n self.assertEqual( decorated_text, ' 000 111 222 333 ' )\n self.assertEqual( decorated_processed_text, ' 0000 1111 2222 3333 ' )\n\n def test_7(self):\n text = 'ab'\n pattern, replacement_map = r'((a)(b))', { 1: 'c', 2:'d', 3:'e'}\n\n with self.assertRaises(ValueError):\n with Processor(text) as procesor:\n procesor.process(pattern, replacement_map)\n\n def test_9(self):\n text = 'AA BBBB& CC&CCCC'\n\n pattern_1, replacement_map_1 = r'([A-Za-z&]+)', { 1: lambda x: x.replace('&', '') }\n pattern_2, replacement_map_2 = r'(AA) ', { 1: 'DDD DDD' }\n\n with Processor(text) as procesor:\n procesor.process(pattern_1, replacement_map_1)\n procesor.swap()\n procesor.process(pattern_2, replacement_map_2)\n\n self.assertEqual( procesor.text, 'AA BBBB CCCCCC' )\n self.assertEqual( procesor.processed_text, 'DDD DDD BBBB& CC&CCCC' )\n self.assertEqual( procesor.span_map, [((0, 2), (0, 7)), ((3, 7), (8, 13)), ((8, 14), (14, 21)) ] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '00 1111 222222' )\n self.assertEqual( decorated_processed_text, '0000000 11111 2222222' )\n\nif __name__ == '__main__':\n main()", "id": "10538970", "language": "Python", "matching_score": 3.306464672088623, "max_stars_count": 2, "path": "tests/test_other.py" }, { "content": "from sys import path\npath.append('..')\n\nfrom unittest import TestCase, main\nfrom re_map import Processor\n\nclass BundledModifiersTestCase(TestCase):\n def test_bundled_1(self):\n text = 'ABAB'\n modifiers = [\n ( r'(A)', { 1: 'CC'} ),\n ( r'(B)', { 1: 'D'} )\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'CCDCCD' )\n self.assertEqual( procesor.span_map, [((0, 1), (0, 2)), ((1, 2), (2, 3)), ((2, 3), (3, 5)), ((3, 4), (5, 6))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '0123' )\n self.assertEqual( decorated_processed_text, '001223' )\n\n def test_bundled_2(self):\n text = 'AABAAB'\n modifiers = [\n ( r'(AA)', { 1: 'C'} ),\n ( r'(B)', { 1:'D'} )\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'CDCD' )\n self.assertEqual( procesor.span_map, [((0, 2), (0, 1)), ((2, 3), (1, 2)), ((3, 5), (2, 3)), ((5, 6), (3, 4))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '001223' )\n self.assertEqual( decorated_processed_text, '0123' )\n\n def test_bundled_3(self):\n text = 'ABBABB'\n modifiers = [\n ( r'(A)', { 1: 'CC'} ),\n ( r'(BB)', { 1:'D'} )\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'CCDCCD' )\n self.assertEqual( procesor.span_map, [((0, 1), (0, 2)), ((1, 3), (2, 3)), ((3, 4), (3, 5)), ((4, 6), (5, 6))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '011233' )\n self.assertEqual( decorated_processed_text, '001223' )\n\n def test_bundled_4(self):\n text = 'BAAB'\n\n modifiers = [\n ( r'(AA)', { 1: 'CC'} ),\n ( r'(B)', { 1:''} )\n ]\n\n with Processor(text) as procesor:\n for pattern, replacement_map in modifiers:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'CC' )\n 
self.assertEqual( procesor.span_map, [((0, 1), (0, 0)), ((1, 3), (0, 2)), ((3, 4), (2, 2))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '0112' )\n self.assertEqual( decorated_processed_text, '11' )\n\nif __name__ == '__main__':\n main()", "id": "1903100", "language": "Python", "matching_score": 3.435446262359619, "max_stars_count": 2, "path": "tests/test_bundled_modifiers.py" }, { "content": "from sys import path\npath.append('..')\n\nfrom unittest import TestCase, main\nfrom re_map import Processor\n\nclass SingleModifierTestCase(TestCase):\n def test_single_1(self):\n text = 'ABAB'\n pattern, replacement_map = r'(A)(B)', { 1: 'CC', 2:'D'}\n\n with Processor(text) as procesor:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'CCDCCD' )\n self.assertEqual( procesor.span_map, [((0, 1), (0, 2)), ((1, 2), (2, 3)), ((2, 3), (3, 5)), ((3, 4), (5, 6))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '0123' )\n self.assertEqual( decorated_processed_text, '001223' )\n\n def test_single_2(self):\n text = 'AABAAB'\n pattern, replacement_map = r'(AA)(B)', { 1: 'C', 2:'D'}\n\n with Processor(text) as procesor:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'CDCD' )\n self.assertEqual( procesor.span_map, [((0, 2), (0, 1)), ((2, 3), (1, 2)), ((3, 5), (2, 3)), ((5, 6), (3, 4))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '001223' )\n self.assertEqual( decorated_processed_text, '0123' )\n\n def test_single_3(self):\n text = 'ABBABB'\n pattern, replacement_map = r'(A)(BB)', { 1: 'CC', 2:'D'}\n\n with Processor(text) as procesor:\n procesor.process(pattern, replacement_map)\n\n self.assertEqual( procesor.processed_text, 'CCDCCD' )\n self.assertEqual( procesor.span_map, [((0, 1), (0, 2)), ((1, 3), (2, 3)), ((3, 4), (3, 5)), ((4, 6), (5, 6))] )\n\n decorated_text, decorated_processed_text = procesor.decorate()\n\n self.assertEqual( decorated_text, '011233' )\n self.assertEqual( decorated_processed_text, '001223' )\n\nif __name__ == '__main__':\n main()", "id": "4996368", "language": "Python", "matching_score": 1.2726114988327026, "max_stars_count": 2, "path": "tests/test_single_modifier.py" }, { "content": "import re\nfrom math import ceil, floor\nfrom json import dump, load\nfrom .utils import decorate\nfrom .slow import intersect, intersection, span_len_delta, span_length, span_rtrim\n\ndef span_offset(span, replacement_span_map, delta_start=0, delta_end=0, delta_i=0):\n cached_delta_start = 0\n cached_delta_end = 0\n cached_delta_count = 0\n for span_source, span_target, _ in replacement_span_map[delta_i:]:\n if span_target[1] <= span[0]:\n d = span_len_delta(span_target, span_source)\n delta_start += d\n delta_end += d\n cached_delta_start = delta_start\n cached_delta_end = delta_end\n cached_delta_count += 1\n else:\n target_trimmed_end = span_rtrim(span_target, span[1])\n if target_trimmed_end:\n d = span_len_delta(span_target, span_source)\n # int() and 1.0 multipliers for 2.7 compatibility\n ratio_end = 1.0 * span_length(target_trimmed_end) / span_length(span_target)\n delta_end += int(floor(d * ratio_end))\n\n target_trimmed_start = span_rtrim(span_target, span[0])\n if target_trimmed_start:\n ratio_start = 1.0 * span_length(target_trimmed_start) / span_length(span_target)\n delta_start += int(ceil(d * ratio_start))\n else:\n 
break\n\n return delta_start, delta_end, cached_delta_start, cached_delta_end, delta_i + cached_delta_count\n\ndef insert(entry, replacement_span_map, allow_intersect=True, offset=0):\n i = 0\n for source_span, target_span, _ in replacement_span_map[offset:]:\n if ((source_span[0] >= entry[0][1] or source_span[0] >= entry[0][0]) or\n (target_span[0] >= entry[1][1] or target_span[0] >= entry[1][0]) or\n (intersect(source_span, entry[0])) or\n (intersect(target_span, entry[1]))):\n break\n i+=1\n\n merge_entries = []\n for e in replacement_span_map[offset:]:\n source_span, target_span, _ = e\n if source_span[0] >= entry[0][1] and target_span[0] >= entry[1][1]:\n break\n if intersect(source_span, entry[0]):\n if not allow_intersect:\n raise ValueError(\"Intersecting groups not allowed.\")\n merge_entries.append(e)\n\n if merge_entries:\n entry_source_length = span_length(entry[0])\n entry_target_length = span_length(entry[1])\n source_length, target_length = 0, 0\n source_span_start, target_span_start = entry[0][0], entry[1][0]\n\n aligned_entry_target_span = (entry[1][0], entry[1][1] - entry[2])\n for e in merge_entries:\n source_intersection = intersection(e[0], entry[0])\n entry_source_length -= span_length(source_intersection) if source_intersection else 0\n target_intersection = intersection(e[1], aligned_entry_target_span)\n entry_target_length -= span_length(target_intersection) if target_intersection else 0\n source_length += span_length(e[0])\n target_length += span_length(e[1])\n source_span_start = min(source_span_start, e[0][0])\n target_span_start = min(target_span_start, e[1][0])\n\n source_span = (source_span_start, source_span_start + source_length + entry_source_length)\n target_span = (target_span_start, target_span_start + target_length + entry_target_length)\n\n entry = source_span, (target_span[0], target_span[1]), entry[2]\n\n for e in merge_entries:\n replacement_span_map.remove(e)\n\n replacement_span_map.insert(offset + i, entry)\n\n for j, (source_span, target_span, len_delta) in enumerate(replacement_span_map[i+offset+1:]):\n replacement_span_map[i + offset + 1 + j] = source_span, (target_span[0] + entry[2], target_span[1] + entry[2]), len_delta\n\n return i + offset\n\ndef repl(match, replacement_map, replacement_map_keys, replacement_span_map, cache, exceptions):\n match_string = match.group()\n\n if exceptions:\n for exception in exceptions:\n if isinstance(exception, tuple):\n pattern, replacement = exception \n else: # str\n pattern, replacement = exception, exception \n \n if re.match(pattern, match_string):\n return replacement\n\n match_start = match.span(0)[0]\n if len(match.regs) == 1:\n raise Exception('No match groups in regex pattern.')\n _, delta_end, c_delta_start, c_delta_end, c_delta_i = span_offset(match.span(1), replacement_span_map, cache['offset'][0], cache['offset'][1], cache['offset'][2])\n cache['offset'] = (c_delta_start, c_delta_end, c_delta_i)\n\n current_match_delta = 0\n\n for i in replacement_map_keys:\n span = match.span(i)\n group_rel_span = span[0] - match_start, span[1] - match_start\n\n replacement = replacement_map[i] if isinstance(replacement_map[i], str) else replacement_map[i](match.group(i))\n match_string = match_string[0:group_rel_span[0] + current_match_delta] + replacement + match_string[group_rel_span[1] + current_match_delta:]\n\n match_delta = delta_end + current_match_delta\n group_rel_span_alligned = group_rel_span[0] + match_delta, group_rel_span[1] + match_delta\n\n span_target = group_rel_span_alligned[0] + 
match_start, group_rel_span_alligned[0] + len(replacement) + match_start\n\n new_entry = span, span_target, span_len_delta(span_target, span)\n\n cache['insert'] = insert(new_entry, replacement_span_map, allow_intersect=False, offset=cache['insert'])\n current_match_delta += new_entry[2]\n\n return match_string\n\ndef normalize_source_spans(replacement_span_map, tmp_replacement_span_map):\n '''\n Corrects the source spans according to earlier length changes\n '''\n cached_delta_start = 0\n cached_delta_end = 0\n delta_i = 0\n for i, (tmp_source_span, _, len_delta) in enumerate(tmp_replacement_span_map):\n delta_start, delta_end, cached_delta_start, cached_delta_end, delta_i = span_offset(tmp_source_span, replacement_span_map, cached_delta_start, cached_delta_end, delta_i)\n tmp_replacement_span_map[i] = (tmp_source_span[0] - delta_start, tmp_source_span[1] - delta_end), tmp_replacement_span_map[i][1], len_delta\n\ndef init_replacement_span_map(replacement_span_map):\n new_span_map = []\n new_replacement_span_map = []\n if replacement_span_map:\n for source_span, target_span in replacement_span_map:\n entry = (tuple(source_span), tuple(target_span), span_len_delta(target_span, source_span))\n new_replacement_span_map.append( entry )\n new_span_map.append( entry[:2] )\n return new_replacement_span_map, new_span_map\n\nclass Processor:\n def __init__(self, text, processed_text=None, replacement_span_map=None):\n self.__processing = False\n self.__text = (text + '.')[:-1]\n self.__processed_text = (processed_text + '.')[:-1] if processed_text else (text + '.')[:-1]\n self.__replacement_span_map, self.__span_map = init_replacement_span_map(replacement_span_map)\n\n def process(self, pattern, replacement_map, count=0, flags=0, exceptions=None):\n if not self.__processing:\n raise Exception(\"Processing session not initiated\")\n\n tmp_replacement_span_map = []\n cache = {'insert':0, 'offset':(0,0,0)}\n\n replacement_map_keys = sorted(replacement_map.keys())\n self.__processed_text = re.sub(\n pattern = pattern,\n repl = lambda match: repl(match, replacement_map, replacement_map_keys, tmp_replacement_span_map, cache, exceptions),\n string = self.__processed_text,\n count=count,\n flags = flags\n )\n\n normalize_source_spans(self.__replacement_span_map, tmp_replacement_span_map)\n\n offset = 0\n for entry in tmp_replacement_span_map:\n offset = insert(entry, self.__replacement_span_map, offset=offset)\n\n def swap(self):\n self.__replacement_span_map = [(destination, source, -delta) for source, destination, delta in self.__replacement_span_map]\n\n tmp = self.__text\n self.__text = self.__processed_text\n self.__processed_text = tmp\n\n def decorate(self):\n return decorate(self.__text, self.__processed_text, self.span_map)\n\n @staticmethod\n def load(fp):\n state = load(fp)\n return Processor(state['text'], state['processed_text'], state['span_map'])\n\n def save(self, fp):\n if self.__processing:\n raise Exception(\"Saving state in processing mode not allowed\")\n\n state = {\n 'text': self.text,\n 'processed_text': self.processed_text,\n 'span_map': self.span_map\n }\n\n dump(state, fp)\n\n @property\n def span_map(self):\n return self.__span_map\n\n @property\n def text(self):\n return self.__text\n\n @property\n def processed_text(self):\n return self.__processed_text\n\n def __enter__(self):\n if self.__processing:\n raise Exception(\"Already processing.\")\n self.__processing = True\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.__processing = False\n 
self.__span_map = [(src, dst) for src, dst, _ in self.__replacement_span_map]", "id": "1082438", "language": "Python", "matching_score": 2.139784336090088, "max_stars_count": 2, "path": "re_map/core.py" }, { "content": "def decorate(text, processed_text, span_map):\n text = str(text)\n processed_text = str(processed_text)\n for i, span in enumerate(span_map):\n span = span_map[i]\n a = span[0][1] - span[0][0]\n b = span[1][1] - span[1][0]\n v = '{:x}'.format(i%16).upper()\n text = text[0:span[0][0]] + a*v + text[span[0][1]:]\n processed_text = processed_text[0:span[1][0]] + b*v + processed_text[span[1][1]:]\n\n return text, processed_text\n\n\ndef text_range(text, processed_text, span_map, source_len_limit, target_len_limit):\n span_map_length = len(span_map)\n for start in range(span_map_length):\n for end in range(start, span_map_length):\n source_start = span_map[start][0][0]\n source_end = span_map[end][0][1]\n target_start = span_map[start][1][0]\n target_end = span_map[end][1][1]\n if ( source_end - source_start < source_len_limit and \n target_end - target_start < source_len_limit) :\n continue\n else:\n yield text[source_start:source_end], processed_text[target_start:target_end]\n break", "id": "11511704", "language": "Python", "matching_score": 1.1403559446334839, "max_stars_count": 2, "path": "re_map/utils.py" }, { "content": "def span_rtrim(span, value):\n if span[0] < value:\n return (min(span[0], value), min(span[1], value))\n\ndef intersect(span_a, span_b):\n return max(span_a[0], span_b[0]) < min(span_a[1], span_b[1])\n\ndef intersection(span_a, span_b):\n span = (max(span_a[0], span_b[0]), min(span_a[1], span_b[1]))\n if span[0] < span[1]:\n return span\n\ndef span_len_delta(span_1, span_2):\n return (span_1[1] - span_1[0]) - (span_2[1] - span_2[0])\n\ndef span_length(span):\n return span[1] - span[0]", "id": "585526", "language": "Python", "matching_score": 0, "max_stars_count": 2, "path": "re_map/slow.py" }, { "content": "# -*- coding: utf-8 -*-\n\nVERSION = '0.2.8'\nRELEASE = '1'\n", "id": "1058599", "language": "Python", "matching_score": 0.8681966066360474, "max_stars_count": 15, "path": "phonology_engine/version.py" }, { "content": "from .core import Processor\n\n__version__ = '0.4.6'\n", "id": "6132027", "language": "Python", "matching_score": 0.6179832816123962, "max_stars_count": 2, "path": "re_map/__init__.py" } ]
2.260082
widberg
[ { "content": "with open('meshes.txt','r') as file:\n\tfor line in file:\n\t\tline = line.strip()\n\t\tprint(line + \"_BF_PARTICLESDATA_0\")\n", "id": "2560527", "language": "Python", "matching_score": 0.7595948576927185, "max_stars_count": 1, "path": "data/gen.py" }, { "content": "import bpy\nfrom bpy_extras.io_utils import ImportHelper\nfrom bpy.props import StringProperty\nfrom bpy.types import Operator\nimport io, struct, collections, math, numpy\n\n\ndef snorm16_to_float(x):\n return max(x / 32767.0, -1.0)\n\n\ndef normalize_qtangent(qtangent):\n norm = numpy.linalg.norm(qtangent)\n if norm != 0:\n return qtangent / norm\n return qtangent\n\n\ndef decode_qtangent(qtangent):\n (i, j, k, w) = qtangent\n \n fTx = 2.0 * i\n fTy = 2.0 * j\n fTz = 2.0 * k\n fTwx = fTx * w\n fTwy = fTy * w\n fTwz = fTz * w\n fTxx = fTx * i\n fTxy = fTy * i\n fTxz = fTz * i\n fTyz = fTz * j\n fTyy = fTy * j\n fTzz = fTz * k\n \n normal = (1.0-(fTyy+fTzz), fTxy+fTwz, fTxz-fTwy)\n tangent = ( fTxy-fTwz, 1.0-(fTxx+fTzz), fTyz+fTwx )\n bitangent = numpy.cross(normal, tangent) * numpy.sign(w)\n \n return normal, tangent, bitangent\n\nclass MeshZ:\n cylindre_cols = []\n unknown7s = []\n vertex_buffers = []\n index_buffers = []\n unknown11s = []\n unknown12s = []\n unknown16s = []\n unknown15s = []\n unknown15_indicies = []\n \n def __init__(self, data):\n # Header\n bs = io.BytesIO(data)\n s = struct.Struct('<IIIIII')\n ClassHeader = collections.namedtuple('ClassHeader', 'data_size links_size decompressed_size compressed_size class_crc32 crc32')\n class_header = ClassHeader._make(s.unpack(bs.read(s.size)))\n \n assert class_header.compressed_size == 0, \"Compressed objects not allowed\"\n \n # Links\n bs.seek(class_header.links_size, io.SEEK_CUR) # skip links\n \n # Data\n strip_vertices_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(strip_vertices_count * 3 * 4, io.SEEK_CUR)\n print(strip_vertices_count)\n \n unknown0_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(unknown0_count * 4 * 4, io.SEEK_CUR)\n print(unknown0_count)\n \n texcoord_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(texcoord_count * 2 * 4, io.SEEK_CUR)\n print(texcoord_count)\n \n normal_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(normal_count * 3 * 4, io.SEEK_CUR)\n print(normal_count)\n \n strip_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, strip_count):\n strip_vertices_index_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(strip_vertices_index_count * 1 * 2 + 2 * 4, io.SEEK_CUR)\n print(strip_count)\n \n unknown4_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, unknown4_count):\n unknown0_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(unknown0_count * 2 * 4, io.SEEK_CUR)\n print(unknown4_count)\n \n material_crc32_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(material_crc32_count * 1 * 4, io.SEEK_CUR)\n print(material_crc32_count)\n \n cylindre_col_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, cylindre_col_count):\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n min_vertex = (x,y,z)\n bs.seek(2 * 2, io.SEEK_CUR)\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n max_vertex = (x,y,z)\n unknown7s_index = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n unknown = 
int.from_bytes(bs.read(2), byteorder='little', signed=False)\n self.cylindre_cols.append((min_vertex, max_vertex))\n print(cylindre_col_count)\n \n unknown7_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, unknown7_count):\n x = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n y = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n z = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n unknown = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n self.unknown7s.append((x, y, z))\n print(unknown7_count)\n \n unknown8_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(unknown8_count * 8 * 4, io.SEEK_CUR)\n print(unknown8_count)\n \n s = struct.Struct('<fffhhhhffff')\n Vertex = collections.namedtuple('Vertex', 'x y z i j k w u0 v0 u1 v1')\n VertexBuffer = collections.namedtuple('VertexBuffer', 'id positions normals tangents bitangents uvs')\n vertex_buffer_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, vertex_buffer_count):\n vertex_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n vertex_size = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n id = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n positions = []\n normals = []\n tangents = []\n bitangents = []\n uvs = []\n for j in range(0, vertex_count):\n vertex = Vertex._make(s.unpack(bs.read(s.size)))\n positions.append((vertex.x, vertex.y, vertex.z))\n qtangent = (snorm16_to_float(vertex.i), snorm16_to_float(vertex.j), snorm16_to_float(vertex.k), snorm16_to_float(vertex.w))\n qtangent = normalize_qtangent(qtangent)\n normal, tangent, bitangent = decode_qtangent(qtangent)\n normals.append(normal)\n tangents.append(tangent)\n bitangents.append(bitangent)\n uvs.append((vertex.u0, vertex.v0))\n bs.seek(vertex_size - s.size, io.SEEK_CUR)\n self.vertex_buffers.append(VertexBuffer._make((id, positions, normals, tangents, bitangents, uvs)))\n print(vertex_buffer_count)\n \n IndexBuffer = collections.namedtuple('IndexBuffer', 'id data')\n index_buffer_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, index_buffer_count):\n index_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n id = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n data = []\n for j in range(0, int(index_count / 3)):\n # invert winding order for blender\n a = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n b = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n c = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n data.append((c, b, a))\n self.index_buffers.append(IndexBuffer._make((id, data)))\n print(index_buffer_count)\n \n unknown11_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, unknown11_count):\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n self.unknown11s.append((x,y,z))\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n self.unknown11s.append((x,y,z))\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n self.unknown11s.append((x,y,z))\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n self.unknown11s.append((x,y,z))\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n self.unknown11s.append((x,y,z))\n print(unknown11_count)\n \n unknown13_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, unknown13_count):\n bs.seek(24 * 2, io.SEEK_CUR)\n unknown1_count = int.from_bytes(bs.read(4), 
byteorder='little', signed=False)\n bs.seek(unknown1_count * 7 * 4, io.SEEK_CUR)\n print(unknown13_count)\n \n unknown16_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, unknown16_count):\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n min_vertex = (x,y,z)\n bs.seek(2 * 2, io.SEEK_CUR)\n (x, y, z) = struct.unpack('<fff', bs.read(12))\n max_vertex = (x,y,z)\n pairs_index = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n unknown = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n self.unknown16s.append((min_vertex, max_vertex))\n print(unknown16_count)\n \n pair_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(pair_count * 2 * 2, io.SEEK_CUR)\n print(pair_count)\n \n unknown15s_indices = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(unknown15s_indices * 1 * 2, io.SEEK_CUR)\n print(unknown15s_indices)\n \n morph_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, morph_count):\n name_size = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n bs.seek(name_size * 1 * 1, io.SEEK_CUR)\n bs.seek(1 * 4 + 1 * 2, io.SEEK_CUR)\n unknown15_indicies_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for j in range(0, unknown15_indicies_count):\n x = int.from_bytes(bs.read(2), byteorder='little', signed=False)\n self.unknown15_indicies.append(x)\n unknown15_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for j in range(0, unknown15_count):\n x = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024\n y = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024\n z = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024\n self.unknown15s.append((x, y, z))\n bs.seek(1 * 2, io.SEEK_CUR)\n print(morph_count)\n \n unknown12_count = int.from_bytes(bs.read(4), byteorder='little', signed=False)\n for i in range(0, unknown12_count):\n x = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024\n y = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024\n z = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024\n self.unknown12s.append((x, y, z))\n print(unknown12_count)\n\n\nclass ImportAsoboMeshZ(Operator, ImportHelper):\n \"\"\"Import Asobo Mesh_Z\"\"\"\n bl_idname = \"import_asobo.mesh_z\"\n bl_label = \"Import Asobo Mesh_Z\"\n\n filename_ext = \".Mesh_Z\"\n \n filter_glob: StringProperty(\n default=\"*.Mesh_Z\",\n options={'HIDDEN'},\n maxlen=255,\n )\n\n def execute(self, context): \n f = open(self.filepath, 'rb')\n data = f.read()\n f.close()\n\n mesh = MeshZ(data)\n \n for i in range(0, len(mesh.index_buffers)):\n ob_name = \"meshz\" + str(i)\n me = bpy.data.meshes.new(ob_name + \"Mesh\")\n ob = bpy.data.objects.new(ob_name, me)\n me.from_pydata(mesh.vertex_buffers[i].positions, [], mesh.index_buffers[i].data)\n uv_layer = me.uv_layers.new()\n for face in me.polygons:\n for vert_idx, loop_idx in zip(face.vertices, face.loop_indices):\n uv_layer.data[loop_idx].uv = mesh.vertex_buffers[i].uvs[vert_idx]\n for face in me.polygons:\n face.use_smooth = True\n me.use_auto_smooth = True\n me.normals_split_custom_set_from_vertices(mesh.vertex_buffers[i].normals)\n me.calc_tangents()\n #if i == 1:\n # color_layer = me.vertex_colors.new()\n # for face in me.polygons:\n # for vert_idx, loop_idx in zip(face.vertices, face.loop_indices):\n # (r,g,b) = mesh.vertex_buffers[2].positions[vert_idx]\n # color_layer.data[loop_idx].color = 
(r*10000,g*10000,b*10000,1)\n me.validate(clean_customdata=False)\n me.update()\n context.scene.collection.objects.link(ob)\n \n #if mesh.unknown12s:\n # ob_name = \"meshz\" + str(10)\n # me = bpy.data.meshes.new(ob_name + \"Mesh\")\n # ob = bpy.data.objects.new(ob_name, me)\n # me.from_pydata(mesh.unknown12s, [], [])\n # me.update()\n # context.scene.collection.objects.link(ob)\n \n #if mesh.unknown15s:\n # ob_name = \"meshz\" + str(20)\n # me = bpy.data.meshes.new(ob_name + \"Mesh\")\n # ob = bpy.data.objects.new(ob_name, me)\n # me.from_pydata(mesh.unknown15s, [], [])\n # me.update()\n # context.scene.collection.objects.link(ob)\n \n if mesh.unknown16s:\n ob_name = \"meshz\" + str(30)\n me = bpy.data.meshes.new(ob_name + \"Mesh\")\n ob = bpy.data.objects.new(ob_name, me)\n vertices = []\n for e in mesh.unknown16s:\n vertices.append(e[0])\n vertices.append(e[1])\n me.from_pydata(vertices, [(i * 2, i * 2 + 1) for i in range(0, len(mesh.unknown16s))], [])\n me.update()\n context.scene.collection.objects.link(ob)\n \n if mesh.cylindre_cols:\n ob_name = \"meshz\" + str(40)\n me = bpy.data.meshes.new(ob_name + \"Mesh\")\n ob = bpy.data.objects.new(ob_name, me)\n vertices = []\n for e in mesh.cylindre_cols:\n vertices.append(e[0])\n vertices.append(e[1])\n me.from_pydata(vertices, [(i * 2, i * 2 + 1) for i in range(0, len(mesh.cylindre_cols))], [])\n me.update()\n context.scene.collection.objects.link(ob)\n \n if mesh.unknown7s:\n ob_name = \"meshz\" + str(50)\n me = bpy.data.meshes.new(ob_name + \"Mesh\")\n ob = bpy.data.objects.new(ob_name, me)\n indices = []\n for x in mesh.unknown7s:\n indices.append((x[0], x[1], x[2]))\n me.from_pydata(mesh.unknown12s, [], indices)\n me.update()\n context.scene.collection.objects.link(ob)\n \n if mesh.unknown11s:\n ob_name = \"meshz\" + str(60)\n me = bpy.data.meshes.new(ob_name + \"Mesh\")\n ob = bpy.data.objects.new(ob_name, me)\n indices = []\n me.from_pydata(mesh.unknown11s, [], [])\n me.update()\n context.scene.collection.objects.link(ob)\n \n return {'FINISHED'}\n\n\ndef menu_func_import(self, context):\n self.layout.operator(ImportAsoboMeshZ.bl_idname, text=\"Asobo Mesh_Z (.Mesh_Z)\")\n\n\ndef register():\n bpy.utils.register_class(ImportAsoboMeshZ)\n bpy.types.TOPBAR_MT_file_import.append(menu_func_import)\n\n\ndef unregister():\n bpy.utils.unregister_class(ImportAsoboMeshZ)\n bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)\n\n\nif __name__ == \"__main__\":\n register()\n\n bpy.ops.import_asobo.mesh_z('INVOKE_DEFAULT')\n", "id": "614359", "language": "Python", "matching_score": 1.485632061958313, "max_stars_count": 0, "path": "import_asobo_meshz.py" }, { "content": "# MIT License\n#\n# Copyright (c) 2018-2020 widberg\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# pcil v1.0.0 by widberg\n# https://github.com/widberg/pcil\n\nfrom argparse import ArgumentParser\nfrom urllib.request import urlretrieve\nimport sys\nimport os\n\nparser = ArgumentParser()\nparser.add_argument(\"-i\", \"--in\", dest=\"pci_path\", default=\"pci.ids\",\n help=\"path to output the header file\", metavar=\"PATH\")\nparser.add_argument(\"-o\", \"--out\", dest=\"header_path\", default=\"include/pcil/pcil.hpp\",\n help=\"path to output the header file\", metavar=\"PATH\")\nparser.add_argument(\"-q\", \"--quiet\", action=\"store_true\", dest=\"quiet\",\n default=False, help=\"don't print status messages to stdout\")\nparser.add_argument(\"-u\", \"--url\", dest=\"url\",\n help=\"URL to request the ids database from\", metavar=\"URL\")\nargs = parser.parse_args()\n\nvendors = list()\n\n\ndef reporthook(block_number, block_size, total_size):\n read_so_far = block_number * block_size\n if total_size > 0:\n percent = read_so_far * 1e2 / total_size\n s = \"\\r%5.1f%% %*d / %d\" % (\n percent, len(str(total_size)), read_so_far, total_size)\n sys.stderr.write(s)\n if read_so_far >= total_size:\n sys.stderr.write(\"\\n\")\n else:\n sys.stderr.write(\"read %d\\n\" % (read_so_far,))\n\n\nif __name__ == '__main__':\n pci_path = os.path.join(os.path.dirname(__file__), args.pci_path)\n os.makedirs(os.path.dirname(pci_path), exist_ok=True)\n header_path = os.path.join(os.path.dirname(__file__), args.header_path)\n os.makedirs(os.path.dirname(header_path), exist_ok=True)\n if args.url:\n if args.quiet:\n urlretrieve(args.url, pci_path)\n else:\n print(\"Downloading id database from \" + args.url)\n urlretrieve(args.url, pci_path, reporthook)\n print(\"Download complete\")\n print(\"The ids database is located at \" + pci_path.replace(\"\\\\\", \"/\"))\n with open(header_path, \"w\", encoding=\"utf8\") as header_file:\n header_file.write(\"\"\"// MIT License\n//\n// Copyright (c) 2019 widberg\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in all\n// copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n// SOFTWARE.\n//\n// This file was generated with a script.\n// This header was generated with pcil v1.0.0 by widberg\n// https://github.com/widberg/pcil\n\n#ifndef PCIL_HPP\n#define PCIL_HPP\n\n#include <cstdint>\n\nnamespace pcil\n{\n inline constexpr const char* deviceLookup(std::uint16_t vendorId, std::uint16_t deviceId)\n {\n switch((vendorId << 16) | (deviceId))\n {\n\"\"\")\n with open(os.path.join(os.path.dirname(__file__), pci_path), \"r\", encoding=\"utf8\") as pci_file:\n vendorId = 0\n for line in pci_file:\n if not line.startswith(\"#\") and not line.isspace():\n if not line.startswith(\"C \"):\n s = line.split(\" \")\n if len(line) - len(line.lstrip('\\t')) == 0:\n vendorId = int(s[0], 16)\n vendors.append([vendorId, s[1]])\n elif len(line) - len(line.lstrip('\\t')) == 1:\n deviceId = int(s[0], 16)\n key = ((vendorId << 16) | deviceId)\n header_file.write(\" case {key}: return \\\"{name}\\\";\\n\".format(key=key, name=s[1].strip().replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\")))\n else:\n break\n header_file.write(\"\"\" default: return \"Unrecognized Device\";\n }\n }\n \n inline constexpr const char* vendorLookup(std::uint16_t vendorId)\n {\n switch(vendorId)\n {\n\"\"\")\n for vendor in vendors:\n header_file.write(\" case {key}: return \\\"{name}\\\";\\n\".format(key=vendor[0], name=vendor[1].strip().replace(\"\\\\\", \"\\\\\\\\\").replace(\"\\\"\", \"\\\\\\\"\")))\n header_file.write(\"\"\" default: return \"Unrecognized Vendor\";\n }\n }\n}\n\n#endif // PCIL_HPP\n\"\"\")\n if not args.quiet:\n print(\"The header file is located at \" + header_path.replace(\"\\\\\", \"/\"))\n", "id": "11116855", "language": "Python", "matching_score": 0.9999979734420776, "max_stars_count": 0, "path": "pcil.py" }, { "content": "import csv\n\ntotal = 0\ntotalSolved = 0\n\ndictionaryTotal = {}\ndictionaryTotalSolved = {}\n\ndpcTotal = {}\ndpcSolved = {}\n\nwith open('crc32s.txt') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in spamreader:\n\t\tif row[2] not in dictionaryTotal:\n\t\t\tdictionaryTotal[row[2]] = 0\n\t\t\tdictionaryTotalSolved[row[2]] = 0\n\t\tfor dpc in row[3].split(' '):\n\t\t\tif dpc not in dpcTotal:\n\t\t\t\tdpcTotal[dpc] = 0\n\t\t\t\tdpcSolved[dpc] = 0\n\t\t\tdpcTotal[dpc] += 1\n\t\tdictionaryTotal[row[2]] += 1\n\t\ttotal += 1\n\t\tif len(row[4]):\n\t\t\tfor dpc in row[3].split(' '):\n\t\t\t\tdpcSolved[dpc] += 1\n\t\t\tdictionaryTotalSolved[row[2]] += 1\n\t\t\ttotalSolved += 1\n\nfor key in dictionaryTotal:\n\tprint('{}: {}/{} {:.2%}'.format(key, dictionaryTotalSolved[key], dictionaryTotal[key], float(dictionaryTotalSolved[key]) / dictionaryTotal[key]))\n\nprint('\\n')\n\nfor key in dpcTotal:\n\tprint('{}: {}/{} {:.2%}'.format(key, dpcSolved[key], dpcTotal[key], float(dpcSolved[key]) / dpcTotal[key]))\n\nprint('\\nTotal: {}/{} {:.2%}'.format(totalSolved, total, float(totalSolved) / total))\n", "id": "1398350", "language": "Python", "matching_score": 0.015401187352836132, "max_stars_count": 1, "path": "data/progress.py" } ]
0.879796
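Editor's note: the Mesh_Z reader in import_asobo_meshz.py above mixes two idioms — 4-byte little-endian counts read with int.from_bytes / struct.unpack, and morph deltas stored as signed 16-bit integers scaled by 1/1024 (a fixed-point convention). A minimal, self-contained sketch of that fixed-point round trip on a synthetic buffer; the field layout below is illustrative only, not the real Mesh_Z layout:

import io
import struct

# Build a synthetic little-endian buffer: a 4-byte count followed by
# `count` triples of signed 16-bit fixed-point values (1/1024 units).
triples = [(1.5, -0.25, 0.0), (2.0, 0.5, -1.0)]
payload = struct.pack('<I', len(triples))
for x, y, z in triples:
    payload += struct.pack('<hhh', int(x * 1024), int(y * 1024), int(z * 1024))

bs = io.BytesIO(payload)

# Read it back with the same idioms used in MeshZ's parsing code.
count = int.from_bytes(bs.read(4), byteorder='little', signed=False)
decoded = []
for _ in range(count):
    x = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024
    y = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024
    z = int.from_bytes(bs.read(2), byteorder='little', signed=True) / 1024
    decoded.append((x, y, z))

print(decoded)  # [(1.5, -0.25, 0.0), (2.0, 0.5, -1.0)]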
immohsin
[ { "content": "from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Product(models.Model):\n title = models.CharField(max_length=120)\n description = models.TextField()\n price = models.DecimalField(decimal_places=2, max_digits=7, default=39.99)\n image = models.FileField(upload_to='products/', null=True)\n owner = models.ForeignKey('Student', on_delete=models.SET_NULL, null=True)\n Availability = (\n ('a', 'Available'),\n ('s', 'Sold'),\n )\n status = models.CharField(max_length=1, choices=Availability, blank=True, default='a', help_text='Item availability')\n flag_count = models.IntegerField(default=0)\n\n def __str__(self):\n return self.title\n\nclass Student(models.Model):\n django_user = models.OneToOneField(User, unique=True, null=True, on_delete=models.CASCADE)\n first_name = models.CharField(max_length=100)\n last_name = models.CharField(max_length=100)\n bio = models.TextField()\n password = models.CharField(max_length=120, blank=True, help_text=\"Enter a password\")\n College = (\n ('u', '<NAME>'),\n ('a', 'Amherst College'),\n ('s', 'Smith College'),\n ('m', 'Mount Holyoke College'),\n ('h', 'Hampshire College'),\n )\n\n college = models.CharField(max_length=1, choices=College, blank=True, default='u', help_text='college choice')\n email = models.EmailField(max_length=100)\n star_count = models.DecimalField(decimal_places=0, max_digits=1)\n image = models.FileField(upload_to='user_profile_picture/', null=True, blank=True)\n\n def __str__(self):\n return '{0}, {1}'.format(self.first_name, self.last_name)\n\nclass Flag(models.Model):\n user = models.ForeignKey('Student', on_delete=models.SET_NULL, null=True, blank=True)\n products = models.ManyToManyField('Product', blank=True)\n\n def __str__(self):\n return str(self.id)\n", "id": "4808949", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Django/ffs/models.py" } ]
0
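Editor's note: the models.py above relies on Django's choices mechanism for Product.status and Student.college. As a plain-Python illustration (no Django project required), this is roughly what the split between the stored one-character code and its display label amounts to; the tuple is copied from the model, and the lookup mirrors what Django's auto-generated get_status_display() would return:

# Availability tuple as declared on Product in models.py above.
AVAILABILITY = (
    ('a', 'Available'),
    ('s', 'Sold'),
)

labels = dict(AVAILABILITY)   # {'a': 'Available', 's': 'Sold'}
stored_status = 'a'           # the one-character code saved in the CharField column
print(labels[stored_status])  # 'Available' -- the human-readable label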
analyticdan
[ { "content": "import numpy as np\nimport cv2\nimport sys\n\ndef decay(accumulator):\n return accumulator / decay_const\n\ndef rejuvenate(fgmask, accumulator):\n fgmask = np.where(fgmask > 0, 1, 0)\n fgmask = np.stack((fgmask,) * 3, axis=-1)\n return np.maximum(accumulator, fgmask * rejuvenate_const)\n\ndecay_rate = 1.01 # Generally values between 1.01 and 2.0 seem reasonable.\ndisplay = False\noutput = False\n\ninput_file = 0 # Default to zero, which asks the OS to use the webcam.\noutput_file = 'out.mp4'\n\ni = 1\nwhile i < len(sys.argv):\n if sys.argv[i] == '-d':\n display = True\n elif sys.argv[i] == '-o':\n output = True\n elif sys.argv[i] == '-f':\n i += 1\n if i < len(sys.argv):\n output_file = sys.argv[i]\n else:\n print('Error: To use the \"-f\" flag, the program must be provided '\n 'the name of an output file as the next argument.')\n exit(-1)\n elif sys.argv[i] == '-of':\n output = True\n i += 1\n if i < len(sys.argv):\n output_file = sys.argv[i]\n else:\n print('Error: To use the \"-of\" flag, the program must be provided '\n 'the name of an output file as the next argument.')\n exit(-1)\n elif sys.argv[i] == '-i':\n i += 1\n if i < len(sys.argv):\n input_file = sys.argv[i]\n else:\n print('Error: To use the \"-i\" flag, the program must be provided '\n 'the name of an input file as the next argument.')\n exit(-1)\n elif sys.argv[i] == '-r':\n i += 1\n if i < len(sys.argv):\n try:\n decay_rate = float(sys.argv[i])\n except ValueError:\n print('Error: To use the \"-r\" flag, the program must be provided '\n 'a number as a decay rate.')\n exit(-1)\n if decay_rate <= 0:\n print('Error: To use the \"-r\" flag, the program must be provided '\n 'a positive number as a decay rate.')\n exit(-1)\n else:\n print('Error: To use the \"-r\" flag, the program must be provided '\n 'the decay rate as the next argument.')\n exit(-1)\n else:\n print('Unrecognized program parameter: {0}'.format(sys.argv[i]))\n exit(-1)\n i += 1\n\nvideo = cv2.VideoCapture(input_file)\nframe_shape = (int(video.get(4)), int(video.get(3)), 3)\ndecay_const = np.full(frame_shape, [decay_rate, 1, 1])\nrejuvenate_const = np.full(frame_shape, [179, 225, 225])\naccumulator = np.full(frame_shape, [0, 225, 225])\n\nbg_subtractor = cv2.bgsegm.createBackgroundSubtractorMOG()\n\nframes = []\n\nwhile video.isOpened():\n try:\n ok, frame = video.read()\n if not ok:\n break\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n fgmask = bg_subtractor.apply(gray)\n accumulator = decay(accumulator)\n accumulator = rejuvenate(fgmask, accumulator)\n frame = cv2.cvtColor(accumulator.astype(np.uint8), cv2.COLOR_HSV2BGR)\n if display:\n cv2.imshow('Video', frame)\n cv2.waitKey(1)\n frames.append(frame)\n except KeyboardInterrupt:\n break\n\nif output:\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n fps = int(video.get(5))\n out_shape = (int(video.get(3)), int(video.get(4)))\n out = cv2.VideoWriter(output_file, fourcc, fps, out_shape)\n for frame in frames:\n out.write(frame)\n out.release()\n\nvideo.release()\ncv2.destroyAllWindows()\n \n", "id": "7225341", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "motion-heatmap.py" } ]
0
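Editor's note: motion-heatmap.py keeps a per-pixel HSV accumulator: decay() divides only the hue channel by decay_rate each frame, while rejuvenate() snaps freshly moving pixels back to the "hot" value hue 179 via an element-wise maximum. A toy numpy-only walk-through of that bookkeeping on a 2x2 "frame" with a made-up foreground mask (no OpenCV required):

import numpy as np

decay_rate = 1.01
decay_const = np.full((2, 2, 3), [decay_rate, 1, 1])       # only the hue channel decays
rejuvenate_const = np.full((2, 2, 3), [179, 225, 225])     # "hot" HSV value for fresh motion
accumulator = np.full((2, 2, 3), [0, 225, 225], dtype=float)

def decay(acc):
    return acc / decay_const

def rejuvenate(fgmask, acc):
    mask3 = np.stack((np.where(fgmask > 0, 1, 0),) * 3, axis=-1)
    return np.maximum(acc, mask3 * rejuvenate_const)

# Frame 1: the background subtractor flags the top-left pixel as moving.
fgmask = np.array([[255, 0], [0, 0]])
accumulator = rejuvenate(fgmask, decay(accumulator))
print(accumulator[0, 0, 0])   # 179.0 -> freshly "hot" hue

# Frame 2: nothing moves, so the hot pixel starts cooling off.
fgmask = np.zeros((2, 2))
accumulator = rejuvenate(fgmask, decay(accumulator))
print(accumulator[0, 0, 0])   # ~177.2 -> hue drifting back toward 0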
olivierkes
[ { "content": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nimport csv\nimport argparse\n\ndef parseText(t):\n\n t = t.replace(\"\\\\n\\\\q\", \" \\n\\n\\t\")\n t = t.replace(\"\\\\n\", \" \\n\")\n t = t.replace(\"\\\\q\", \"\\t\")\n t = t.replace(\"\\\\p\", \" \\n\\n\")\n\n return t\n\ndef addMark(t):\n i = -1\n while t[i] in [\"\\n\", \"\\t\"]:\n i -= 1\n return t[:i] + \" °\" + t[i:]\n\ndef getText(book, startChapter, startVerse, endChapter, endVerse, title, showVerses, showMarks):\n r = \"\"\n with open('textes/' + book + \".csv\", 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n within = False\n for row in reader:\n if row[0] == startChapter and row[1] == startVerse:\n within = True\n if within:\n rr = parseText(row[2])\n if rr[-1] != \"\\n\": rr += \" \"\n if showVerses: rr = \":sup:`{}:{}`\\xc2\\xa0\".format(row[0], row[1]) + rr\n if showMarks and not showVerses: rr = addMark(rr)\n r += rr\n #r += \"({}:{}) \".format(row[0], row[1])\n if row[0] == endChapter and row[1] == endVerse:\n break\n return r\n\ndef addTitle(row):\n charSet = \"#=-~_\"\n return \"\\n\\n\" + row[6] + \"\\n\" + charSet[int(row[1])] * len(row[6]) + \"\\n\"\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='This generates a t2t bible.')\n parser.add_argument('-p', '--plan', help='plan to be used',\n default=\"nouveau-testament-commente\")\n #parser.add_argument('-s', '--style', help='style used',\n #default=\"default\")\n parser.add_argument('-v', help='show verses references', action='store_const', const=True, default=False)\n parser.add_argument('-m', help='show marks only', action='store_const', const=True, default=False)\n\n #parser.add_argument('output', help='output file')\n args = parser.parse_args()\n\n plan = args.plan\n #style = args.style\n #output = args.output\n showVerse = args.v\n showMarks = args.m\n\n text = \"\"\n\n with open('plans/' + plan + \".csv\", 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\n r = 0\n struct = []\n for row in reader:\n if r != 0:\n struct.append(row)\n r += 1\n for i in range(len(struct)):\n row = struct[i]\n nextRow = -1\n text += addTitle(row)\n if i != len(struct) - 1: nextRow = struct[i + 1]\n if nextRow != -1 and nextRow[2] == row[2] and nextRow[3] == row[3]:\n pass\n else:\n text += getText(row[0], row[2], row[3], row[4], row[5], row[6], showVerse, showMarks)\n\n print text\n", "id": "6459715", "language": "Python", "matching_score": 5.369047164916992, "max_stars_count": 1, "path": "biblification.py" }, { "content": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nimport csv\nimport argparse\nimport re\n\n\nif __name__ == \"__main__\":\n # Parsing arguments\n parser = argparse.ArgumentParser(description='This generates a t2t bible.')\n parser.add_argument('-p', '--plan', help='plan to be used',\n default=\"nouveau-testament-commente\")\n parser.add_argument('-v', help='show verses references',\n action='store_const', const=True, default=False)\n parser.add_argument('-m', help='show marks only',\n action='store_const', const=True, default=False)\n parser.add_argument('-t', help='show references in titles',\n action='store_const', const=True, default=False)\n args = parser.parse_args()\n\n plan = args.plan\n showVerse = args.v\n showMarks = args.m\n showInTitles = args.t\n\n text = \"\"\n\n def parseText(t):\n \"Format verse references in the chosen way.\"\n a = re.compile('\\[(\\d*)\\:(\\d*)\\]')\n if showVerse:\n s = r':sup:`\\1:\\2` '\n elif showMarks:\n s = r\"° \"\n 
else:\n s = r\"\"\n t = a.sub(s, t)\n return t\n\n def getText(book, startChapter, startVerse, endChapter, endVerse):\n \"Renvoie le texte demandé.\"\n r = \"\"\n f = open('textes/' + book + \".txt\", 'r')\n text = f.read()\n f.close()\n start = text.find(\"[{}:{}]\".format(startChapter, startVerse))\n end = text.find(\"[{}:{}]\".format(endChapter, str(int(endVerse) + 1)))\n if end < 0: # Chapitre suivant\n end = text.find(\"[{}:{}]\".format(str(int(endChapter) + 1), 1))\n if end < 0: # Fin du livre\n end = len(text)\n return parseText(text[start:end])\n\n def makeTitle(row):\n \"Renvoie un titre formatté comme il faut.\"\n charSet = \"#=-~_\"\n titre = row[6]\n if showInTitles: # ajoute la référence si demandée\n if row[2] == row[4]:\n if row[3] == row[5]:\n tt = \"{},{}\".format(row[2], row[3])\n else:\n tt = \"{},{}-{}\".format(row[2], row[3], row[5])\n else:\n tt = \"{},{} – {},{}\".format(row[2], row[3], row[4], row[5])\n\n # Ajoute la référence au titre, différement suivant le niveau\n if row[1] == \"1\":\n titre = \"{}: {}\".format(tt, titre)\n else:\n titre = \"{} ({})\".format(titre, tt)\n\n t = \"\\n\\n\" + titre + \"\\n\" + charSet[int(row[1])] * len(titre) + \"\\n\"\n return t\n\n with open('plans/' + plan + \".csv\", 'rb') as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n r = 0\n struct = []\n for row in reader:\n if r != 0:\n struct.append(row)\n r += 1\n for i in range(len(struct)):\n # Row: 0 = Livre 1 = Niveau 2 = chapitre début\n # 3 = verset debut 4 = chapitre fin 5 = verset fin\n # 6 = Titre\n row = struct[i]\n nextRow = -1\n text += makeTitle(row)\n if i != len(struct) - 1:\n nextRow = struct[i + 1]\n if nextRow != -1 and nextRow[2] == row[2] and nextRow[3] == row[3]:\n pass\n else:\n text += getText(row[0], row[2], row[3], row[4], row[5])\n\n print text\n", "id": "12043035", "language": "Python", "matching_score": 1.8282456398010254, "max_stars_count": 1, "path": "biblification_2.py" }, { "content": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n\nimport re\n\n\ndef livreSuivant(text):\n titre = \"\"\n bookNumber = 0\n bible = []\n book = []\n chap = []\n for i in range(len(text)):\n if i == 0:\n bookTitle = text[0]\n bookNumber = bookNumber + 1\n # Si c'est la dernière ligne, on ajoute le dernier livre\n elif i == len(text) - 1:\n bible.append(book)\n else:\n # Titre de chapitre\n if re.compile('(.+)\\s+(\\d)+').match(text[i]):\n if chap:\n book.append(chap)\n chap = []\n\n # Nouveau livre\n elif re.compile('\\d?\\s?[A-ZÉÈ]{3}.*').match(text[i]):\n book.append(chap)\n chap = []\n bible.append(book)\n book = []\n bookTitle = text[i]\n bookNumber = bookNumber + 1\n chap = []\n else:\n chap.append(text[i])\n return bible\n\nif __name__ == \"__main__\":\n f = open('1-66.txt', 'r')\n fichier = f.read()\n f.close()\n\n fichier = fichier.split(\"\\n\")\n\n # Nettoyage\n text = []\n for l in fichier:\n l = l.strip()\n if l != \"\":\n text.append(l)\n\n # Séparation en livres\n livres = []\n bible = livreSuivant(text)\n print(len(bible), len(bible[0])) # pour vérification\n\n # Génère les CSV\n for b in bible:\n bookNumber = bible.index(b) + 1\n f = open('{}.csv'.format(bookNumber), 'w')\n for c in b:\n for v in c:\n chapNumber = b.index(c) + 1\n verseNumber = c.index(v) + 1\n m = re.compile(r\"\\d+.?\\s(.*)\").match(v)\n if m:\n verse = m.group(1)\n else:\n print(v)\n f.write('{},{},\"{}\"\\n'.format(chapNumber, verseNumber, verse))\n f.close()\n\n # Génère les TXT\n for b in bible:\n bookNumber = bible.index(b) + 1\n f = 
open('{}.txt'.format(bookNumber), 'w')\n for c in b:\n for v in c:\n chapNumber = b.index(c) + 1\n verseNumber = c.index(v) + 1\n m = re.compile(r\"\\d+.?\\s(.*)\").match(v)\n if m:\n verse = m.group(1)\n else:\n print(v)\n f.write('[{}:{}]{}\\n'.format(chapNumber, verseNumber, verse))\n f.close()\n\n", "id": "6154209", "language": "Python", "matching_score": 0.415808767080307, "max_stars_count": 1, "path": "textes/generateur/split.py" } ]
1.828246
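Editor's note: biblification_2.py above rewrites inline verse markers of the form [chapter:verse] with a single regex substitution, choosing between a reStructuredText superscript (-v), a bare typographic mark (-m), or nothing. A small runnable sketch of that substitution; the sample verse text is invented:

import re

text = "[1:1]In the beginning [1:2]and the earth was without form"
marker = re.compile(r'\[(\d*)\:(\d*)\]')      # same pattern as parseText()

print(marker.sub(r':sup:`\1:\2` ', text))     # keep references as superscripts (-v)
print(marker.sub(r'° ', text))                # keep only a mark (-m)
print(marker.sub(r'', text))                  # drop the markers entirely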
apf99
[ { "content": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageColor\nimport time\nfrom scipy.stats import norm\n# Colors (one for each class)\ncmap = ImageColor.colormap\nprint(\"Number of colors =\", len(cmap))\nCOLOR_LIST = sorted([c for c in cmap.keys()])\ndef to_image_coords(boxes, height, width):\n \"\"\"\n The original box coordinate output is normalized, i.e [0, 1].\n \n This converts it back to the original coordinate based on the image\n size.\n \"\"\"\n box_coords = np.zeros_like(boxes)\n box_coords[:, 0] = boxes[:, 0] * height\n box_coords[:, 1] = boxes[:, 1] * width\n box_coords[:, 2] = boxes[:, 2] * height\n box_coords[:, 3] = boxes[:, 3] * width\n \n return box_coords\ndef filter_boxes(min_score, boxes, scores, classes):\n \"\"\"Return boxes with a confidence >= `min_score`\"\"\"\n n = len(classes)\n idxs = []\n for i in range(n):\n if scores[i] >= min_score:\n idxs.append(i)\n \n filtered_boxes = boxes[idxs, ...]\n filtered_scores = scores[idxs, ...]\n filtered_classes = classes[idxs, ...]\n return filtered_boxes, filtered_scores, filtered_classes\ndef draw_boxes(image, boxes, classes, thickness=4):\n \"\"\"Draw bounding boxes on the image\"\"\"\n draw = ImageDraw.Draw(image)\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i])\n color = COLOR_LIST[class_id]\n draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness, fill=color)\n \ndef load_graph(graph_file):\n \"\"\"Loads a frozen inference graph\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(graph_file, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph\ngraph_file = 'saved_models\\\\exported\\\\ssd_mobilenet_v2\\\\frozen_inference_graph.pb'\ndetection_graph = load_graph(graph_file)\n# The input placeholder for the image.\n# `get_tensor_by_name` returns the Tensor with the associated name in the Graph.\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n# Each box represents a part of the image where a particular object was detected.\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n# Each score represent how level of confidence for each of the objects.\n# Score is shown on the result image, together with the class label.\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n# The classification of the object (integer id).\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n# Load a sample image.\nimage = Image.open('images/sim_y.jpg')\nimage_np = np.expand_dims(np.asarray(image, dtype=np.uint8), 0)\nwith tf.Session(graph=detection_graph) as sess: \n # Actual detection.\n (boxes, scores, classes) = sess.run([detection_boxes, detection_scores, detection_classes], \n feed_dict={image_tensor: image_np})\n # Remove unnecessary dimensions\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes)\n confidence_cutoff = 0.8\n # Filter boxes with a confidence score less than `confidence_cutoff`\n boxes, scores, classes = filter_boxes(confidence_cutoff, boxes, scores, classes)\n # The current box coordinates are normalized to a range between 0 and 1.\n # This converts the coordinates actual location on the image.\n width, height = image.size\n box_coords = 
to_image_coords(boxes, height, width)\n # Each class with be represented by a differently colored box\n draw_boxes(image, box_coords, classes)\n plt.figure(figsize=(12, 8))\n plt.imshow(image)", "id": "5854564", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "ObjectDetectionLab/test.py" } ]
0
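Editor's note: test.py in ObjectDetectionLab post-processes the raw detector outputs in two steps — drop detections below a confidence cutoff (filter_boxes), then scale the normalized [ymin, xmin, ymax, xmax] boxes to pixel coordinates (to_image_coords). A numpy-only sketch of those two steps on made-up detections (no TensorFlow or image needed); the boolean mask here is a vectorized stand-in for the loop in filter_boxes:

import numpy as np

boxes = np.array([[0.10, 0.20, 0.40, 0.60],    # ymin, xmin, ymax, xmax in [0, 1]
                  [0.50, 0.50, 0.90, 0.95]])
scores = np.array([0.92, 0.35])
classes = np.array([10., 3.])

confidence_cutoff = 0.8
keep = scores >= confidence_cutoff             # keep only confident detections
boxes, scores, classes = boxes[keep], scores[keep], classes[keep]

height, width = 600, 800                        # made-up image size
box_coords = np.zeros_like(boxes)
box_coords[:, 0] = boxes[:, 0] * height
box_coords[:, 1] = boxes[:, 1] * width
box_coords[:, 2] = boxes[:, 2] * height
box_coords[:, 3] = boxes[:, 3] * width

print(box_coords)   # [[ 60. 160. 240. 480.]] -> only the confident detection survives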
davidlenz
[ { "content": "from pvtm.pvtm import PVTM\nfrom pvtm.pvtm import clean\nfrom sklearn.datasets import fetch_20newsgroups\nimport nltk\nimport argparse\nimport numpy as np\n\nnewsgroups_train = fetch_20newsgroups(subset='train')\ninput_texts = newsgroups_train.data\nprint('There are', len(newsgroups_train.data), 'documents.')\n\nnltk.download(\"stopwords\")\nfrom nltk.corpus import stopwords\nstop_words = list(set(stopwords.words('english')))\n\n# Create PVTM class object\npvtm = PVTM(input_texts)\npvtm.preprocess(lemmatize = False, lang = 'en', min_df = 0.005)\npvtm.fit(vector_size= 50,\n hs=0,\n dbow_words=1, # train word vectors!\n dm=0, # Distributed bag of words (=word2vec-Skip-Gram) (dm=0) OR distributed memory (=word2vec-cbow) (dm=1)\n epochs=10, # doc2vec training epochs\n window=1, # doc2vec window size\n seed=123, # doc3vec random seed\n random_state = 123, # gmm random seed\n min_count=5, # minimal number of appearences for a word to be considered\n workers=1, # doc2vec num workers\n alpha=0.025, # doc2vec initial learning rate\n min_alpha=0.025, # doc2vec final learning rate. Learning rate will linearly drop to min_alpha as training progresses.\n n_components=15, # number of Gaussian mixture components, i.e. Topics\n covariance_type='diag', # gmm covariance type\n verbose=1, # verbosity\n n_init=1, # gmm number of initializations\n\n )\nprint(\"Finished fitting\")\n# You can get document vectors from the Doc2Vec model by calling the command\ndocument_vectors = np.array(pvtm.model.docvecs.vectors_docs)\n\n# You can get distribution of the document over the defined topics by calling the command\n# Thereby, each row is a single document, each column is one topic. The entries within the matrix are probabilities.\ndocument_topics = np.array(pvtm.gmm.predict_proba(np.array(pvtm.model.docvecs.vectors_docs)))\n\n# Inference\nnewsgroups_test = fetch_20newsgroups(subset='test')\nnew_text = newsgroups_test.data[0]\npvtm.infer_topics(new_text)\n\npvtm.start_webapp()\n\n", "id": "8514927", "language": "Python", "matching_score": 7.363551616668701, "max_stars_count": 5, "path": "example/20_newsgroups.py" }, { "content": "from pvtm.pvtm import PVTM\nfrom pvtm.pvtm import clean\nfrom keras.datasets import reuters\nimport nltk\nimport argparse\nimport numpy as np\n\n# load reuters text data\ndef reuters_news_wire_texts():\n (x_train, y_train), (x_test, y_test) = reuters.load_data()\n wordDict = {y:x for x,y in reuters.get_word_index().items()}\n texts = []\n for x in x_train:\n texts.append(\" \".join([wordDict.get(index-3) for index in x if wordDict.get(index-3) is not None]))\n return texts, y_train\n\ninput_texts, y = reuters_news_wire_texts()\nlen_docs = 5000\n# load stop words\nnltk.download(\"stopwords\")\nfrom nltk.corpus import stopwords\nstop_words = list(set(stopwords.words('english')))\n\n# Create PVTM class object\npvtm = PVTM(input_texts[:len_docs])\npvtm.preprocess(lemmatize = False, lang = 'en', min_df = 0.005)\npvtm.fit(vector_size = 50,\n hs=0,\n dbow_words=1, # train word vectors!\n dm=0, # Distributed bag of words (=word2vec-Skip-Gram) (dm=0) OR distributed memory (=word2vec-cbow) (dm=1)\n epochs= 20, # doc2vec training epochs\n window=1, # doc2vec window size\n seed=123, # doc3vec random seed\n random_state = 123, # gmm random seed\n min_count=5, # minimal number of appearences for a word to be considered\n workers=1, # doc2vec num workers\n alpha=0.025, # doc2vec initial learning rate\n min_alpha=0.025, # doc2vec final learning rate. 
Learning rate will linearly drop to min_alpha as training progresses.\n n_components=15, # number of Gaussian mixture components, i.e. Topics\n covariance_type='diag', # gmm covariance type\n verbose=1, # verbosity\n n_init=1, # gmm number of initializations\n )\n\n# You can get document vectors from the Doc2Vec model by calling the command\ndocument_vectors = np.array(pvtm.model.docvecs.vectors_docs)\n\n# You can get distribution of the document over the defined topics by calling the command\n# Thereby, each row is a single document, each column is one topic. The entries within the matrix are probabilities.\ndocument_topics = np.array(pvtm.gmm.predict_proba(np.array(pvtm.model.docvecs.vectors_docs)))\n\n# Inference\nnew_text = input_texts[len_docs+1]\npvtm.infer_topics(new_text)\n\npvtm.start_webapp()\n", "id": "5959805", "language": "Python", "matching_score": 3.0939877033233643, "max_stars_count": 5, "path": "example/example_reuters_dataset.py" }, { "content": "import numpy as np\n\nnp.random.seed(23)\nimport random\n\nrandom.seed(1)\n\nimport pandas as pd\nimport time\nfrom sklearn import mixture\nimport gensim\nimport re\nimport joblib\nimport os\nimport matplotlib.pyplot as plt\nimport requests\nfrom wordcloud import WordCloud\nfrom PIL import Image\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport string\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport inspect\n\nimport spacy\n\nmapping = {ord(u\"ü\"): u\"ue\", ord(u\"ß\"): u\"ss\", ord(u\"ä\"): u\"ae\", ord(u\"ö\"): u\"oe\"}\nclean = lambda x: re.sub('\\W+', ' ', re.sub(\" \\d+\", '', str(x).lower()).strip()).translate(mapping)\n\n\ndef load_example_data():\n '''\n Loads example text data.\n :return: an array with texts.\n '''\n df = pd.read_csv(\"data/sample_5000.csv\")\n texts = df.text.values\n return texts\n\n\nclass Documents(object):\n '''\n :return: tagged documents.\n '''\n\n def __init__(self, documents):\n self.documents = documents\n self.len = len(documents)\n\n def __iter__(self):\n for i, doc in enumerate(self.documents):\n yield gensim.models.doc2vec.TaggedDocument(words=doc, tags=[i])\n\n\nclass PVTM(Documents):\n\n def __init__(self, texts):\n x = [kk.split() for kk in texts]\n self.documents = texts\n self.x_docs = Documents(x)\n\n def preprocess(self, lemmatize=False, lang='en', **kwargs):\n '''\n The function takes a list of texts and removes stopwords, special characters, punctuation as well\n as very frequent and very unfrequent words.\n :param texts: original documents.\n :param lemmatize: if lemmatize = True, lemmatization of the texts will be done.\n :param lang: language of the text documents.( this parameter is needed if lemmatize=True).\n :param kwargs: additional key word arguments passed to popularity_based_prefiltering() function.\n :return: cleaned texts.\n '''\n texts = self.documents\n\n texts = [clean(x) for x in texts]\n\n if lemmatize:\n texts = self.lemmatize(texts, lang=lang)\n\n cleaned_text, self.vocab = self.popularity_based_prefiltering(texts,\n **{key: value for key, value in kwargs.items()\n if key in inspect.getfullargspec(\n self.popularity_based_prefiltering).args})\n self.documents = cleaned_text\n x = [kk.split() for kk in cleaned_text]\n self.x_docs = Documents(x)\n return cleaned_text\n\n def get_allowed_vocab(self, data, min_df=0.05, max_df=0.95):\n '''\n Vocabulary building using sklearn's tfidfVectorizer.\n :param data: a list of strings(documents)\n :param min_df: words are ignored if the frequency is lower than min_df.\n :param max_df: words 
are ignored if the frequency is higher than man_df.\n :return: corpus specific vocabulary\n '''\n\n print(min_df, max_df)\n self.tfidf = TfidfVectorizer(min_df=min_df, max_df=max_df)\n\n # fit on dataset\n self.tfidf.fit(data)\n # get vocabulary\n vocabulary = set(self.tfidf.vocabulary_.keys())\n print(len(vocabulary), 'words in the vocabulary')\n return vocabulary\n\n def popularity_based_prefiltering(self, data, min_df=0.05, max_df=0.95, stopwords=None):\n '''\n Prefiltering function which removes very rare/common words.\n :param data: a list of strings(documents)\n :param min_df: words are ignored if the frequency is lower than min_df.\n :param max_df: words are ignored if the frequency is higher than max_df.\n :param stopwords: a list of stopwords.\n :return: filtered documents' texts and corpus specific vocabulary.\n '''\n vocabulary = self.get_allowed_vocab(data, min_df=min_df, max_df=max_df)\n vocabulary = frozenset(vocabulary)\n if stopwords:\n stopwords = frozenset(stopwords)\n pp = []\n for i, line in enumerate(data):\n rare_removed = list(filter(lambda word: word.lower() in vocabulary, line.split()))\n if stopwords:\n stops_removed = list(filter(lambda word: word.lower() not in stopwords, rare_removed))\n pp.append(\" \".join(stops_removed))\n else:\n pp.append(\" \".join(rare_removed))\n return pp, vocabulary\n\n def lemmatize(self, texts, lang='en'):\n '''\n Lemmatization of input texts.\n :param texts: original documents.\n :param lang: language of the text documents.\n :return: lemmmatized texts.\n '''\n print('Start lemmatization...')\n t0 = time.time()\n if lang==\"en\":\n nlp = spacy.load(\"en_core_web_sm\")\n if lang==\"de\":\n nlp = spacy.load(\"de_core_news_sm\")\n nlp.disable_pipes('tagger', 'ner')\n doclist = list(nlp.pipe(texts, n_threads=6, batch_size=500))\n texts = [' '.join([listitem.lemma_ for listitem in doc]) for i, doc in enumerate(doclist)]\n print(\"Save lemmatized texts to lemmatized.txt\")\n with open(\"lemmatized.txt\", \"w\", encoding=\"utf-8\") as file:\n for line in texts:\n file.write(line)\n file.write(\"\\n\")\n\n t1 = time.time()\n print('Finished lemmatization. 
Process took', t1 - t0, 'seconds')\n print('len(texts)', len(texts))\n return texts\n\n def fit(self, **kwargs):\n '''\n First, a Doc2Vec model is trained and clustering of the documents is done by means of GMM.\n :param kwargs: additional arguments which should be passed to Doc2Vec and GMM.\n :param save: if you want to save the trained model set save=True.\n :param filename: name of the saved model.\n :return: Doc2Vec model and GMM clusters.\n '''\n # generate doc2vec model\n self.model = gensim.models.Doc2Vec(self.x_docs, **{key: value for key, value in kwargs.items()\n if\n key in inspect.getfullargspec(gensim.models.Doc2Vec).args or\n key in inspect.getfullargspec(\n gensim.models.base_any2vec.BaseAny2VecModel).args or\n key in inspect.getfullargspec(\n gensim.models.base_any2vec.BaseWordEmbeddingsModel).args}\n )\n\n print('Start clustering..')\n self.gmm = mixture.GaussianMixture(**{key: value for key, value in kwargs.items()\n if key in inspect.getfullargspec(mixture.GaussianMixture).args})\n \n\n self.doc_vectors = np.array(self.model.docvecs.vectors_docs)\n self.cluster_memberships = self.gmm.fit_predict(self.doc_vectors)\n print('Finished clustering.')\n self.BIC = self.gmm.bic(self.doc_vectors)\n self.cluster_center = self.gmm.means_\n print('BIC: {}'.format(self.BIC))\n\n self.get_document_topics()\n self.top_topic_center_words = pd.DataFrame(\n [self.most_similar_words_per_topic(topic, 200) for topic in range(self.gmm.n_components)])\n self.topic_similarities = pd.DataFrame(np.argsort(cosine_similarity(self.cluster_center))[:, ::-1]).T\n\n def SelBest(self, arr: list, X: int) -> list:\n '''\n returns the set of X configurations with shorter distance\n '''\n dx = np.argsort(arr)[:X]\n return arr[dx]\n def select_topic_number(self, metric = ['BIC','Silohuette'], iterations = 10):\n from matplotlib.patches import Ellipse\n from sklearn.mixture import GaussianMixture as GMM\n from sklearn import metrics\n from matplotlib import rcParams\n rcParams['figure.figsize'] = 16, 8\n n_clusters = np.arange(2, self.gmm.n_components)\n if metric == 'Silohuette':\n sils = []\n sils_err = []\n iterations = interations\n for n in n_clusters:\n tmp_sil = []\n for _ in range(iterations):\n gmm = GMM(n, n_init=2).fit(self.doc_vectors)\n labels = gmm.predict(self.doc_vectors)\n sil = metrics.silhouette_score(self.doc_vectors, labels, metric='euclidean')\n tmp_sil.append(sil)\n val = np.mean(self.SelBest(np.array(tmp_sil), int(iterations / 5)))\n err = np.std(tmp_sil)\n sils.append(val)\n sils_err.append(err)\n plt.errorbar(n_clusters, sils, yerr=sils_err)\n plt.title(\"Silhouette Scores\", fontsize=20)\n plt.xticks(n_clusters)\n plt.xlabel(\"N. of clusters\")\n plt.ylabel(\"Score\")\n elif metric == 'BIC':\n bics = []\n bics_err = []\n iterations = iterations\n for n in n_clusters:\n tmp_bic = []\n for _ in range(iterations):\n gmm = GMM(n, n_init=2).fit(self.doc_vectors)\n\n tmp_bic.append(gmm.bic(self.doc_vectors))\n val = np.mean(self.SelBest(np.array(tmp_bic), int(iterations / 5)))\n err = np.std(tmp_bic)\n bics.append(val)\n bics_err.append(err)\n print('The minimal BIC score is reached for ', n_clusters[bics.index(min(bics))])\n print(\"Check where the BIC curve's change in slope is big. For this reason, take a look at the gradient values.\")\n plt.errorbar(n_clusters, np.gradient(bics), yerr=bics_err, label='BIC')\n plt.title(\"Gradient of BIC Scores\", fontsize=20)\n plt.xticks(n_clusters)\n plt.xlabel(\"N. 
of clusters\")\n plt.ylabel(\"grad(BIC)\")\n plt.legend()\n\n\n def get_string_vector(self, string, steps=100):\n '''\n The function takes a string (document) and\n transforms it to vector using a trained model.\n :param string: new document string.\n :param steps: number of times to train the new document.\n :return: document vector.\n '''\n assert isinstance(string, str), \"string parameter should be a string with the original text\"\n string = clean(string)\n return self.model.infer_vector(string.split(), steps=steps).reshape(1, -1)\n\n def get_topic_weights(self, vector, probabilities=True):\n '''\n The function takes a document vector\n and returns a distribution of a given document over all topics.\n :param vector: document vector.\n :param probabilities: if True, probability distribution over all topics is returned. If False,\n number of topic with the highest probability is returned.\n :return: probability distribution of the vector over all topics.\n '''\n if probabilities:\n return self.gmm.predict_proba(vector)\n else:\n return self.gmm.predict(vector)\n\n def wordcloud_by_topic(self, topic, variant='sim', stop_words=None, n_words=100, savepath=None, display=False):\n '''\n Create a wordcloud to the defined topic.\n :param topic: number of a topic.\n :param n_words: number of words to be shown in a wordcloud.\n :return: a wordcloud with most common words.\n '''\n x, y = np.ogrid[:300, :300]\n shape = (x - 150) ** 2 + (y - 150) ** 2 > 130 ** 2\n shape = 255 * shape.astype(int)\n if variant == 'sim':\n text = self.top_topic_center_words.iloc[topic, :n_words]\n text = \" \".join(text)\n wordcloud = WordCloud(max_font_size=50, max_words=n_words, stopwords=stop_words,\n background_color=\"white\", mask=shape).generate(text)\n if variant == 'count':\n text = self.topic_words.iloc[topic, :n_words]\n text = \" \".join(text)\n wordcloud = WordCloud(max_font_size=50, max_words=n_words, stopwords=stop_words,\n background_color=\"white\", mask=shape).generate(text)\n if display:\n fig, ax = plt.subplots(figsize=(10, 8))\n ax.imshow(wordcloud, interpolation=\"bilinear\", )\n ax.axis(\"off\")\n if savepath:\n plt.savefig(savepath)\n return wordcloud\n\n def get_document_topics(self):\n '''\n Create data frame with the most frequent words per topic as well as the nearest words to the single topic centers.\n :return: self.topic_words data frame with 100 frequent words per topic and self.wordcloud_df containing all the\n document texts assigned to the resulted topics.\n '''\n document_topics = np.array(self.gmm.predict_proba(np.array(self.model.docvecs.vectors_docs)))\n most_relevant_topic_per_document = pd.DataFrame(document_topics).idxmax(1)\n kk = pd.DataFrame(self.documents).join(pd.DataFrame(most_relevant_topic_per_document, columns=['top_topic']))\n self.wordcloud_df = kk.groupby('top_topic')[0].apply(list).apply(\" \".join).str.lower()\n self.topic_words = pd.DataFrame(\n [pd.Series(self.wordcloud_df.loc[topic].split()).value_counts().iloc[:100].index.values for topic in\n range(self.gmm.n_components)])\n\n def most_similar_words_per_topic(self, topic, n_words):\n '''\n The function returns most similar words to the selected topic.\n :param topic: topic number.\n :param n_words: number of words to be shown.\n :return: returns words which are most similar to the topic center as measured by cosine similarity.\n '''\n sims = cosine_similarity([self.cluster_center[topic]], self.model.wv.vectors)\n sims = np.argsort(sims)[0][::-1]\n text = [self.model.wv.index2word[k] for k in 
sims[:n_words]]\n return text\n\n def search_topic_by_term(self, term, variant='sim', method='vec_sim', n_words=100):\n '''\n This function returns topic number and a wordcloud which represent defined search terms.\n :param term: a list with search terms, e.g. ['market','money']\n :param variant: if there is only one search term, there two variants to find a representative topic/wordcloud.\n if 'sim' is chosen the term is searched for among all the words that are nearest to the single topic centers.\n If 'count' is chosen only words from the texts which were assigned to the single topics are considered.\n :param method: if there are multiple search terms, three different methods can be used to find a representative\n wordcloud. Method 'combine' considers all the search terms as a single text and the process is simple to assigning\n new documents with .get_string_vector and .get_topic_weights. Method 'sim_docs' also considers all the search terms as\n a single text and searches for the most similar documents and topics these similar documents were assigned to.\n Third method (vec_sim) transforms single search terms into word vectors and considers cosine similarity between each word\n vector and topic centers.\n :param n_words: number of words to be shown in a wordcloud.\n :return: best matching topic and a wordcloud.\n '''\n assert isinstance(term, (list, tuple)), 'term parameter should be a list or a tuple'\n if len(term) == 1:\n assert variant == 'sim' or variant == 'count', \"choose one of the available variants: sim or count\"\n if variant == \"sim\":\n matches = self.top_topic_center_words[self.top_topic_center_words == term[0]]\n best_matching_topic = pd.DataFrame(list(matches.stack().index)).sort_values(1).iloc[0][0]\n text = self.top_topic_center_words.loc[best_matching_topic].values\n text = \" \".join(text)\n elif variant == \"count\":\n matches = self.topic_words[self.topic_words == term[0]]\n best_matching_topic = pd.DataFrame(list(matches.stack().index)).sort_values(1).iloc[0][0]\n text = self.wordcloud_df.loc[best_matching_topic]\n print(\"best_matching_topic\", best_matching_topic)\n self.wordcloud_by_topic(best_matching_topic)\n\n elif len(term) > 1:\n assert method == 'combine' or method == 'sim_docs' or method == 'vec_sim', \"choose one of the available methods: \" \\\n \"combine, sim_docs or vec_sim\"\n if method == 'combine':\n string = ' '.join(term)\n vector = np.array(self.get_string_vector(string))\n best_matching_topic = self.gmm.predict(vector)[0]\n print(\"best_matching_topic\", best_matching_topic)\n self.wordcloud_by_topic(best_matching_topic)\n\n elif method == 'sim_docs':\n string = ' '.join(term)\n vector = np.array(self.get_string_vector(string))\n docs_num = [self.model.docvecs.most_similar(positive=[np.array(vector).reshape(-1, )], topn=10)[i][0]\n for i in range(10)]\n document_topics = np.array(self.gmm.predict(np.array(self.model.docvecs.vectors_docs)))\n unique, counts = np.unique(document_topics[docs_num], return_counts=True)\n top_topics = np.asarray((unique, counts)).T\n df = pd.DataFrame(top_topics).sort_values(by=1, ascending=False)\n df.columns = ['topic', 'frequency']\n best_matching_topic = df.iloc[0, 0]\n print(\"best_matching_topic\", best_matching_topic)\n self.wordcloud_by_topic(best_matching_topic)\n\n elif method == 'vec_sim':\n vectors = [self.get_string_vector(t) for t in term]\n terms_df = pd.DataFrame({'topic': range(self.gmm.n_components)})\n for i in range(len(term)):\n sims = [cosine_similarity([self.cluster_center[j]], vectors[i]) 
for j in\n range(self.gmm.n_components)]\n sims = [j for i in sims for j in i]\n sims = [sims[i][0] for i in range(len(sims))]\n terms_df[term[i]] = sims\n topics = [np.argsort(terms_df[term[i]].values)[::-1][0] for i in range(len(term))]\n s = pd.Series(topics)\n best_matching_topic = s.value_counts().index[0]\n print(\"best_matching_topic\", best_matching_topic)\n self.wordcloud_by_topic(best_matching_topic)\n\n def infer_topics(self, text, probabilities=True):\n \"\"\"\n Infer topics from unseen documents.\n :param text: array or list of strings.\n :param probabilities: if True, probability distribution over all topics is returned. If False,\n number of topic with the highest probability is returned.\n :return: probability distribution of the text vector over all topics.\n \"\"\"\n vec = self.get_string_vector(text)\n return self.get_topic_weights(vec, probabilities=probabilities)\n\n def save(self, path=None):\n '''\n Save the trained model. Name it whatever you like.\n :param savepath: path the defined model should be stored in.\n '''\n\n if not path:\n path = 'pvtm_model_tmp'\n\n path = os.path.abspath(path)\n print(\"Save model to:\", path)\n os.makedirs(os.path.dirname(path), exist_ok=True)\n joblib.dump(self, path)\n", "id": "5420121", "language": "Python", "matching_score": 3.7514290809631348, "max_stars_count": 5, "path": "pvtm/pvtm.py" }, { "content": "import argparse\nimport joblib\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nimport pybase64\nimport os\nimport errno\nimport glob\nimport flask\nfrom dash.dependencies import Input, Output\nfrom pvtm.pvtm import PVTM, Documents\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\n\n# general\nap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to the trained PVTM model\")\n\nparsed_args = ap.parse_args()\nargs = vars(parsed_args)\n\ndata = joblib.load(args['model'])\n\nimage_directory = 'Output/'\nif not os.path.exists(os.path.dirname(image_directory)):\n try:\n os.makedirs(os.path.dirname(image_directory))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\nfor i in range(data.gmm.n_components):\n data.wordcloud_by_topic(i).to_file('Output/img_{}.png'.format(i))\n\ndef generate_table(dataframe, max_rows=20):\n return html.Table(\n # Header\n [html.Tr([html.Th(col) for col in dataframe.columns])] +\n\n # Body\n [html.Tr([\n html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n ]) for i in range(min(len(dataframe), max_rows))],\n style = {'overflow': 'scroll', \"background-color\": \"powderblue\",\"width\":\"100%\",\n 'border': '1px solid black','border-collapse': 'collapse',\n 'text-align': 'middle', 'border-spacing': '5px', 'font-size' : '20px'}\n )\n\n\napp = dash.Dash()\napp.scripts.config.serve_locally = True\n\napp.layout = html.Div(children=[\n html.H1('PVTM Results', style={'textAlign': 'center', 'background-color': '#7FDBFF'}),\n dcc.Slider(\n id='input-value',\n marks={i: '{}'.format(1 * i) for i in range(data.gmm.n_components)},\n step=1,\n value=0,\n min=0,\n max= data.gmm.n_components - 1\n ),\n html.Div(children=[\n html.Div([\n html.H3('Word Cloud', style={'textAlign': 'center', 'color': '#1C4E80'}),\n html.Img(id='wordcloud')\n ], style={'width': '49%', 'display': 'inline-block', 'vertical-align': 'top','textAlign': 'center'}, className=\"six columns\"),\n html.Div([\n 
html.H3('Important Words', style={'textAlign': 'center', 'color': '#1C4E80'}),\n html.Div(id='table')\n ], style={'width': '49%', 'display': 'inline-block', 'vertical-align': 'middle','textAlign': 'center'}, className='six columns')\n ], className=\"row\")\n])\n\[email protected](Output(component_id='wordcloud', component_property='src'),\n [Input(component_id='input-value', component_property='value')]\n )\ndef update_img(value):\n try:\n image_filename = image_directory + 'img_{}.png'.format(\n value)\n encoded_image = pybase64.b64encode(open(image_filename, 'rb').read())#.decode('ascii')\n return 'data:image/png;base64,{}'.format(encoded_image.decode('ascii'))\n except Exception as e:\n with open('errors.txt', 'a') as f:\n f.write(str(e))\n f.write('\\n')\n\[email protected](Output('table', 'children'), [Input('input-value', 'value')])\ndef display_table(value):\n #df = pd.DataFrame(data.top_topic_center_words.iloc[value,:])\n #df = df.rename(columns={value:'top words for topic {}'.format(value)})\n text = pd.DataFrame(data.model.wv.similar_by_vector(data.cluster_center[value],\n topn=100),\n columns=['word', \"similarity\"])\n return generate_table(text, max_rows=15)\n\nif __name__ == '__main__':\n app.run_server(debug=True, use_reloader=True)\n", "id": "9985772", "language": "Python", "matching_score": 2.4624669551849365, "max_stars_count": 5, "path": "webapp/webapp.py" }, { "content": "import argparse\nimport csv\nimport os\nimport sys\nimport time\n\nfrom scripts.pipeline import Pipeline\nfrom scripts.utils.argschecker import ArgsChecker\nfrom scripts.utils.pygrams_exception import PygramsException\n\npredictor_names = ['All standard predictors', 'Naive', 'Linear', 'Quadratic', 'Cubic', 'ARIMA', 'Holt-Winters', 'SSM']\n\n\ndef get_args(command_line_arguments):\n parser = argparse.ArgumentParser(description=\"extract popular n-grams (words or short phrases)\"\n \" from a corpus of documents\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter, # include defaults in help\n conflict_handler='resolve') # allows overridng of arguments\n\n # suppressed:________________________________________\n parser.add_argument(\"-ih\", \"--id_header\", default=None, help=argparse.SUPPRESS)\n parser.add_argument(\"-c\", \"--cite\", default=False, action=\"store_true\", help=argparse.SUPPRESS)\n parser.add_argument(\"-pt\", \"--path\", default='data', help=argparse.SUPPRESS)\n parser.add_argument(\"-nmf\", \"--n_nmf_topics\", type=int, default=0, help=argparse.SUPPRESS)\n # help=\"NMF topic modelling - number of topics (e.g. 
20 or 40)\")\n\n # Focus source and function\n parser.add_argument(\"-f\", \"--focus\", default=None, choices=['set', 'chi2', 'mutual'],\n help=argparse.SUPPRESS)\n parser.add_argument(\"-fs\", \"--focus_source\", default='USPTO-random-1000.pkl.bz2', help=argparse.SUPPRESS)\n parser.add_argument(\"-tn\", \"--table_name\", default=os.path.join('outputs', 'table', 'table.xlsx'),\n help=argparse.SUPPRESS)\n\n parser.add_argument(\"-j\", \"--json\", default=True, action=\"store_true\",\n help=argparse.SUPPRESS)\n # tf-idf score mechanics\n parser.add_argument(\"-p\", \"--pick\", default='sum', choices=['median', 'max', 'sum', 'avg'],\n help=argparse.SUPPRESS)\n parser.add_argument(\"-tst\", \"--test\", default=False, action=\"store_true\", help=argparse.SUPPRESS)\n parser.add_argument(\"-fb\", \"--filter_by\", default='intersection', choices=['union', 'intersection'],\n help=argparse.SUPPRESS)\n # end __________________________________________________\n\n # Input files\n parser.add_argument(\"-ds\", \"--doc_source\", default='USPTO-random-1000.pkl.bz2',\n help=\"the document source to process\")\n parser.add_argument(\"-uc\", \"--use_cache\", default=None,\n help=\"Cache file to use, to speed up queries\")\n\n # Document column header names\n parser.add_argument(\"-th\", \"--text_header\", default='abstract', help=\"the column name for the free text\")\n parser.add_argument(\"-dh\", \"--date_header\", default=None, help=\"the column name for the date\")\n\n # Word filters\n parser.add_argument(\"-fc\", \"--filter_columns\", default=None,\n help=\"list of columns with binary entries by which to filter the rows\")\n\n parser.add_argument(\"-st\", \"--search_terms\", type=str, nargs='+', default=[],\n help=\"Search terms filter: search terms to restrict the tfidf dictionary. 
\"\n \"Outputs will be related to search terms\")\n parser.add_argument(\"-stthresh\", \"--search_terms_threshold\", type=float, default=0.75,\n help=\"Provides the threshold of how related you want search terms to be \"\n \"Values between 0 and 1: 0.8 is considered high\")\n # Time filters\n parser.add_argument(\"-df\", \"--date_from\", default=None,\n help=\"The first date for the document cohort in YYYY/MM/DD format\")\n parser.add_argument(\"-dt\", \"--date_to\", default=None,\n help=\"The last date for the document cohort in YYYY/MM/DD format\")\n\n parser.add_argument(\"-tsdf\", \"--timeseries-date-from\", default=None,\n help=\"The first date for the document cohort in YYYY/MM/DD format\")\n parser.add_argument(\"-tsdt\", \"--timeseries-date-to\", default=None,\n help=\"The last date for the document cohort in YYYY/MM/DD format\")\n\n # TF-IDF PARAMETERS\n # ngrams selection\n parser.add_argument(\"-mn\", \"--min_ngrams\", type=int, choices=[1, 2, 3], default=1, help=\"the minimum ngram value\")\n parser.add_argument(\"-mx\", \"--max_ngrams\", type=int, choices=[1, 2, 3], default=3, help=\"the maximum ngram value\")\n\n # maximum document frequency\n parser.add_argument(\"-mdf\", \"--max_document_frequency\", type=float, default=0.05,\n help=\"the maximum document frequency to contribute to TF/IDF\")\n\n # Normalize tf-idf scores by document length\n parser.add_argument(\"-ndl\", \"--normalize_doc_length\", default=False, action=\"store_true\",\n help=\"normalize tf-idf scores by document length\")\n\n # Remove noise terms before further processing\n parser.add_argument(\"-pt\", \"--prefilter_terms\", type=int, default=100000,\n help=\"Initially remove all but the top N terms by TFIDF score before pickling initial TFIDF\"\n \" (removes 'noise' terms before main processing pipeline starts)\")\n\n # OUTPUT PARAMETERS\n # select outputs\n\n parser.add_argument(\"-o\", \"--output\", nargs='*', default=[],\n choices=['graph', 'wordcloud', 'multiplot'], # suppress table output option\n help=\"Note that this can be defined multiple times to get more than one output. \")\n\n # file names etc.\n parser.add_argument(\"-on\", \"--outputs_name\", default='out', help=\"outputs filename\")\n parser.add_argument(\"-wt\", \"--wordcloud_title\", default='Popular Terms', help=\"wordcloud title\")\n\n parser.add_argument(\"-nltk\", \"--nltk_path\", default=None, help=\"custom path for NLTK data\")\n\n # number of ngrams reported\n parser.add_argument(\"-np\", \"--num_ngrams_report\", type=int, default=250,\n help=\"number of ngrams to return for report\")\n parser.add_argument(\"-nd\", \"--num_ngrams_wordcloud\", type=int, default=250,\n help=\"number of ngrams to return for wordcloud\")\n parser.add_argument(\"-nf\", \"--num_ngrams_fdg\", type=int, default=250,\n help=\"number of ngrams to return for fdg graph\")\n\n # PATENT SPECIFIC SUPPORT\n parser.add_argument(\"-cpc\", \"--cpc_classification\", default=None,\n help=\"the desired cpc classification (for patents only)\")\n\n # emtech options\n parser.add_argument(\"-ts\", \"--timeseries\", default=False, action=\"store_true\",\n help=\"denote whether timeseries analysis should take place\")\n\n parser.add_argument(\"-pns\", \"--predictor_names\", type=int, nargs='+', default=[2],\n help=(\", \".join([f\"{index}. 
{value}\" for index, value in enumerate(predictor_names)]))\n + \"; multiple inputs are allowed.\\n\")\n\n parser.add_argument(\"-nts\", \"--nterms\", type=int, default=25,\n help=\"number of terms to analyse\")\n parser.add_argument(\"-mpq\", \"--minimum-per-quarter\", type=int, default=15,\n help=\"minimum number of patents per quarter referencing a term\")\n parser.add_argument(\"-stp\", \"--steps_ahead\", type=int, default=5,\n help=\"number of steps ahead to analyse for\")\n\n parser.add_argument(\"-ei\", \"--emergence-index\", default='porter', choices=('porter', 'net-growth'),\n help=\"Emergence calculation to use\")\n parser.add_argument(\"-sma\", \"--smoothing-alg\", default='savgol', choices=('kalman', 'savgol'),\n help=\"Time series smoothing to use\")\n\n parser.add_argument(\"-exp\", \"--exponential_fitting\", default=False, action=\"store_true\",\n help=\"analyse using exponential type fit or not\")\n\n parser.add_argument(\"-nrm\", \"--normalised\", default=False, action=\"store_true\",\n help=\"analyse using normalised patents counts or not\")\n\n args = parser.parse_args(command_line_arguments)\n\n return args\n\n\ndef main(supplied_args):\n paths = [os.path.join('outputs', 'reports'), os.path.join('outputs', 'wordclouds'),\n os.path.join('outputs', 'table'), os.path.join('outputs', 'emergence')]\n for path in paths:\n os.makedirs(path, exist_ok=True)\n\n args = get_args(supplied_args)\n args_default = get_args([])\n argscheck = ArgsChecker(args, args_default)\n argscheck.checkargs()\n outputs = args.output[:]\n outputs.append('reports')\n outputs.append('json_config')\n if args.timeseries:\n outputs.append('timeseries')\n if args.n_nmf_topics > 0:\n outputs.append('nmf')\n\n docs_mask_dict = argscheck.get_docs_mask_dict()\n terms_mask_dict = argscheck.get_terms_mask_dict()\n\n doc_source_file_name = os.path.join(args.path, args.doc_source)\n\n pipeline = Pipeline(doc_source_file_name, docs_mask_dict, pick_method=args.pick,\n ngram_range=(args.min_ngrams, args.max_ngrams), text_header=args.text_header,\n cached_folder_name=args.use_cache,\n max_df=args.max_document_frequency, user_ngrams=args.search_terms,\n prefilter_terms=args.prefilter_terms, terms_threshold=args.search_terms_threshold,\n output_name=args.outputs_name, calculate_timeseries=args.timeseries, m_steps_ahead=args.steps_ahead,\n emergence_index=args.emergence_index, exponential=args.exponential_fitting, nterms=args.nterms,\n patents_per_quarter_threshold=args.minimum_per_quarter, sma = args.smoothing_alg\n )\n\n pipeline.output(outputs, wordcloud_title=args.wordcloud_title, outname=args.outputs_name,\n nterms=args.num_ngrams_report, n_nmf_topics=args.n_nmf_topics)\n\n outputs_name = pipeline.outputs_folder_name\n\n # emtech integration\n if args.timeseries:\n if 0 in args.predictor_names:\n algs_codes = list(range(1, 7))\n else:\n algs_codes = args.predictor_names\n\n if isinstance(algs_codes, int):\n predictors_to_run = [predictor_names[algs_codes]]\n else:\n predictors_to_run = [predictor_names[i] for i in algs_codes]\n\n dir_path = os.path.join(outputs_name, 'emergence')\n os.makedirs(dir_path, exist_ok=True)\n\n for emergence in ['emergent', 'declining']:\n print(f'Running pipeline for \"{emergence}\"')\n\n if args.normalised:\n title = 'Forecasts Evaluation: Normalised Counts' if args.test else 'Forecasts: Normalised Counts'\n else:\n title = 'Forecasts Evaluation' if args.test else 'Forecasts'\n\n title += f' ({emergence})'\n\n html_results, training_values = pipeline.run(predictors_to_run, 
normalized=args.normalised,\n train_test=args.test, emergence=emergence)\n if training_values is not None:\n # save training_values to csv file\n #\n # training_values: csv file:\n # {'term1': [0,2,4,6], 'term2': [2,4,1,3]} 'term1', 0, 2, 4, 6\n # 'term2', 2, 4, 1, 3\n #\n\n filename = os.path.join(dir_path,\n args.outputs_name + '_' + emergence + '_time_series.csv')\n with open(filename, 'w') as f:\n w = csv.writer(f)\n for key, values in training_values:\n my_list = [\"'\" + str(key) + \"'\"] + values\n w.writerow(my_list)\n\n html_doc = f'''<!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <title>{title}</title>\n </head>\n <body>\n <h1>{title}</h1>\n {html_results}\n </body>\n </html>\n '''\n\n base_file_name = os.path.join(dir_path, args.outputs_name + '_' + emergence)\n\n if args.normalised:\n base_file_name += '_normalised'\n\n if args.test:\n base_file_name += '_test'\n\n html_filename = base_file_name + '.html'\n\n with open(html_filename, 'w') as f:\n f.write(html_doc)\n\n print()\n\n\nif __name__ == '__main__':\n try:\n start = time.time()\n main(sys.argv[1:])\n end = time.time()\n diff = int(end - start)\n hours = diff // 3600\n minutes = diff // 60\n seconds = diff % 60\n\n print('')\n print(f\"pyGrams query took {hours}:{minutes:02d}:{seconds:02d} to complete\")\n except PygramsException as err:\n print(f\"pyGrams error: {err.message}\")\n", "id": "265123", "language": "Python", "matching_score": 4.356778621673584, "max_stars_count": 1, "path": "pygrams.py" }, { "content": "import os\nimport unittest\nfrom unittest import mock\nfrom unittest.mock import Mock, MagicMock\n\nimport numpy as np\nimport pandas as pd\nimport pygrams\nfrom scripts import FilePaths\nfrom scripts.text_processing import WordAnalyzer\nfrom scripts.utils.pygrams_exception import PygramsException\n\n\ndef bz2file_fake(file_name, state):\n assert state == 'wb', \"Only supports file.open in write mode\"\n m = MagicMock()\n m.__enter__.return_value = Mock()\n m.__exit__.return_value = Mock()\n m.__enter__.return_value = file_name\n return m\n\n\nclass TestPyGrams(unittest.TestCase):\n data_source_name = 'dummy.pkl.bz2'\n out_name = 'out'\n\n def setUp(self):\n self.global_stopwords = '''the\n'''\n self.ngram_stopwords = '''patent with extra'''\n self.unigram_stopwords = '''of\n'''\n\n def assertListAlmostEqual(self, list_a, list_b, places=7):\n self.assertEqual(len(list_a), len(list_b), 'Lists must be same length')\n for a, b in zip(list_a, list_b):\n self.assertAlmostEqual(a, b, places=places)\n\n def preparePyGrams(self, fake_df_data, mock_read_pickle, mock_open, mock_bz2file, mock_path_isfile):\n\n self.number_of_rows = len(fake_df_data['abstract'])\n self.patent_id_auto_tested = 'patent_id' not in fake_df_data\n self.application_id_auto_tested = 'application_id' not in fake_df_data\n self.application_date_auto_tested = 'application_date' not in fake_df_data\n self.publication_date_auto_tested = 'publication_date' not in fake_df_data\n self.invention_title_auto_tested = 'invention_title' not in fake_df_data\n self.classifications_cpc_auto_tested = 'classifications_cpc' not in fake_df_data\n self.inventor_names_auto_tested = 'inventor_names' not in fake_df_data\n self.inventor_countries_auto_tested = 'inventor_countries' not in fake_df_data\n self.inventor_cities_auto_tested = 'inventor_cities' not in fake_df_data\n self.applicant_organisation_auto_tested = 'applicant_organisation' not in fake_df_data\n self.applicant_countries_auto_tested = 'applicant_countries' not in 
fake_df_data\n self.applicant_cities_auto_tested = 'applicant_cities' not in fake_df_data\n\n if self.patent_id_auto_tested:\n fake_df_data['patent_id'] = [f'patent_id-{pid}' for pid in range(self.number_of_rows)]\n\n if self.application_id_auto_tested:\n fake_df_data['application_id'] = [f'application_id-{pid}' for pid in range(self.number_of_rows)]\n\n if self.application_date_auto_tested:\n fake_df_data['application_date'] = [pd.Timestamp('1998-01-01 00:00:00') + pd.DateOffset(weeks=row) for row\n in range(self.number_of_rows)]\n\n if self.publication_date_auto_tested:\n fake_df_data['publication_date'] = [\n f\"{pd.Timestamp('2000-12-28 00:00:00') - pd.DateOffset(weeks=row):%Y-%m-%d}\" for row\n in range(self.number_of_rows)]\n\n if self.invention_title_auto_tested:\n fake_df_data['invention_title'] = [f'invention_title-{pid}' for pid in range(self.number_of_rows)]\n\n if self.classifications_cpc_auto_tested:\n fake_df_data['classifications_cpc'] = [[f'Y{row:02}'] for row in range(self.number_of_rows)]\n\n if self.inventor_names_auto_tested:\n fake_df_data['inventor_names'] = [[f'Fred {row:02}'] for row in range(self.number_of_rows)]\n\n if self.inventor_countries_auto_tested:\n fake_df_data['inventor_countries'] = [['GB']] * self.number_of_rows\n\n if self.inventor_cities_auto_tested:\n fake_df_data['inventor_cities'] = [['Newport']] * self.number_of_rows\n\n if self.applicant_organisation_auto_tested:\n fake_df_data['applicant_organisation'] = [['Neat and tidy']] * self.number_of_rows\n\n if self.applicant_countries_auto_tested:\n fake_df_data['applicant_countries'] = [['GB']] * self.number_of_rows\n\n if self.applicant_cities_auto_tested:\n fake_df_data['applicant_cities'] = [['Newport']] * self.number_of_rows\n\n df = pd.DataFrame(data=fake_df_data)\n mock_read_pickle.return_value = df\n\n def open_fake_file(file_name, state):\n self.assertEqual(state, 'r', \"Only supports file.open in read mode\")\n m = MagicMock()\n m.__enter__.return_value = Mock()\n m.__exit__.return_value = Mock()\n\n if file_name == FilePaths.global_stopwords_filename:\n m.__enter__.return_value.read.return_value = self.global_stopwords\n return m\n\n elif file_name == FilePaths.ngram_stopwords_filename:\n m.__enter__.return_value.readlines.return_value = self.ngram_stopwords.split('\\n')\n return m\n\n elif file_name == FilePaths.unigram_stopwords_filename:\n m.__enter__.return_value.read.return_value = self.unigram_stopwords\n return m\n\n else:\n return None\n\n mock_open.side_effect = open_fake_file\n\n mock_bz2file.side_effect = bz2file_fake\n\n def isfile_fake(file_name):\n if file_name == os.path.join('data', self.data_source_name):\n return True\n else:\n return False\n\n mock_path_isfile.side_effect = isfile_fake\n\n def assertTfidfOutputs(self, assert_func, mock_pickle_dump, mock_makedirs, max_df, min_date=200052,\n max_date=200052):\n self.assertTrue(self.publication_date_auto_tested)\n self.assertTrue(self.patent_id_auto_tested)\n\n mock_makedirs.assert_called_with(self.tfidfOutputFolder(self.out_name, max_df, min_date, max_date),\n exist_ok=True)\n\n results_checked = False\n expected_tfidf_file_name = self.tfidfFileName(self.out_name, max_df, min_date, max_date)\n for dump_args in mock_pickle_dump.call_args_list:\n if dump_args[0][1] == expected_tfidf_file_name:\n tfidf_obj = dump_args[0][0]\n assert_func(tfidf_matrix=tfidf_obj.tfidf_matrix, feature_names=tfidf_obj.feature_names)\n\n results_checked = True\n break\n\n if not results_checked:\n self.fail('TFIDF results were not matched - were 
filenames correct?')\n\n def assertTimeSeriesOutputs(self, assert_func, mock_pickle_dump, mock_makedirs):\n self.assertTrue(self.publication_date_auto_tested)\n self.assertTrue(self.patent_id_auto_tested)\n\n output_folder_name = self.out_name + '-mdf-1.0-200052-200052/'\n expected_term_counts_filename = self.termCountsFileName(output_folder_name, self.out_name)\n\n results_checked = False\n for dump_args in mock_pickle_dump.call_args_list:\n if dump_args[0][1] == expected_term_counts_filename:\n [term_counts_per_week, feature_names, number_of_documents_per_week, week_iso_dates] = dump_args[0][0]\n\n assert_func(term_counts_per_week, feature_names, number_of_documents_per_week, week_iso_dates)\n\n results_checked = True\n break\n\n if not results_checked:\n self.fail('Term counts results were not matched - were filenames correct?')\n\n @staticmethod\n def tfidfOutputFolder(data_source_name, max_df, min_date, max_date):\n return os.path.join('cached', data_source_name + f'-mdf-{max_df}-{min_date}-{max_date}')\n\n @staticmethod\n def tfidfFileName(data_source_name, max_df, min_date, max_date):\n return os.path.join(TestPyGrams.tfidfOutputFolder(data_source_name, max_df, min_date, max_date),\n 'tfidf.pkl.bz2')\n\n @staticmethod\n def termCountsOutputFolder(dir_name):\n return os.path.join('outputs',dir_name, 'termcounts')\n\n @staticmethod\n def termCountsFileName(dir_name, name):\n return os.path.join(TestPyGrams.termCountsOutputFolder(dir_name), name + '-term_counts.pkl.bz2')\n\n @staticmethod\n def find_matching_pickle(mock_pickle_dump, pickle_file_name):\n for args in mock_pickle_dump.call_args_list:\n if args[0][1] == pickle_file_name:\n return args[0][0]\n return None\n\n @mock.patch(\"scripts.data_factory.read_pickle\", create=True)\n @mock.patch(\"scripts.utils.utils.dump\", create=True)\n @mock.patch(\"scripts.text_processing.open\", create=True)\n @mock.patch(\"scripts.utils.utils.BZ2File\", create=True)\n @mock.patch(\"scripts.utils.utils.makedirs\", create=True)\n @mock.patch(\"os.path.isfile\", create=True)\n def test_simple_output_tfidf(self, mock_path_isfile, mock_makedirs, mock_bz2file, mock_open, mock_pickle_dump,\n mock_read_pickle):\n fake_df_data = {\n 'abstract': [\n 'abstract'\n ]\n }\n max_df = 1.0\n self.preparePyGrams(fake_df_data, mock_read_pickle, mock_open, mock_bz2file, mock_path_isfile)\n args = ['-ds', self.data_source_name, '--date_header', 'publication_date', '--max_document_frequency',\n str(max_df)]\n\n pygrams.main(args)\n\n def assert_tfidf_outputs(tfidf_matrix, feature_names):\n self.assertEqual(tfidf_matrix.todense(), np.ones(shape=(1, 1)), 'TFIDF should be 1x1 matrix of 1')\n self.assertListEqual(feature_names, ['abstract'])\n\n self.assertTfidfOutputs(assert_tfidf_outputs, mock_pickle_dump, mock_makedirs, max_df)\n\n @mock.patch(\"scripts.data_factory.read_pickle\", create=True)\n @mock.patch(\"scripts.text_processing.open\", create=True)\n @mock.patch(\"scripts.utils.utils.read_pickle\", create=True)\n @mock.patch(\"scripts.utils.utils.dump\", create=True)\n @mock.patch(\"scripts.utils.utils.BZ2File\", create=True)\n @mock.patch(\"scripts.utils.utils.makedirs\", create=True)\n @mock.patch(\"scripts.output_factory.open\", create=True)\n @mock.patch(\"scripts.output_factory.dump\", create=True)\n @mock.patch(\"scripts.output_factory.BZ2File\", create=True)\n @mock.patch(\"scripts.output_factory.makedirs\", create=True)\n @mock.patch(\"os.path.isfile\", create=True)\n def test_simple_output_to_cache_then_use_cache(self, mock_path_isfile,\n 
mock_output_makedirs,\n mock_output_bz2file,\n mock_output_pickle_dump,\n mock_output_open,\n mock_utils_makedirs,\n mock_utils_bz2file,\n mock_utils_pickle_dump,\n mock_utils_read_pickle,\n mock_open,\n mock_factory_read_pickle\n ):\n fake_df_data = {\n 'abstract': [\n 'abstract'\n ]\n }\n\n fake_output_file_content = {}\n\n def open_fake_output_file(file_name, state):\n self.assertEqual(state, 'w', \"Only supports file.open in write mode\")\n\n def snag_results(text):\n fake_output_file_content[file_name] = fake_output_file_content.get(file_name, '') + text\n\n m = MagicMock()\n m.__enter__.return_value = Mock()\n m.__enter__.return_value.write.side_effect = snag_results\n m.__exit__.return_value = Mock()\n return m\n\n mock_output_open.side_effect = open_fake_output_file\n\n # Make a note of the dumped TFIDF object for later\n self.preparePyGrams(fake_df_data, mock_factory_read_pickle, mock_open, mock_utils_bz2file, mock_path_isfile)\n args = ['-ds', self.data_source_name, '--date_header', 'publication_date', '--max_document_frequency', '1.0']\n pygrams.main(args)\n\n # reset static object\n WordAnalyzer.tokenizer = None\n WordAnalyzer.preprocess = None\n WordAnalyzer.ngram_range = None\n WordAnalyzer.stemmed_stop_word_set_n = None\n WordAnalyzer.stemmed_stop_word_set_uni = None\n\n fake_output_file_content = {}\n\n # Fail if original data frame is requested from disc\n def factory_read_pickle_fake(pickle_file_name):\n self.fail(f'Should not be reading {pickle_file_name} via a factory if TFIDF was requested from pickle')\n\n dumped_tfidf_file_name = os.path.join('cached', self.out_name + '-mdf-1.0-200052-200052', 'tfidf.pkl.bz2')\n self.dumped_tfidf = self.find_matching_pickle(mock_utils_pickle_dump, dumped_tfidf_file_name)\n\n dumped_dates_file_name = os.path.join('cached', self.out_name + '-mdf-1.0-200052-200052', 'dates.pkl.bz2')\n self.dumped_dates = self.find_matching_pickle(mock_utils_pickle_dump, dumped_dates_file_name)\n\n dumped_cpc_dict_file_name = os.path.join('cached', self.out_name + '-mdf-1.0-200052-200052', 'cpc_dict.pkl.bz2')\n self.dumped_cpc_dict = self.find_matching_pickle(mock_utils_pickle_dump, dumped_cpc_dict_file_name)\n\n mock_factory_read_pickle.side_effect = factory_read_pickle_fake\n mock_utils_pickle_dump.reset_mock(return_value=True, side_effect=True)\n\n # Instead support TFIDF pickle read - and return the TFIDF object previously saved to disc\n def pipeline_read_pickle_fake(pickle_file_name):\n if pickle_file_name == dumped_tfidf_file_name:\n return self.dumped_tfidf\n elif pickle_file_name == dumped_dates_file_name:\n return self.dumped_dates\n elif pickle_file_name == dumped_cpc_dict_file_name:\n return self.dumped_cpc_dict\n else:\n self.fail(f'Should not be reading {pickle_file_name} via a factory if TFIDF was requested from pickle')\n\n mock_output_bz2file.side_effect = bz2file_fake\n mock_utils_read_pickle.side_effect = pipeline_read_pickle_fake\n mock_utils_read_pickle.return_value = self.dumped_tfidf\n args = ['-ds', self.data_source_name, '-ts',\n '--date_header',\n 'publication_date', '--max_document_frequency', '1.0',\n '--use_cache', self.out_name + '-mdf-1.0-200052-200052']\n pygrams.main(args)\n\n self.assertEqual(' abstract 1.000000\\n',\n fake_output_file_content[\n os.path.join('outputs', self.out_name+'-mdf-1.0-200052-200052', 'reports',\n self.out_name+'_keywords.txt')])\n\n @mock.patch(\"scripts.data_factory.read_pickle\", create=True)\n @mock.patch(\"scripts.utils.utils.dump\", create=True)\n 
@mock.patch(\"scripts.text_processing.open\", create=True)\n @mock.patch(\"scripts.utils.utils.BZ2File\", create=True)\n @mock.patch(\"scripts.utils.utils.makedirs\", create=True)\n @mock.patch(\"os.path.isfile\", create=True)\n def test_simple_two_patents_unigrams_only_output_tfidf(self, mock_path_isfile, mock_makedirs, mock_bz2file,\n mock_open, mock_pickle_dump, mock_read_pickle):\n fake_df_data = {\n 'abstract': [\n 'abstract one',\n 'abstract two'\n ]\n }\n max_df = 1.0\n\n self.preparePyGrams(fake_df_data, mock_read_pickle, mock_open, mock_bz2file, mock_path_isfile)\n args = ['-ds', self.data_source_name, '--date_header',\n 'publication_date', '--max_document_frequency', str(max_df), '--max_ngrams', '1']\n\n pygrams.main(args)\n\n # tf(t) = num of occurrences / number of words in doc\n #\n # smoothing is false, so no modification to log numerator or denominator:\n # idf(d, t) = log [ n / df(d, t) ] + 1\n #\n # n = total number of docs\n #\n # norm='l2' by default\n\n tfidf_abstract = (1 / 2) * (np.log(2 / 2) + 1)\n tfidf_one = (1 / 2) * (np.log(2 / 1) + 1)\n l2norm = np.sqrt(tfidf_abstract * tfidf_abstract + tfidf_one * tfidf_one)\n l2norm_tfidf_abstract = tfidf_abstract / l2norm\n l2norm_tfidf_one = tfidf_one / l2norm\n\n # Note that 'one' will have same weight as 'two' given where it appears\n\n def assert_tfidf_outputs(tfidf_matrix, feature_names):\n self.assertListEqual(feature_names, ['abstract', 'one', 'two'])\n tfidf_as_lists = tfidf_matrix.todense().tolist()\n self.assertListAlmostEqual(tfidf_as_lists[0], [l2norm_tfidf_abstract, l2norm_tfidf_one, 0], places=4)\n self.assertListAlmostEqual(tfidf_as_lists[1], [l2norm_tfidf_abstract, 0, l2norm_tfidf_one], places=4)\n\n self.assertTfidfOutputs(assert_tfidf_outputs, mock_pickle_dump, mock_makedirs, max_df, 200051, 200052)\n\n \"\"\"\n Extended from test_simple_two_patents_unigrams_only_output_tfidf - sets prefilter-terms to remove 'noise' terms\n \"\"\"\n @mock.patch(\"scripts.data_factory.read_pickle\", create=True)\n @mock.patch(\"scripts.utils.utils.dump\", create=True)\n @mock.patch(\"scripts.text_processing.open\", create=True)\n @mock.patch(\"scripts.utils.utils.BZ2File\", create=True)\n @mock.patch(\"scripts.utils.utils.makedirs\", create=True)\n @mock.patch(\"os.path.isfile\", create=True)\n def test_simple_two_patents_unigrams_and_prefilter_only_output_tfidf(self, mock_path_isfile, mock_makedirs,\n mock_bz2file, mock_open, mock_pickle_dump,\n mock_read_pickle):\n fake_df_data = {\n 'abstract': [\n 'abstract one',\n 'abstract two'\n ]\n }\n max_df = 1.0\n self.preparePyGrams(fake_df_data, mock_read_pickle, mock_open, mock_bz2file, mock_path_isfile)\n args = ['-ds', self.data_source_name, '--date_header', 'publication_date',\n '--max_document_frequency', str(max_df), '--max_ngrams', '1',\n '--prefilter_terms', '1']\n\n pygrams.main(args)\n\n # tf(t) = num of occurrences / number of words in doc\n #\n # smoothing is false, so no modification to log numerator or denominator:\n # idf(d, t) = log [ n / df(d, t) ] + 1\n #\n # n = total number of docs\n #\n # norm='l2' by default\n\n tfidf_abstract = (1 / 2) * (np.log(2 / 2) + 1)\n tfidf_one = (1 / 2) * (np.log(2 / 1) + 1)\n l2norm = np.sqrt(tfidf_abstract * tfidf_abstract + tfidf_one * tfidf_one)\n l2norm_tfidf_abstract = tfidf_abstract / l2norm\n\n def assert_tfidf_outputs(tfidf_matrix, feature_names):\n self.assertListEqual(feature_names, ['abstract', 'one'])\n tfidf_as_lists = tfidf_matrix.todense().tolist()\n self.assertListAlmostEqual([tfidf_as_lists[0][0]], 
[l2norm_tfidf_abstract], places=4)\n self.assertListAlmostEqual([tfidf_as_lists[1][0]], [l2norm_tfidf_abstract], places=4)\n\n self.assertTfidfOutputs(assert_tfidf_outputs, mock_pickle_dump, mock_makedirs, max_df, 200051, 200052)\n\n @mock.patch(\"scripts.data_factory.read_pickle\", create=True)\n @mock.patch(\"scripts.utils.utils.dump\", create=True)\n @mock.patch(\"scripts.utils.utils.BZ2File\", create=True)\n @mock.patch(\"scripts.text_processing.open\", create=True)\n @mock.patch(\"scripts.output_factory.dump\", create=True)\n @mock.patch(\"scripts.output_factory.BZ2File\", create=True)\n @mock.patch(\"scripts.output_factory.makedirs\", create=True)\n @mock.patch(\"os.path.isfile\", create=True)\n def test_unibitri_reduction_output_termcounts(self, mock_path_isfile, mock_of_makedirs,\n mock_of_bz2file, mock_of_dump, mock_open,\n mock_utils_bz2file, mock_utils_dump, mock_read_pickle):\n fake_df_data = {\n 'abstract': [\n 'abstract 1, of the patent with extra stuff'\n ]\n }\n\n mock_of_bz2file.side_effect = bz2file_fake\n\n self.preparePyGrams(fake_df_data, mock_read_pickle, mock_open, mock_utils_bz2file, mock_path_isfile)\n args = ['-ds', self.data_source_name, '--id_header', 'patent_id', '--date_header',\n 'publication_date', '--max_document_frequency', '1.0'] # '-ts', '-tc'\n\n pygrams.main(args)\n\n dumped_tfidf_file_name = os.path.join('cached', self.out_name + '-mdf-1.0-200052-200052', 'tfidf.pkl.bz2')\n self.dumped_tfidf = self.find_matching_pickle(mock_utils_dump, dumped_tfidf_file_name)\n\n dumped_dates_file_name = os.path.join('cached', self.out_name + '-mdf-1.0-200052-200052', 'dates.pkl.bz2')\n self.dumped_dates = self.find_matching_pickle(mock_utils_dump, dumped_dates_file_name)\n\n self.assertListEqual(self.dumped_tfidf.feature_names, ['abstract', 'of patent with', 'with extra stuff'])\n term_counts_as_lists = self.dumped_tfidf.count_matrix.todense().tolist()\n self.assertListEqual(term_counts_as_lists[0], [1, 1, 1])\n self.assertListEqual(self.dumped_dates.tolist(), [200052])\n\n @unittest.skip(\"json compulsory now, so not an option\")\n def test_args_json_not_requested(self):\n args = pygrams.get_args([])\n self.assertFalse(args.json)\n\n @unittest.skip(\"json compulsory now, so not an option\")\n def test_args_json_requested_short(self):\n args = pygrams.get_args(['-j'])\n self.assertTrue(args.json)\n\n @unittest.skip(\"json compulsory now, so not an option\")\n def test_args_json_requested_long(self):\n args = pygrams.get_args(['--json'])\n self.assertTrue(args.json)\n\n def test_args_output_name_requested_long(self):\n args = pygrams.get_args(['--outputs_name=my/test/name.txt'])\n self.assertEqual('my/test/name.txt', args.outputs_name)\n\n def test_args_document_source_requested_long(self):\n args = pygrams.get_args(['--doc_source=my-test'])\n self.assertEqual('my-test', args.doc_source)\n\n @mock.patch(\"scripts.output_factory.json.dump\", create=True)\n @mock.patch(\"scripts.output_factory.open\", create=True)\n def test_json_configuration_encoding_sum_no_time_weighting(self, mock_open, mock_json_dump):\n patent_pickle_file_name = 'USPTO-random-100.pkl.bz2'\n patent_pickle_absolute_file_name = os.path.abspath(os.path.join('data', patent_pickle_file_name))\n output_file_name = 'test'\n suffix = '-mdf-0.05-200502-201808'\n report_file_name = os.path.join('outputs',output_file_name+suffix, 'json_config', output_file_name + '_keywords.txt')\n json_file_name = os.path.join('outputs',output_file_name+suffix, 'json_config', output_file_name + '_keywords_config.json')\n 
pygrams.main([f'--outputs_name={output_file_name}', '-f=set', '-p=sum', '-cpc=Y12',\n '--date_from=1999/03/12', '--date_to=2000/11/30', '-dh', 'publication_date', '-ds',\n patent_pickle_file_name])\n\n mock_open.assert_any_call(json_file_name, 'w')\n\n actual_json = mock_json_dump.call_args[0][0]\n expected_json = {\n 'paths': {\n 'data': patent_pickle_absolute_file_name,\n 'tech_report': report_file_name\n },\n 'month_year': {\n 'from': 199910,\n 'to': 200048\n },\n 'parameters': {\n 'pick': 'sum'\n }\n }\n self.assertEqual(expected_json, actual_json)\n\n @mock.patch(\"scripts.output_factory.json.dump\", create=True)\n @mock.patch(\"scripts.output_factory.open\", create=True)\n def test_json_configuration_encoding_maximal(self, mock_open, mock_json_dump):\n patent_pickle_file_name = 'USPTO-random-100.pkl.bz2'\n patent_pickle_absolute_file_name = os.path.abspath(os.path.join('data', patent_pickle_file_name))\n output_file_name = 'test'\n report_file_name = os.path.join('outputs', 'test-mdf-0.05-200502-201808', 'json_config', output_file_name + '_keywords.txt')\n json_file_name = os.path.join('outputs', 'test-mdf-0.05-200502-201808','json_config', output_file_name + '_keywords_config.json')\n pygrams.main([f'--outputs_name={output_file_name}', '-p=max', '-cpc=Y12',\n '--date_from=1998/01/01', '--date_to=2001/12/31', '-dh', 'publication_date', '-ds',\n patent_pickle_file_name])\n\n mock_open.assert_any_call(json_file_name, 'w')\n actual_json = mock_json_dump.call_args[0][0]\n expected_json = {\n 'paths': {\n 'data': patent_pickle_absolute_file_name,\n 'tech_report': report_file_name\n },\n 'month_year': {\n 'from': 199801,\n 'to': 200201\n },\n 'parameters': {\n 'pick': 'max'\n }\n }\n self.assertEqual(expected_json, actual_json)\n\n @mock.patch(\"scripts.terms_graph.json.dump\", create=True)\n @mock.patch(\"scripts.terms_graph.open\", create=True)\n def test_graph_creation(self, mock_open, mock_json_dump):\n fname = 'other'\n suffix='-mdf-0.05-200502-201808'\n js_file_name = os.path.join('outputs', fname+suffix, 'visuals', 'key-terms.js')\n json_file_name = os.path.join('outputs', fname+suffix, 'reports', 'key-terms.json')\n graph_report_name = os.path.join('outputs', fname+suffix, 'reports', fname + '_graph.txt')\n\n test_args = ['--doc_source', 'USPTO-random-100.pkl.bz2', '--date_header', 'publication_date', '-o', 'graph',\n '--outputs_name', fname]\n pygrams.main(test_args)\n\n mock_open.assert_any_call(graph_report_name, 'w')\n mock_open.assert_any_call(json_file_name, 'w')\n mock_open.assert_any_call(js_file_name, 'w')\n\n actual_json = mock_json_dump.call_args_list[0][0][0]\n self.assertIn('nodes', actual_json)\n self.assertIn('links', actual_json)\n\n @mock.patch(\"os.path.isfile\", create=True)\n def test_reports_unsupported_df_format(self, mock_path_isfile):\n\n unknown_filename = 'unknown.format'\n\n def isfile_fake(file_name):\n if file_name == os.path.join('data', unknown_filename):\n return True\n else:\n return False\n\n mock_path_isfile.side_effect = isfile_fake\n test_args = ['--doc_source', unknown_filename]\n try:\n pygrams.main(test_args)\n self.fail(\"should raise exception\")\n except PygramsException as err:\n self.assertEqual('Unsupported file: ' + os.path.join('data', unknown_filename), err.message)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "3863412", "language": "Python", "matching_score": 1.2428679466247559, "max_stars_count": 1, "path": "tests/test_pygrams.py" }, { "content": "#!/usr/bin/env python3\n\n\"\"\"\n Dependencies:\n\n Chrome\n 
Chromedriver\n python-selenium\n\n\"\"\"\nimport glob\nimport os\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport numpy as np\nimport pandas as pd\n\n\nnum_workers=10\nwith open('worker.txt', 'r') as file:\n worker = int(file.readlines()[0])\n\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--window-size=1920,1080')\nchrome_options.add_argument('--headless')\nchrome_options.add_argument('--disable-gpu')\nbrowser = webdriver.Chrome(chrome_options=chrome_options)\n\n# https://stackoverflow.com/a/3277516\nwith open(\"list.txt\") as f:\n content = f.readlines()\ncontent = [x.strip() for x in content]\nprint(len(content))\n\narchived = [a[:-4].replace('\\\\','/').split('/')[1] for a in glob.glob('archived/*.png')]\nprint(archived[:3])\n\n# https://stackoverflow.com/a/7406369\nkeepcharacters = (' ', '.', '_', \"-\")\nsavestring = lambda line: \"\".join(c for c in line.replace(\"/\", \"_\") if c.isalnum() or c in keepcharacters).rstrip()\n\nif (not os.path.exists(\"./archived/\")):\n os.makedirs(\"./archived/\")\n\ncomps = pd.DataFrame(data=content)\ncomps.columns = ['url']\ncomps['id'] =range(len(content))\n\npercentiles = [np.round(step * 1 / num_workers, 3) for step in range(1, num_workers)]\npercentiles = pd.DataFrame(comps.id.describe(percentiles=percentiles)).T.round().loc[:, 'min':'max'].values[0]\n\nrelevant_ids = comps[(comps.id >= percentiles[worker]) & (comps.id < percentiles[worker + 1])]['id']\ncontent = comps[comps.id.isin(relevant_ids)].url.values.tolist()\nprint('min id', relevant_ids.min())\nprint('max id', relevant_ids.max())\nprint('shape', relevant_ids.shape)\nprint()\n\n\nc = 0\nt1 = time.time()\nfor i, line in enumerate(content):\n try:\n if (line != \"\" and not str(line).startswith(\"#\")):\n if savestring(line) in archived:\n continue\n print(\"(\" + str(i + 1) + \"/\" + str(len(content)) + \") \" + str(line), flush=True)\n print(\"loading...\", flush=True, end=\"\")\n browser.get(line)\n print(\"done\", flush=True)\n print(\"calculating size...\", flush=True, end=\"\")\n h = browser.execute_script(\"return Math.max(document.body.scrollHeight, document.body.offsetHeight );\")\n if (h == 0):\n print(\"\\nError getting page length\\n\")\n continue\n browser.set_window_size(width=1920, height=h)\n print(\"done\", flush=True)\n print(\"shooting...\", flush=True, end=\"\")\n f = savestring(line)\n browser.get_screenshot_as_file(\"./archived/\" + str(f) + \".png\")\n print(\"done\\n\", flush=True)\n browser.set_window_size(width=1920, height=800)\n c += 1\n else:\n print(\"(\" + str(i + 1) + \"/\" + str(len(content)) + \") skipping\\n\")\n\n except Exception as e:\n print(e)\n browser.quit()\n browser = webdriver.Chrome(chrome_options=chrome_options)\nt2 = time.time()\nt = t2 - t1\nif (c > 0):\n print(\"saved\", c, \"items in\", round(t, 2), \"seconds (\" + str(round(t / c, 2)), \"seconds per item)\")\nbrowser.quit()\n\n#\n# import glob\n# import os\n# import time\n# from selenium import webdriver\n# from selenium.webdriver.common.keys import Keys\n#\n# options = webdriver.ChromeOptions()\n# options.add_argument('headless')\n# options.add_argument('window-size=1920x800')\n# browser = webdriver.Chrome(options=options)\n#\n# results_url = \"https://duckduckgo.com/?q=paralegal&t=h_&ia=web\"\n# browser.get(results_url)\n# results = browser.find_elements_by_id('links')\n# num_page_items = len(results)\n# for i in range(num_page_items):\n# 
print(results[i].text)\n# print(len(results))", "id": "5787931", "language": "Python", "matching_score": 0.7199982404708862, "max_stars_count": 0, "path": "webpageArchiver.py" }, { "content": "import os\nimport pandas as pd\n\nlocation = os.path.dirname(os.path.realpath(__file__))\nmy_file = os.path.join(location, 'data', 'sample_5000.csv')\n\nsample_data = pd.read_csv(my_file)\nexample_texts = sample_data.text.values", "id": "12057972", "language": "Python", "matching_score": 0.49949654936790466, "max_stars_count": 5, "path": "pvtm/__init__.py" }, { "content": "from setuptools import setup\n\ndef readme_file_contents():\n with open('README.rst') as readme_file:\n data = readme_file.read()\n return data\n\nsetup(\n name='pvtm',\n version='1.0.0',\n description='Topic Modeling with doc2vec and Gaussian mixture clustering',\n long_description=readme_file_contents(),\n author='<NAME>',\n author_email='<EMAIL>',\n licence='MIT',\n packages=['pvtm'], #same as name\n package_data={'pvtm': ['data/*']},\n #include_package_data=True,\n #install_requires=['smtplib', 'email'], #external packages as dependencies\n)", "id": "6874467", "language": "Python", "matching_score": 0.5163608193397522, "max_stars_count": 5, "path": "setup.py" } ]
2.462467
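A minimal usage sketch for the pyGrams pipeline archived in the record above. It mirrors the command-line options the record's own tests pass to pygrams.main; the only assumption is that the USPTO sample pickle referenced by those tests is present under the repo's data/ directory.

    import pygrams

    # Run the TF-IDF pipeline on the sample patent set, picking the
    # publication date column and requesting a wordcloud output,
    # exactly as the arguments are wired up in the record's argparse setup.
    pygrams.main(['-ds', 'USPTO-random-100.pkl.bz2',
                  '-dh', 'publication_date',
                  '-o', 'wordcloud'])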
sangram11
[ { "content": "import os\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='masking',\n version='0.0.3',\n author='<NAME>',\n author_email='<EMAIL>',\n description='This package contains a function \\'maskstring\\' to mask a given input string based on the index positions passed as arguments.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/sangram11/MASKING',\n project_urls={'BUG Tracker':'https://github.com/sangram11/MASKING/tree/master/archive',},\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.6\",\n)", "id": "6159741", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "setup.py" }, { "content": "import sys\nclass indexError(Exception):\n '''\n This class is used to raise custom exception when user supplies index values that are not available for the string. \n '''\n def __init__(self,value):\n self.value = value\n def __str__(self):\n return str(self.value)\n\nclass maskError(Exception):\n '''\n This class is used to raise custom exception when main string is equal to substring used for masking.\n '''\n def __init__(self,value):\n self.value = value\n def __str__(self):\n return str(self.value)\ndef maskstring(strng,substr,*args):\n '''\n This function is used to mask a given input string based on the index positions passed as arguments.\n This function will replace other characters from the main string with a substring where index positions in the main string is not present in the list of arguments(index position) supplied to the function.\n :param strng: This is the main string that you want to mask\n :param substr: This is the substring that you want mask with in the main string\n :param args: Array of index positions where you don't want masking to happen. Viewer will be able to view the masked string with only those characters that are available in these index positions.\n :return: return the masked string e.g.******3.9.2 where string 'Python' has been masked with '*'\n '''\n try:\n if strng == substr:\n raise maskError('Masking cannot be done with the same string in the main string')\n for indx in args:\n if indx > (len(strng) - 1):\n raise indexError(f'Index {indx} supplied in the argument is not available in the main string.\\nSolution:Kindly, supply arguments(index positions) available for the string.')\n except indexError as ex:\n print(f''' File \"{sys.argv[0]}\"''')\n print(f'indexError:{ex}')\n except maskError as ex:\n print(f''' File \"{sys.argv[0]}\"''')\n print(f'maskError:{ex}')\n else:\n mskstr = ''\n for tup in enumerate(strng):\n idx, ch = tup\n if idx in args: # Checking if the index position is available in the list of arguments(index positions) that were supplied. If it is available it will not mask.\n mskstr += ch\n else: # If the index position is not available in the list of arguments(index positions) that were supplied it will mask.\n mskstr += substr\n return mskstr\n", "id": "737063", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "src/masking/maskstring.py" } ]
0
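A minimal usage sketch for the maskstring helper in the sangram11 record above. The import path follows the record's setup.py (package "masking" under src/); the sample string and the index positions kept visible are illustrative, not taken from the source.

    from masking.maskstring import maskstring

    # Keep only the characters at indices 12-15 visible; every other index
    # is replaced with the masking character, per the function's docstring.
    masked = maskstring('4111111111111111', '*', 12, 13, 14, 15)
    print(masked)  # ************1111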
zhuoranzma
[ { "content": "import networks\nimport argparse\nimport utils\nimport torch\nimport itertools\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom datasets import ImageDataset\nimport time\nimport numpy as np\n\n\n\n# Get training options from the command line\ndef get_opt():\n parser = argparse.ArgumentParser()\n # Training parameters\n parser.add_argument('--epoch', type=int, default=0, help='starting epoch')\n parser.add_argument('--n_epochs', type = int, default = 100, help = 'number of epochs with initial learning rate')\n parser.add_argument('--n_epochs_decay', type = int, default = 100, help = 'number of epochs starting the decay of learning rate')\n parser.add_argument('--beta1', type = float, default = 0.5, help = 'momentum term of the Adam optimizer')\n parser.add_argument('--lr', type = float, default = 0.0002, help = 'initial learning rate')\n parser.add_argument('--batch_size', type = int, default = 1, help = 'batch size of training')\n parser.add_argument('--cuda', action='store_true', help='use GPU computation')\n parser.add_argument('--rootdir', type=str, default='datasets/NIRI_to_NIRII/', help='root directory of the dataset')\n parser.add_argument('--n_cpu', type=int, default=4, help='number of cpu threads to use during batch generation')\n parser.add_argument('--u_net', action='store_true', help='use U-net generator')\n parser.add_argument('--pretrained', action='store_true', help='load pretrained weights')\n\n # Model parameters\n parser.add_argument('--sizeh', type=int, default=512, help='size of the image')\n parser.add_argument('--sizew', type=int, default=640, help='size of the image')\n parser.add_argument('--input_nc', type = int, default = 1, help = 'number of input channels')\n parser.add_argument('--output_nc', type = int, default = 1, help = 'number of output channels')\n parser.add_argument('--ngf', type = int, default = 64, help = 'number of filters in the generator')\n parser.add_argument('--ndf', type = int, default = 64, help = 'number of filters in the discriminator')\n parser.add_argument('--dropout', type = bool, default = False, help = 'whether to use dropout')\n parser.add_argument('--n_res', type = int, default = 9, help = 'number of resNet blocks')\n parser.add_argument('--cycle_loss', type = float, default=10, help = 'coefficient of cycle consistent loss')\n parser.add_argument('--identity_loss', type = float, default=0, help = 'coefficient of identity loss')\n\n opt = parser.parse_args()\n return opt\n\n\ndef main():\n # Get training options\n opt = get_opt()\n\n # Define the networks\n # netG_A: used to transfer image from domain A to domain B\n # netG_B: used to transfer image from domain B to domain A\n netG_A = networks.Generator(opt.input_nc, opt.output_nc, opt.ngf, opt.n_res, opt.dropout)\n netG_B = networks.Generator(opt.output_nc, opt.input_nc, opt.ngf, opt.n_res, opt.dropout)\n if opt.u_net:\n netG_A = networks.U_net(opt.input_nc, opt.output_nc, opt.ngf)\n netG_B = networks.U_net(opt.output_nc, opt.input_nc, opt.ngf)\n\n # netD_A: used to test whether an image is from domain B\n # netD_B: used to test whether an image is from domain A\n netD_A = networks.Discriminator(opt.input_nc, opt.ndf)\n netD_B = networks.Discriminator(opt.output_nc, opt.ndf)\n\n # Initialize the networks\n if opt.cuda:\n netG_A.cuda()\n netG_B.cuda()\n netD_A.cuda()\n netD_B.cuda()\n utils.init_weight(netG_A)\n utils.init_weight(netG_B)\n utils.init_weight(netD_A)\n utils.init_weight(netD_B)\n\n if opt.pretrained:\n 
netG_A.load_state_dict(torch.load('pretrained/netG_A.pth'))\n netG_B.load_state_dict(torch.load('pretrained/netG_B.pth'))\n netD_A.load_state_dict(torch.load('pretrained/netD_A.pth'))\n netD_B.load_state_dict(torch.load('pretrained/netD_B.pth'))\n\n\n # Define the loss functions\n criterion_GAN = utils.GANLoss()\n if opt.cuda:\n criterion_GAN.cuda()\n\n criterion_cycle = torch.nn.L1Loss()\n # Alternatively, can try MSE cycle consistency loss\n #criterion_cycle = torch.nn.MSELoss()\n criterion_identity = torch.nn.L1Loss()\n\n # Define the optimizers\n optimizer_G = torch.optim.Adam(itertools.chain(netG_A.parameters(), netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))\n optimizer_D_A = torch.optim.Adam(netD_A.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n optimizer_D_B = torch.optim.Adam(netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\n # Create learning rate schedulers\n lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(optimizer_G, lr_lambda = utils.Lambda_rule(opt.epoch, opt.n_epochs, opt.n_epochs_decay).step)\n lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(optimizer_D_A, lr_lambda = utils.Lambda_rule(opt.epoch, opt.n_epochs, opt.n_epochs_decay).step)\n lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(optimizer_D_B, lr_lambda = utils.Lambda_rule(opt.epoch, opt.n_epochs, opt.n_epochs_decay).step)\n\n Tensor = torch.cuda.FloatTensor if opt.cuda else torch.Tensor\n input_A = Tensor(opt.batch_size, opt.input_nc, opt.sizeh, opt.sizew)\n input_B = Tensor(opt.batch_size, opt.output_nc, opt.sizeh, opt.sizew)\n\n # Define two image pools to store generated images\n fake_A_pool = utils.ImagePool()\n fake_B_pool = utils.ImagePool()\n\n # Define the transform, and load the data\n transform = transforms.Compose([transforms.Resize((opt.sizeh, opt.sizew)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n dataloader = DataLoader(ImageDataset(opt.rootdir, transform = transform, mode = 'train'), batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)\n\n # numpy arrays to store the loss of epoch\n loss_G_array = np.zeros(opt.n_epochs + opt.n_epochs_decay)\n loss_D_A_array = np.zeros(opt.n_epochs + opt.n_epochs_decay)\n loss_D_B_array = np.zeros(opt.n_epochs + opt.n_epochs_decay)\n\n # Training\n for epoch in range(opt.epoch, opt.n_epochs + opt.n_epochs_decay):\n start = time.strftime(\"%H:%M:%S\")\n print(\"current epoch :\", epoch, \" start time :\", start)\n # Empty list to store the loss of each mini-batch\n loss_G_list = []\n loss_D_A_list = []\n loss_D_B_list = []\n\n for i, batch in enumerate(dataloader):\n if i % 50 == 1:\n print(\"current step: \", i)\n current = time.strftime(\"%H:%M:%S\")\n print(\"current time :\", current)\n print(\"last loss G:\", loss_G_list[-1], \"last loss D_A\", loss_D_A_list[-1], \"last loss D_B\", loss_D_B_list[-1])\n real_A = input_A.copy_(batch['A'])\n real_B = input_B.copy_(batch['B'])\n\n # Train the generator\n optimizer_G.zero_grad()\n\n # Compute fake images and reconstructed images\n fake_B = netG_A(real_A)\n fake_A = netG_B(real_B)\n\n if opt.identity_loss != 0:\n same_B = netG_A(real_B)\n same_A = netG_B(real_A)\n\n # discriminators require no gradients when optimizing generators\n utils.set_requires_grad([netD_A, netD_B], False)\n\n # Identity loss\n if opt.identity_loss != 0:\n loss_identity_A = criterion_identity(same_A, real_A) * opt.identity_loss\n loss_identity_B = criterion_identity(same_B, real_B) * opt.identity_loss\n\n # GAN loss\n prediction_fake_B 
= netD_B(fake_B)\n loss_gan_B = criterion_GAN(prediction_fake_B, True)\n prediction_fake_A = netD_A(fake_A)\n loss_gan_A = criterion_GAN(prediction_fake_A, True)\n\n # Cycle consistent loss\n recA = netG_B(fake_B)\n recB = netG_A(fake_A)\n loss_cycle_A = criterion_cycle(recA, real_A) * opt.cycle_loss\n loss_cycle_B = criterion_cycle(recB, real_B) * opt.cycle_loss\n\n # total loss without the identity loss\n loss_G = loss_gan_B + loss_gan_A + loss_cycle_A + loss_cycle_B\n\n if opt.identity_loss != 0:\n loss_G += loss_identity_A + loss_identity_B\n\n loss_G_list.append(loss_G.item())\n loss_G.backward()\n optimizer_G.step()\n\n # Train the discriminator\n utils.set_requires_grad([netD_A, netD_B], True)\n\n\n # Train the discriminator D_A\n optimizer_D_A.zero_grad()\n # real images\n pred_real = netD_A(real_A)\n loss_D_real = criterion_GAN(pred_real, True)\n\n # fake images\n fake_A = fake_A_pool.query(fake_A)\n pred_fake = netD_A(fake_A.detach())\n loss_D_fake = criterion_GAN(pred_fake, False)\n\n #total loss\n loss_D_A = (loss_D_real + loss_D_fake) * 0.5\n loss_D_A_list.append(loss_D_A.item())\n loss_D_A.backward()\n optimizer_D_A.step()\n\n # Train the discriminator D_B\n optimizer_D_B.zero_grad()\n # real images\n pred_real = netD_B(real_B)\n loss_D_real = criterion_GAN(pred_real, True)\n\n # fake images\n fake_B = fake_B_pool.query(fake_B)\n pred_fake = netD_B(fake_B.detach())\n loss_D_fake = criterion_GAN(pred_fake, False)\n\n # total loss\n loss_D_B = (loss_D_real + loss_D_fake) * 0.5\n loss_D_B_list.append(loss_D_B.item())\n loss_D_B.backward()\n optimizer_D_B.step()\n\n # Update the learning rate\n lr_scheduler_G.step()\n lr_scheduler_D_A.step()\n lr_scheduler_D_B.step()\n\n # Save models checkpoints\n torch.save(netG_A.state_dict(), 'model/netG_A.pth')\n torch.save(netG_B.state_dict(), 'model/netG_B.pth')\n torch.save(netD_A.state_dict(), 'model/netD_A.pth')\n torch.save(netD_B.state_dict(), 'model/netD_B.pth')\n\n\n\n\n # Save other checkpoint information\n checkpoint = {'epoch': epoch,\n 'optimizer_G': optimizer_G.state_dict(),\n 'optimizer_D_A': optimizer_D_A.state_dict(),\n 'optimizer_D_B': optimizer_D_B.state_dict(),\n 'lr_scheduler_G': lr_scheduler_G.state_dict(),\n 'lr_scheduler_D_A': lr_scheduler_D_A.state_dict(),\n 'lr_scheduler_D_B': lr_scheduler_D_B.state_dict()}\n torch.save(checkpoint, 'model/checkpoint.pth')\n\n\n\n # Update the numpy arrays that record the loss\n loss_G_array[epoch] = sum(loss_G_list) / len(loss_G_list)\n loss_D_A_array[epoch] = sum(loss_D_A_list) / len(loss_D_A_list)\n loss_D_B_array[epoch] = sum(loss_D_B_list) / len(loss_D_B_list)\n np.savetxt('model/loss_G.txt', loss_G_array)\n np.savetxt('model/loss_D_A.txt', loss_D_A_array)\n np.savetxt('model/loss_D_b.txt', loss_D_B_array)\n\n\n if epoch % 10 == 9:\n torch.save(netG_A.state_dict(), 'model/netG_A' + str(epoch) + '.pth')\n torch.save(netG_B.state_dict(), 'model/netG_B' + str(epoch) + '.pth')\n torch.save(netD_A.state_dict(), 'model/netD_A' + str(epoch) + '.pth')\n torch.save(netD_B.state_dict(), 'model/netD_B' + str(epoch) + '.pth')\n\n end = time.strftime(\"%H:%M:%S\")\n print(\"current epoch :\", epoch, \" end time :\", end)\n print(\"G loss :\", loss_G_array[epoch], \"D_A loss :\", loss_D_A_array[epoch], \"D_B loss :\", loss_D_B_array[epoch])\n\n\n\nif __name__ == \"__main__\":\n main()\n\n", "id": "7934151", "language": "Python", "matching_score": 5.608248233795166, "max_stars_count": 6, "path": "train.py" }, { "content": "import networks\nimport argparse\nimport utils\nimport torch\nimport 
torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom datasets import ImageDataset\nfrom PIL import Image\n\n\n# Get the options for testing\ndef get_opt():\n parser = argparse.ArgumentParser()\n # Parameters for testing\n parser.add_argument('--batch_size', type=int, default=1, help='batch size of testing')\n parser.add_argument('--cuda', action='store_true', help='use GPU computation')\n parser.add_argument('--rootdir', type=str, default='datasets/NIRI_to_NIRII/', help='root directory of the dataset')\n parser.add_argument('--n_cpu', type=int, default=4, help='number of cpu threads to use during batch generation')\n parser.add_argument('--u_net', action='store_true', help='use U-net generator')\n\n # Model parameters\n parser.add_argument('--sizeh', type=int, default=512, help='size of the image')\n parser.add_argument('--sizew', type=int, default=640, help='size of the image')\n parser.add_argument('--input_nc', type=int, default=1, help='number of input channels')\n parser.add_argument('--output_nc', type=int, default=1, help='number of output channels')\n parser.add_argument('--ngf', type=int, default=64, help='number of filters in the generator')\n parser.add_argument('--ndf', type=int, default=64, help='number of filters in the discriminator')\n parser.add_argument('--dropout', type=bool, default=False, help='whether to use dropout')\n parser.add_argument('--n_res', type=int, default=9, help='number of resNet blocks')\n parser.add_argument('--net_GA', type=str, default='model/netG_A.pth', help='path of the parameters of the generator A')\n\n opt = parser.parse_args()\n return opt\n\n\ndef main():\n opt = get_opt()\n\n # Define the Generators, only G_A is used for testing\n netG_A = networks.Generator(opt.input_nc, opt.output_nc, opt.ngf, opt.n_res, opt.dropout)\n if opt.u_net:\n netG_A = networks.U_net(opt.input_nc, opt.output_nc, opt.ngf)\n\n if opt.cuda:\n netG_A.cuda()\n # Do not need to track the gradients during testing\n utils.set_requires_grad(netG_A, False)\n netG_A.eval()\n netG_A.load_state_dict(torch.load(opt.net_GA))\n\n # Load the data\n transform = transforms.Compose([transforms.Resize((opt.sizeh, opt.sizew)),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))])\n dataloader = DataLoader(ImageDataset(opt.rootdir, transform=transform, mode='val'), batch_size=opt.batch_size,\n shuffle=False, num_workers=opt.n_cpu)\n\n Tensor = torch.cuda.FloatTensor if opt.cuda else torch.Tensor\n input_A = Tensor(opt.batch_size, opt.input_nc, opt.size, opt.size)\n\n for i, batch in enumerate(dataloader):\n name, image = batch\n real_A = input_A.copy_(image)\n fake_B = netG_A(real_A)\n batch_size = len(name)\n # Save the generated images\n for j in range(batch_size):\n image_name = name[j].split('/')[-1]\n path = 'generated_image/' + image_name\n utils.save_image(fake_B[j, :, :, :], path)\n\nif __name__ == '__main__':\n main()\n", "id": "9120207", "language": "Python", "matching_score": 1.5173746347427368, "max_stars_count": 6, "path": "test.py" }, { "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n# Define the Resnet block\nclass ResnetBlock(nn.Module):\n def __init__(self, dim, use_dropout):\n super(ResnetBlock, self).__init__()\n layers = [nn.ReflectionPad2d(1),\n nn.Conv2d(in_channels=dim, out_channels=dim, kernel_size=3),\n nn.InstanceNorm2d(dim),\n nn.ReLU(inplace=True)]\n if use_dropout:\n layers.append(nn.Dropout(0.5))\n\n layers += [nn.ReflectionPad2d(1),\n nn.Conv2d(in_channels=dim, 
out_channels=dim, kernel_size=3),\n nn.InstanceNorm2d(dim)]\n self.block = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.block(x) + x\n return out\n\n\n# The Generator class\n# The Generator consists of down-sampling layers, Resnet block, and up-sampling layers\nclass Generator(nn.Module):\n\n def __init__(self, in_channels, out_channels, out_features = 64, num_res_blocks = 9, use_dropout = False):\n '''\n Parameters:\n in_channels: number of channels in input images\n out_channels: number of channels in output images\n out_features: number of channels after the first convolutional layer\n num_res_blocks: number of ResNet blocks\n use_dropput: whether to use dropout in the resnet block\n '''\n super(Generator, self).__init__()\n\n layers = [\n nn.ReflectionPad2d(3),\n nn.Conv2d(in_channels = in_channels, out_channels = out_features, kernel_size = 7),\n nn.InstanceNorm2d(out_features),\n nn.ReLU(inplace = True)\n ]\n\n # Downsampling layers, there are 2 down-sampling layers by default\n num_dowsampling = 2 #number of downsampling layers\n curr_channels = out_features\n for i in range(num_dowsampling):\n next_channels = curr_channels * 2\n layers.append(nn.Conv2d(in_channels = curr_channels, out_channels = next_channels, kernel_size = 3, stride = 2, padding = 1))\n layers.append(nn.InstanceNorm2d(next_channels))\n layers.append(nn.ReLU(inplace=True))\n curr_channels = next_channels\n\n # Adding the resnet blocks\n for i in range(num_res_blocks):\n layers.append(ResnetBlock(curr_channels, use_dropout = use_dropout))\n\n # Upsampling layers, there are 2 up-sampling layers by default\n for i in range(num_dowsampling):\n next_channels = curr_channels // 2\n layers.append(nn.ConvTranspose2d(in_channels=curr_channels, out_channels=next_channels, kernel_size=3, stride=2, padding=1, output_padding=1))\n layers.append(nn.InstanceNorm2d(next_channels))\n layers.append(nn.ReLU(inplace=True))\n curr_channels = next_channels\n\n layers += [nn.ReflectionPad2d(3),\n nn.Conv2d(in_channels = out_features, out_channels = out_channels, kernel_size = 7),\n nn.Tanh()]\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)\n\n\n# The Discriminator class\nclass Discriminator(nn.Module):\n def __init__(self, in_channels, out_features = 64):\n '''\n Parameters\n in_channels: number of channels in input images\n out_features: number of channels after the first convolutional layer\n '''\n super(Discriminator, self).__init__()\n layers = []\n\n layers += [nn.Conv2d(in_channels=in_channels, out_channels=out_features, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(0.2, inplace=True)]\n\n curr_channels = out_features\n num_layers = 3 # number of convolutional layers\n\n for i in range(1, num_layers):\n next_channels = curr_channels * 2\n layers += [nn.Conv2d(in_channels= curr_channels, out_channels=next_channels, kernel_size=4, stride=2, padding=1),\n nn.InstanceNorm2d(next_channels),\n nn.LeakyReLU(0.2, inplace=True)]\n curr_channels = next_channels\n\n next_channels = curr_channels * 2\n layers += [nn.Conv2d(in_channels=curr_channels, out_channels=next_channels, kernel_size=4, stride=1, padding=1),\n nn.InstanceNorm2d(next_channels),\n nn.LeakyReLU(0.2, inplace=True)]\n curr_channels = next_channels\n\n # output one channel prediction map\n layers += [nn.Conv2d(in_channels= curr_channels, out_channels=1, kernel_size=4, stride=1, padding=1)]\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.model(x)\n\n# The building block of U-Net\n# 
Two consecutive convolutional layers\n\nclass DoubleConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(DoubleConv, self).__init__()\n layers = [\n nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),\n nn.InstanceNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),\n nn.InstanceNorm2d(out_channels),\n nn.ReLU(inplace=True)\n ]\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layers(x)\n\n\n# The building block of U-Net\n# Down-sampling layers of U-Net\n\nclass Down(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(Down, self).__init__()\n layers = [\n nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels)\n ]\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layers(x)\n\n# The building block of U-Net\n# Up-sampling layers of U-Net\n\nclass Up(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(Up, self).__init__()\n self.conv1 = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)\n self.conv2 = DoubleConv(in_channels, out_channels)\n\n def forward(self, x1, x2):\n x1 = self.conv1(x1)\n x = torch.cat((x2, x1), dim=1)\n x = self.conv2(x)\n return x\n\nclass U_net(nn.Module):\n def __init__(self, in_channels, out_channels, out_features = 64):\n super(U_net, self).__init__()\n self.conv1 = DoubleConv(in_channels, out_features)\n self.conv2 = Down(out_features, out_features * 2)\n self.conv3 = Down(out_features * 2, out_features * 4)\n self.conv4 = Down(out_features * 4, out_features * 8)\n self.conv5 = Down(out_features * 8, out_features * 16)\n self.deconv4 = Up(out_features * 16, out_features * 8)\n self.deconv3 = Up(out_features * 8, out_features * 4)\n self.deconv2 = Up(out_features * 4, out_features * 2)\n self.deconv1 = Up(out_features * 2, out_features)\n layers= [\n nn.Conv2d(in_channels=out_features, out_channels=out_channels, kernel_size=3, padding=1),\n nn.Tanh()\n ]\n self.output = nn.Sequential(*layers)\n\n\n def forward(self, x):\n x1 = self.conv1(x)\n x2 = self.conv2(x1)\n x3 = self.conv3(x2)\n x4 = self.conv4(x3)\n x5 = self.conv5(x4)\n out = self.deconv4(x5, x4)\n out = self.deconv3(out, x3)\n out = self.deconv2(out, x2)\n out = self.deconv1(out, x1)\n out = self.output(out)\n return out\n\n\n\n", "id": "9090444", "language": "Python", "matching_score": 0.4259721338748932, "max_stars_count": 6, "path": "networks.py" }, { "content": "import torch\nfrom torch.nn import init\nimport torch.nn as nn\nimport random\nimport torchvision.transforms as transforms\n\n# Initialize the weight of the network\ndef init_weight(net, init_gain = 0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n init.normal_(m.weight.data, 0.0, init_gain)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1:\n init.normal_(m.weight.data, 1.0, init_gain)\n init.constant_(m.bias.data, 0.0)\n net.apply(init_func)\n\n\n# Define the lambda policy for the learning rate decay\nclass Lambda_rule():\n def __init__(self, start_epoch, initial_epoch, decay_epoch):\n self.start_epoch = start_epoch #index of the first epoch\n self.initial_epoch = initial_epoch #number of epochs with the initial learning rate\n self.decay_epoch = decay_epoch #number of epochs with learning rate decay\n\n def step(self, epoch):\n return 1.0 - max(0, 
epoch + self.start_epoch - self.initial_epoch) / float(self.decay_epoch + 1)\n\n# Set if parameters of a network requires gradient\ndef set_requires_grad(nets, requires_grad = False):\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is not None:\n for param in net.parameters():\n param.requires_grad = requires_grad\n\n# GAN loss for the network\nclass GANLoss(nn.Module):\n def __init__(self, target_real_label = 1.0, target_fake_label = 0.0):\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n self.loss = nn.MSELoss()\n\n def get_target_tensor(self, prediction, target_is_real):\n # Return a tensor filled with ground-truth label, and has the same size as the prediction\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n\n return target_tensor.expand_as(prediction)\n\n def __call__(self, prediction, target_is_real):\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n return self.loss(prediction, target_tensor)\n\n# Store and load previously generated fake images\n# The implementation is in reference to https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.git\nclass ImagePool():\n def __init__(self, pool_size = 50):\n self.pool_size = pool_size\n if self.pool_size > 0:\n # Create an empty pool\n self.num_imgs = 0\n self.images = []\n\n def query(self, images):\n # return an image from the image pool\n # If the pool size is 0, just return the input images\n if self.pool_size == 0:\n return images\n return_images = []\n for image in images:\n image = torch.unsqueeze(image.data, 0)\n if self.num_imgs < self.pool_size:\n # If the pool is not full, insert the current image\n self.num_imgs += 1\n self.images.append(image)\n return_images.append(image)\n else:\n p = random.uniform(0, 1)\n if p > 0.5:\n # return a random image, and insert current image in the pool\n random_id = random.randint(0, self.pool_size - 1)\n tmp = self.images[random_id].clone()\n self.images[random_id] = image\n return_images.append(tmp)\n else:\n # return current image\n return_images.append(image)\n return_images = torch.cat(return_images, 0)\n return return_images\n\n# Transform image tensor to png image\ndef save_image(tensor, name):\n unloader = transforms.ToPILImage()\n image = tensor.cpu().clone() # we clone the tensor to not do changes on it\n image = image * 0.5 + 0.5\n image = image.squeeze(0) # remove the fake batch dimension\n image = unloader(image)\n image.save(name, \"PNG\")", "id": "5528214", "language": "Python", "matching_score": 1.4397469758987427, "max_stars_count": 6, "path": "utils.py" }, { "content": "from torch.utils.data import Dataset\nfrom PIL import Image\nimport random\nimport glob\nimport os\n\n\n# Define the dataset class\n# In refrence to the following implementation: https://github.com/aitorzip/PyTorch-CycleGAN.git\nclass ImageDataset(Dataset):\n def __init__(self, root, transform = None, mode = 'train'):\n self.transform = transform\n self.files_A = sorted(glob.glob(os.path.join(root, '%s/A' % mode) + '/*.*'))\n self.mode = mode\n if self.mode == 'train':\n self.files_B = sorted(glob.glob(os.path.join(root, '%s/B' % mode) + '/*.*'))\n\n\n def __getitem__(self, index):\n item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))\n if self.mode == 'train':\n item_B = self.transform(Image.open(self.files_B[random.randint(0, len(self.files_B) - 1)]))\n return 
{'A': item_A, 'B': item_B}\n else:\n return self.files_A[index % len(self.files_A)], item_A\n\n def __len__(self):\n if self.mode == 'train':\n return max(len(self.files_A), len(self.files_B))\n else:\n return len(self.files_A)\n\n\n# Define the paired dataset class\nclass PairedImage(Dataset):\n def __init__(self, root, transform = None, mode = 'train'):\n self.transform = transform\n self.files_A = sorted(glob.glob(os.path.join(root, '%s/A' % mode) + '/*.*'))\n self.mode = mode\n if self.mode == 'train':\n self.files_B = sorted(glob.glob(os.path.join(root, '%s/B' % mode) + '/*.*'))\n\n def __getitem__(self, index):\n item_A = self.transform(Image.open(self.files_A[index]))\n if self.mode == 'train':\n item_B = self.transform(Image.open(self.files_B[index]))\n return {'A': item_A, 'B': item_B}\n else:\n return self.files_A[index], item_A\n\n def __len__(self):\n return len(self.files_A)\n", "id": "3437739", "language": "Python", "matching_score": 0.3269580602645874, "max_stars_count": 6, "path": "datasets.py" } ]
1.439747
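The record above only ships the building blocks of a CycleGAN-style setup: a U-Net generator (networks.py), training utilities (utils.py: weight initialization, a linear learning-rate decay rule, an LSGAN loss, an image pool, a requires_grad toggle) and unpaired/paired image datasets (datasets.py). As a reading aid, here is a hedged sketch of how those pieces would typically be wired together for one generator/discriminator update. PatchDiscriminator, the 256x256 dummy tensors, the Adam hyper-parameters and the schedule lengths are illustrative assumptions, not code from the repository; only U_net, init_weight, set_requires_grad, GANLoss, ImagePool and Lambda_rule come from the files above, assumed importable as the modules networks and utils.

# Hypothetical wiring sketch -- not taken from the repository itself.
import torch
import torch.nn as nn
from networks import U_net                     # defined in networks.py above
from utils import (init_weight, set_requires_grad,
                   GANLoss, ImagePool, Lambda_rule)   # defined in utils.py above

class PatchDiscriminator(nn.Module):
    """Tiny placeholder patch discriminator; the real repository defines its own."""
    def __init__(self, in_channels=3):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 1, kernel_size=4, stride=1, padding=1),
        )
    def forward(self, x):
        return self.net(x)

G = U_net(in_channels=3, out_channels=3)       # generator A -> B
D = PatchDiscriminator(3)                      # critic for domain B (placeholder)
init_weight(G)
init_weight(D)

criterion_gan = GANLoss()                      # LSGAN-style MSE objective
fake_pool = ImagePool(pool_size=50)            # buffer of previously generated fakes
opt_G = torch.optim.Adam(G.parameters(), lr=2e-4, betas=(0.5, 0.999))
opt_D = torch.optim.Adam(D.parameters(), lr=2e-4, betas=(0.5, 0.999))
# linear decay after 100 "initial" epochs, driven by Lambda_rule.step (assumed lengths)
sched_G = torch.optim.lr_scheduler.LambdaLR(opt_G, lr_lambda=Lambda_rule(0, 100, 100).step)

real_A = torch.randn(1, 3, 256, 256)           # dummy batch from domain A
real_B = torch.randn(1, 3, 256, 256)           # dummy batch from domain B

# generator update: freeze D, push D(G(A)) towards the "real" label
set_requires_grad(D, False)
opt_G.zero_grad()
fake_B = G(real_A)
loss_G = criterion_gan(D(fake_B), True)
loss_G.backward()
opt_G.step()

# discriminator update: real images vs. a fake drawn from the image pool
set_requires_grad(D, True)
opt_D.zero_grad()
fake_from_pool = fake_pool.query(fake_B.detach())
loss_D = 0.5 * (criterion_gan(D(real_B), True) + criterion_gan(D(fake_from_pool), False))
loss_D.backward()
opt_D.step()
sched_G.step()                                 # normally advanced once per epoch

The image pool and the requires_grad toggle are the two details that are easy to miss when reading the utilities in isolation: the pool stabilizes the discriminator by mixing in older fakes, and freezing D during the generator step avoids accumulating gradients that the next opt_D.step() would otherwise consume.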
langdonholmes
[ { "content": "import os\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport pickle5 as pickle\nfrom typing import Optional, Dict, Any, List\n\nimport spacy\nfrom spacy import displacy\nfrom spacy.tokens import Doc\nfrom spacy.matcher import DependencyMatcher\n\nimport streamlit as st\nfrom spacy_streamlit.util import load_model, process_text, get_svg, LOGO\n\n# I should really split these functions into other files, but it is tricky\n# because I haven't managed my vars well...\n\n\ndef my_parser(doc: Doc, sent_index) -> Dict[str, Any]:\n words = [\n {\n \"text\": w.text,\n \"tag\": w.pos_,\n \"lemma\": None,\n }\n for w in doc\n ]\n arcs = []\n for i, (coltype, parent, child) in extractions:\n if i == sent_index:\n if child.i < parent.i:\n arcs.append(\n {\"start\": child.i, \"end\": parent.i, \"label\": coltype, \"dir\": \"left\"}\n )\n elif child.i > parent.i:\n arcs.append(\n {\"start\": parent.i, \"end\": child.i, \"label\": coltype, \"dir\": \"right\"}\n )\n return {\"words\": words, \"arcs\": arcs}\n\n\ndef visualize_parser(docs: List[spacy.tokens.Doc], *, title: Optional[str] = None, key: Optional[str] = None) -> None:\n st.header(title)\n cols = st.beta_columns(2)\n num_parses = cols[1].select_slider('Number of Sentences to Visualise:', options=[0, 1, 2, 3, 4], value=1)\n vismode = cols[0].radio('Which Dependencies to Show', ('All', 'Collocation Candidates'))\n if num_parses >= 1:\n for num, sent in enumerate(docs):\n if num < num_parses:\n allparsed = displacy.parse_deps(sent)\n colparsed = my_parser(sent, num)\n html = displacy.render((allparsed if vismode == 'All' else colparsed), style=\"dep\", manual=True)\n # Double newlines seem to mess with the rendering\n html = html.replace(\"\\n\\n\", \"\\n\")\n if len(docs) > 1:\n st.markdown(f\"> {sent.text}\")\n st.write(get_svg(html), unsafe_allow_html=True)\n\n# Cosmetics\n\nst.sidebar.markdown(LOGO, unsafe_allow_html=True)\nst.sidebar.title('Collocations')\nst.sidebar.markdown('Collocations are arbitrarily conventionalized combinations of words. '\n 'This page demos a new method for automatically extracting collocation candidates using a dependency parser and a part-of-speech tagger. '\n 'Here, candidates are extracted from a user-provided sample text. 
'\n 'The same method has been applied at scale to extract collocation candidates from a reference corpus.'\n 'Statistical measures of association strength (AMs) were calculated for all the collocation candidates extracted from the reference corpus.'\n 'Although there is no clear cutoff, the most strongly associated candidates can be considered collocations.'\n 'This method may be used to assess a language learner\\'s lexical proficiency.'\n ' -- <NAME>, California State University Long Beach')\n\n\n# Select a Model, Load its Configuration\n\nmodels = {\"en_core_web_sm\": 'Small', \"en_core_web_lg\": 'Large', \"en_core_web_trf\": 'Transformer-based'}\nmodel_names = models\nformat_func = str\nformat_func = lambda name: models.get(name, name)\nmodel_names = list(models.keys())\nspacy_model = st.sidebar.selectbox(\n \"Choose a Pre-trained NLP Model:\",\n model_names,\n index=0,\n format_func=format_func,\n)\nmodel_load_state = st.info(f\"Loading model '{spacy_model}'...\")\nnlp = load_model(spacy_model)\nmodel_load_state.empty()\n\nst.sidebar.subheader(\"spaCy pipeline:\")\ndesc = f\"\"\"<p style=\"font-size: 0.85em; line-height: 1.5\"><strong>{spacy_model}:</strong> <code>v{nlp.meta['version']}</code></p>\"\"\"\nst.sidebar.markdown(desc, unsafe_allow_html=True)\n\n# Initialize Matcher Generator\n\nmatcher = DependencyMatcher(nlp.vocab)\nwith open('matcherPatterns.pickle', 'rb') as fp:\n pattern_dict = pickle.load(fp)\nfor coltype, pattern in pattern_dict.items():\n matcher.add(coltype, pattern)\n\n# Text Box\ndefault_text = \"You can enter some sentences here to see their dependency relationships. In the sidebar, you can choose which spaCy pipeline to use. Hit ctrl + enter to give it a whirl (and check out how each parser handles the first phrase of this sentence).\"\nst.title(\"Collocation Extractor\")\ntext = st.text_area(\"Text to analyze\", default_text, height=200)\n\n# Process Text, then retokenize with collapsed punctuation, then split into sentence docs\ndoc = process_text(spacy_model, text)\n\ndef my_spans(doc):\n spans = []\n for word in doc[:-1]:\n if word.is_punct or not word.nbor(1).is_punct:\n continue\n start = word.i\n end = word.i + 1\n while end < len(doc) and doc[end].is_punct:\n end += 1\n span = doc[start:end]\n spans.append((span, word.tag_, word.lemma_, word.is_alpha, word.ent_type_))\n with doc.retokenize() as retokenizer:\n for span, tag, lemma, is_alpha, ent_type in spans:\n attrs = {\"tag\": tag, \"lemma\": lemma, \"is_alpha\": is_alpha, \"ent_type\": ent_type}\n retokenizer.merge(span, attrs=attrs)\n docs = [span.as_doc() for span in doc.sents]\n return docs\n\n\ndocs = my_spans(doc)\n\n# Run Collocation Candidate Extractor on the Punctuation-Collapsed Spans\n# Gets a Tuple( sentence index, ( coltype as string, parent as doc.token, child as doc.token))\n\nextractions = []\nfor i, sent in enumerate(docs):\n for match_id, match in matcher(sent):\n parent = sent[match[0]]\n child = sent[match[1]]\n coltype = nlp.vocab.strings[match_id]\n extractions.append((i, (coltype, parent, child)))\n\n# Get text version of candidates for dataframe manipulations\n\ntextextractions = []\nfor (sent, (coltype, parent, child)) in extractions:\n textextractions.append((coltype, parent.lemma_, parent.tag_, child.lemma_, child.tag_))\n\n# Run the Visualizer\n\nvisualize_parser(docs, title='Visualize the Dependencies')\n\nst.header(\"Collocation Candidate Statistics\")\n\[email protected]\ndef my_calc(textextractions):\n if os.path.isfile('OANCBigramStats.pickle'):\n df1 = 
pd.read_pickle('OANCBigramStats.pickle')\n num_candidates = 7389634\n\n common_cols = [\"Collocation Type\", \"Headword Lemma\", \"Headword Tag\", \"Dependent Word Lemma\", \"Dependent Word Tag\"]\n\n df2 = pd.DataFrame(textextractions, columns=common_cols)\n\n df = pd.merge(df1, df2, on=common_cols, how='inner')\n df = df.assign(r_1=lambda x: x[\"o_11\"] + x[\"o_21\"],\n c_1=lambda x: x['o_11'] + x['o_12'],\n e_11=lambda x: x['r_1'] * x['c_1'] / num_candidates,\n MI=lambda x: np.log2(x['o_11'] / x['e_11']))\n df = df.assign(T=lambda x: (x['o_11'] - x['e_11']) / np.sqrt(x['o_11']))\n\n output = df.loc[:, (\"Headword Lemma\", \"Dependent Word Lemma\", \"T\", \"MI\")]\n output['Reference Frequency'] = df['o_11']\n return(output)\n\n\noutput = my_calc(textextractions)\nst.table(output)\n", "id": "6478195", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "colextractor.py" } ]
0
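The cached my_calc() step above is the statistical core of this app: it joins the candidates extracted from the user's text against pre-computed contingency counts from a reference corpus and derives two association measures. The stand-alone sketch below repeats that arithmetic for a single, invented word pair so the MI and T columns of the final table are easier to interpret; o_11, o_12 and o_21 are made-up counts, and only the constant 7389634 and the formulas themselves are taken from the code above.

import numpy as np

N = 7389634                                  # num_candidates constant used in my_calc
o_11, o_12, o_21 = 120.0, 3400.0, 9800.0     # invented contingency counts for one pair

r_1  = o_11 + o_21                           # marginal frequency of the headword slot
c_1  = o_11 + o_12                           # marginal frequency of the dependent-word slot
e_11 = r_1 * c_1 / N                         # expected co-occurrence under independence

MI = np.log2(o_11 / e_11)                    # (pointwise) mutual information
T  = (o_11 - e_11) / np.sqrt(o_11)           # t-score

print(f"e_11 = {e_11:.2f}, MI = {MI:.2f}, T = {T:.2f}")

MI rewards exclusive but possibly rare pairings, while the t-score rewards sheer frequency; that is why the table reports both measures next to the raw reference frequency o_11.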
manureta
[ { "content": "import tkinter\nm=tkinter.Tk()\n''' \nwidgets are added here \n'''\nm.mainloop() \n", "id": "6596238", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "tkinter.py" }, { "content": "from CortaCinta import CortaCinta\n\ncintas = [\n[],\n[1],\n[1, 1],\n[4, 5],\n[2, 2, 2, 2],\n[3, 2, 1, 1, 2],\n]\n\n\nn, m = 2, 3\n\nfor cinta in cintas:\n print (cinta, ':', CortaCinta(cinta, n, m))\n \n", "id": "11669483", "language": "Python", "matching_score": 0.985596776008606, "max_stars_count": 1, "path": "CortaCinta.tests.py" }, { "content": "\"\"\"\ntiyulo: CortaCinta.py\ndescripcion: algoritmo recursivo para cortar \nuna cinta (listado de un solo lado) \nen segmentos de entre menos y mas viviendas\nmenos y mas son los límites inferior y superior\ndel tamaño de un segmento respectivamente\nin: lista de números que representan la cantidad de viviendas de una secuencia de componentes\nout: lista con una secuencia de números que representan la cantidad de viviendas de los segmentos de la cinta\nescencialmente es una funcion de agregación de componentes, la secuencia de entrada puede no representar\ndirecciones de un solo lado, sino componentes en un recorrido\nautor: -h\nfecha: 2019-03-17 Do\n\"\"\"\ndef CortaCinta(cinta, menos, mas):\n if sum(cinta) < menos: \n # la cinta es muy corta\n # la cantidad de viviendas es menos que el límite inferior de un segmento\n # no es posible segmentar\n return None \n # caso base devuelve no factible\n if menos <= sum(cinta) <= mas: \n # la cinta puede ser 1 segmento\n # devuelve una lista unitaria con el numero de viviendas del componente\n return [sum(cinta)] \n # devuelve la longitud del segmento (componente)\n else:\n # se va a buscar dividir la cinta en los posibles head-tail\n \n i, s, heads = 0, 0, [] # init variables\n # qué representan estas variables?\n # i, s variables de iteracion\n # heads lista de heads cuyos tails son factibles\n # heads es un listado de posibles poenciales componentes \n # crear una lista de ramas ya exploradas\n while i < len(cinta) and s < menos: \n # get upto sgm len lower bound\n # aumentar i hasta el menor valor de tamaño del segmento\n # aumentar s en cantidad de viviendas o componentes de la cinta\n i, s = i + 1, s + cinta[i]\n while i < len(cinta) and menos <= s <= mas:\n # mientras factible\n # chequear que el candidato no haya sido ya explorado\n heads.append((i, [s])) \n # agrega candidatos a explorar\n i, s = i + 1, s + cinta[i]\n # llama a 1 función heurística para ordenar candidatos\n # call a function to sort heads with heuristic\n while heads:\n i, candidate = heads.pop()\n # extrae el indice y cantidad de tails a explorar\n tail = cinta[i:]\n sgms = CortaCinta(tail, menos, mas)\n # la lista de segmentos de una segmentacion exitosa de tail\n if sgms:\n # no es vacía, no es None, hay al menos un corte factible de la cinta\n candidate.extend(sgms)\n return candidate\n", "id": "6471034", "language": "Python", "matching_score": 2.42077374458313, "max_stars_count": 1, "path": "CortaCinta.py" }, { "content": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n#\n# corta una lista en segmentos dada una longitud deseada d\n#\n\n# in: lista con numeros, no se puede corta dentro de repeticion de numeros\n# ejemplo [ 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, ]\n# no se puede cortar [1, 2, 3,] y [3, 3, 4, 4, 5, 5, 5,]\n# y si [1, 2, 3, 3, 3] [4, 4, 5, 5, 5]\n# out: devuelve una lista (no exhaustiva) con posibles segmentos\n# ejemplo\n# [\n# [\n# [1, 2, 3, 3, 3, 4, 4],\n# [5, 5, 5,]\n# ],\n# [\n# [1, 2, 3, 3, 
3]\n# [4, 4, 5, 5, 5,]\n# ]\n# ]\n# lista de soluciones, donde\n# cada solucion es una lista de\n# segmentos\n\ndef CortarEn(k, lista):\n if k == 0:\n return 0\n # safe\n if lista[k] != lista[k - 1]:\n # se puede cortar\n return [ k ]\n else:\n # busca cortar antes y después\n i = 1\n while 0 < k - i and lista[ k - i ] == lista[ k ]:\n i += 1\n a = k - i + 1\n j = 1\n while k + j < len(lista) and lista[ k ] == lista[ k + j ]:\n j += 1\n b = k + j\n if a == 0:\n return [ b ]\n if b >= len(lista) - 1:\n return [ a ]\n else:\n return [ a, b ]\n\nlista = [ 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, ]\nprint 'lista: ', lista\nprint 'cortes: incrementales de a 1'\nfor k in range(len(lista) - 1):\n print k + 1, CortarEn(k + 1, lista)\n\nprint '---------------------------------------'\n\nd = 4\ni = 1\nPuntosDeCorte = []\nwhile i*d < len(lista):\n PuntosDeCorte.append(i*d)\n i += 1\nprint 'PuntosDeCorte: ', PuntosDeCorte\ncortes = []\nfor k in PuntosDeCorte:\n corte = CortarEn(k, lista)\n cortes.append(corte)\nprint 'cortes posibles: ', cortes\nprint '---------------------------------------'\n\nsolucion = []\nsoluciones = [[]]\nfor corte in cortes:\n solucion1.append(corte[0])\n soluciones.\n if len(corte) == 2\n\nprint solucion\n\n\n#### HAY QUE MASTICAR ESTO UN POCO MAS\n# Armar arbol de búsqueda en foram de Grafo y explorar según heurística\n# f(head) + h(tail)\n", "id": "4369130", "language": "Python", "matching_score": 1.0520609617233276, "max_stars_count": 1, "path": "cortar_deseado.py" }, { "content": "/*\ntitulo: costos_de_manzanas_y_lados.py\ndescripción: los costos de cada segemntación dados lados y manzanas en segmentos\nautor: -h\nfecha: 2019-05-09 Ju\n\n*/\n#################################################################################\n#\n# definición de funcion de costo\n# y relativas a la calidad del segmento y la segmentación\n#\n# caso 1\ncantidad_de_viviendas_deseada_por_segmento = 40\nif len(sys.argv) > 4:\n cantidad_de_viviendas_deseada_por_segmento = int(sys.argv[4])\n\n\ndef costo(segmento):\n # segmento es una lista de manzanas\n carga_segmento = carga(segmento)\n return abs(carga_segmento - cantidad_de_viviendas_deseada_por_segmento)\n\n#####################################################################################\n\ndef costos_segmentos(segmentacion):\n # segmentacion es una lista de segmentos\n return map(costo, segmentacion)\n # la lista de costos de los segmentos\n\ndef costo_segmentacion(segmentacion):\n # segmentacion es una lista de segmentos\n# cantidad_de_segmentos = len(segmentacion)\n# if cantidad_de_segmentos <= 2:\n return sum(costos_segmentos(segmentacion))\n# # suma la aplicación de costo a todos los segmentos\n# else:\n# return sum(costos_segmentos(segmentacion)) + 1e6*cantidad_de_segmentos\n\n\n", "id": "4176117", "language": "Python", "matching_score": 0.5303532481193542, "max_stars_count": 1, "path": "segmentador/costos_de_segmentacion_de_manzanas_y_lados_completos.py" }, { "content": "\"\"\"\ntitulo: particiones.py\ndescripción: calcula todas las particiones posibles de los componentes\nauthor: -h\nfecha: 2019-06\n\"\"\"\n# ver https://stackoverflow.com/questions/19368375/set-partitions-in-python\n\ndef partition(collection):\n if len(collection) == 1:\n yield [ collection ]\n return\n\n first = collection[0]\n for smaller in partition(collection[1:]):\n # insert `first` in each of the subpartition's subsets\n for n, subset in enumerate(smaller):\n yield smaller[:n] + [[ first ] + subset] + smaller[n+1:]\n # put `first` in its own subset \n yield [ [ first ] 
] + smaller\n\n\n#something = list(range(1,5))\n\n#for n, p in enumerate(partition(something), 1):\n# print(n, sorted(p))\n", "id": "6491139", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "sandbox/particiones.py" }, { "content": "# -*- coding: utf-8 -*-\n\n# Resource object code\n#\n# Created by: The Resource Compiler for PyQt5 (Qt v5.12.8)\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore\n\nqt_resource_data = b\"\\\n\\x00\\x00\\x10\\x20\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x30\\x00\\x00\\x00\\x2c\\x08\\x06\\x00\\x00\\x00\\x23\\x16\\x3b\\x67\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0b\\x13\\x00\\x00\\\n\\x0b\\x13\\x01\\x00\\x9a\\x9c\\x18\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe4\\x09\\x1a\\x17\\x39\\x1f\\xe9\\x50\\x13\\x04\\x00\\x00\\x0f\\xad\\x49\\x44\\\n\\x41\\x54\\x68\\xde\\xad\\x99\\x69\\x8c\\x5e\\xd7\\x59\\xc7\\x7f\\xcf\\x73\\xee\\\n\\xbd\\xef\\x3e\\xab\\xc7\\xf6\\x78\\x8b\\x27\\xb6\\x93\\x34\\x69\\x6a\\x92\\x36\\\n\\x0e\\x2d\\xa5\\x69\\x43\\xa0\\x6a\\xa5\\xb6\\x42\\x5d\\x40\\xaa\\x04\\x08\\x09\\\n\\x81\\x8a\\xfa\\xa1\\x2d\\x42\\xa8\\x2c\\x5f\\x5a\\x09\\xf1\\xa5\\xaa\\x54\\x2a\\\n\\x04\\x42\\x42\\x20\\x36\\xb1\\x09\\x3e\\xb4\\x82\\x92\\xaa\\x8a\\x1b\\xba\\x85\\\n\\x42\\xea\\x6c\\x76\\x62\\x3b\\xb1\\x67\\xc6\\xcb\\x78\\xc6\\x33\\xf3\\xce\\xbb\\\n\\xde\\x7b\\xcf\\x79\\xf8\\x70\\xee\\x38\\xe3\\xa4\\x69\\xdd\\x94\\x19\\x1d\\xdd\\\n\\xfb\\xce\\xdc\\xf3\\xde\\xf3\\xec\\xff\\xff\\xf3\\x88\\x99\\x19\\x3f\\xf6\\x4f\\\n\\xa8\\xae\\xf6\\x8a\\x7b\\x83\\xed\\xaf\\x37\\x40\\x35\\x7e\\x16\\x07\\x08\\xa0\\\n\\xd5\\xb3\\xf2\\xba\\xdf\\x9c\\xbc\\xbe\\x6d\\x16\\x0f\\x24\\xf6\\xf2\\xa1\\x43\\\n\\x80\\x62\\x13\\xbb\\xfe\\x1c\\xd6\\x5d\\x44\\x06\\x97\\x60\\xb8\\x86\\x95\\x43\\\n\\x04\\x8f\\x99\\x40\\x6d\\x06\\x6b\\xed\\xc5\\xcd\\x1d\\x87\\x5d\\x6f\\x82\\xa4\\\n\\x0e\\x26\\x60\\x0a\\x2a\\x95\\x20\\xf6\\x23\\x09\\x24\\x3f\\xb2\\x05\\x82\\xaf\\\n\\x5e\\xe6\\x21\\x94\\xd8\\xd2\\x49\\xc2\\xa5\\x93\\x48\\x6f\\x09\\x24\\x40\\xd2\\\n\\x86\\x5a\\x13\\x92\\x1a\\xb8\\x0c\\x24\\x79\\xd9\\x4a\\x3e\\x87\\x7c\\x08\\xe3\\\n\\x2d\\x18\\x6f\\x61\\x49\\x07\\x3d\\xfa\\x01\\xe4\\xc0\\xc3\\xf1\\x59\\x13\\x10\\\n\\xf9\\xff\\x16\\xc0\\x76\\x68\\x26\\x6a\\xdb\\x36\\x2f\\x10\\xce\\xfc\\x3d\\xb2\\\n\\xfe\\x34\\xd4\\x3a\\xd0\\x98\\x81\\xc6\\x14\\x92\\xb5\\x21\\x6b\\x46\\xcd\\xba\\\n\\x2c\\xba\\x8a\\xba\\x6a\\xbb\\x41\\x28\\xa1\\x1c\\x43\\xde\\x87\\x71\\x17\\x1b\\\n\\x5c\\x87\\xfe\\x0a\\x36\\xd8\\x44\\x8e\\xbe\\x0f\\x3d\\xfa\\x41\\xd0\\x5a\\xf5\\\n\\x3e\\xb9\\x25\\xf7\\xba\\x05\\x01\\x42\\xf5\\x25\\x25\\x0c\\xd7\\xf0\\x4f\\x7e\\\n\\x01\\xd9\\x3a\\x07\\xed\\x7d\\x30\\xb1\\x07\\x69\\xec\\x86\\xe6\\x0c\\x64\\x6d\\\n\\x48\\xea\\x88\\x1a\\x94\\xdd\\xe8\\x52\\xf5\\x19\\xa0\\x24\\xac\\x7c\\x07\\x36\\\n\\x2f\\x42\\xf0\\x98\\x82\\x34\\xf7\\x21\\xed\\x05\\xb0\\x0c\\x06\\xd7\\xb1\\xfe\\\n\\x15\\xd8\\x58\\x82\\xd1\\x16\\xbc\\xe5\\xb7\\xd0\\x5d\\xc7\\xab\\xf8\\xd0\\x1f\\\n\\xd7\\x02\\xdb\\x41\\xe9\\x09\\xcf\\xfc\\x05\\x2c\\x7d\\x15\\x3a\\xfb\\x91\\xc9\\\n\\x03\\x51\\x80\\xd6\\x1c\\xa2\\x46\\xd8\\xba\\x00\\xfd\\x2b\\xd8\\xe0\\x1a\\xf4\\\n\\x16\\xb1\\xee\\x12\\x36\\x5e\\x85\\x99\\x79\\x64\\x72\\x81\\x70\\xfa\\xcb\\x48\\\n\\xa8\\x5e\\x23\\x95\\x46\\xb3\\x1a\\xba\\xe7\\x38\\xb2\\xfb\\xcd\\xc8\\xd4\\xfd\\\n\\xd0\\x5f\\xc5\\x36\\x2e\\xc2\\xc6\\x8b\\xd8\\xec\\x7d\\xb8\\x9f\\xf8\\x38\\x68\\\n\\x5a\\x09\
\x21\\xaf\\x47\\x00\\x8b\\xfe\\x9e\\xaf\\x13\\xbe\\xf9\\x7b\\x20\\x1e\\\n\\x66\\x16\\x90\\x89\\x83\\xd0\\xda\\x03\\xe3\\x65\\x6c\\xfd\\x2c\\xd6\\xdf\\x02\\\n\\xd7\\x06\\xcd\\xc0\\x0a\\x18\\x6f\\x60\\xfd\\x25\\xe8\\x5f\\x24\\x84\\x2e\\xda\\\n\\x6c\\x11\\x36\\x57\\x90\\xe0\\x21\\x48\\xe5\\x56\\x49\\x3c\\x94\\x05\\x30\\x8f\\\n\\xcc\\xdc\\x86\\x2c\\xbc\\x17\\x6d\\xdd\\x05\\x9b\\x17\\xb1\\xb5\\xb3\\xe0\\x41\\\n\\xdf\\xfe\\x87\\x90\\x4e\\x82\\xbc\\xb6\\x10\\xaf\\x21\\x40\\xa5\\xf9\\xde\\x12\\\n\\xe1\\xe4\\x6f\\xc3\\xd4\\x3c\\x32\\x73\\x14\\xa6\\xf6\\x23\\x59\\x07\\x7f\\xfe\\\n\\xef\\xb0\\x2b\\x67\\x90\\xce\\x1d\\xd0\\x39\\x8c\\x24\\x35\\xac\\x1c\\x43\\xb1\\\n\\x85\\x8d\\xd6\\x60\\x78\\x29\\x2e\\xe9\\x83\\x7a\\xc8\\xc7\\x58\\x69\\x31\\xa0\\\n\\x5d\\x0b\\x71\\x59\\xd4\\xac\\x2f\\xb0\\x90\\x43\\x39\\x04\\x2b\\xd0\\xdb\\xdf\\\n\\x85\\x1e\\xfe\\x08\\x74\\xaf\\x60\\xab\\xcf\\xc3\\xd6\\x2a\\xfa\\xd0\\x1f\\x41\\\n\\x63\\xfe\\x35\\x2d\\xf1\\xea\\x34\\x6a\\x06\\x12\\xb0\\xee\\x39\\xfc\\xe3\\x9f\\\n\\x46\\x67\\x8e\\x20\\x73\\x77\\x40\\xe7\\x00\\x52\\xaf\\xe1\\x97\\xbe\\x42\\x78\\\n\\xe1\\x2b\\x48\\x73\\x3f\\x84\\x02\\xc6\\xd7\\xb1\\x61\\x01\\xf9\\x26\\x56\\x6c\\\n\\x62\\xa3\\x75\\x6c\\xbc\\x81\\x58\\x1f\\xad\\x0b\\x86\\xc4\\xd7\\x88\\x07\\xcd\\\n\\x10\\x57\\x83\\xb4\\x11\\x0f\\xe4\\x02\\x12\\x72\\x4c\\x14\\xca\\x2d\\xc2\\x0b\\\n\\xff\\x01\\x7e\\x84\\xde\\xf9\\x1b\\x88\\x4b\\x31\\x7d\\x9e\\xf0\\xd8\\xa7\\xd0\\\n\\x77\\x7c\\x0e\\x9a\\xf3\\xdf\\x37\\xa8\\x93\\x9b\\xb5\\x6e\\x31\\xb7\\x8f\\x56\\\n\\xb1\\xc7\\x7f\\x1f\\x9d\\x3d\\x82\\xec\\xbe\\x1b\\x9a\\x93\\x84\\x97\\xfe\\x12\\\n\\x56\\xbe\\x0b\\x0b\\xef\\x86\\x7a\\x13\\x2b\\x36\\xb0\\xee\\x79\\xa4\\x7f\\x19\\\n\\x2c\\x87\\xa2\\x87\\x95\\x7d\\x28\\x86\\x48\\x99\\x83\\x0b\\x84\\xac\\x81\\xb8\\\n\\x14\\x9c\\x47\\xbc\\xc3\\xc4\\x30\\x0d\\x88\\x05\\xd0\\x04\\x53\\x45\\x54\\x11\\\n\\x2b\\x31\\x3f\\x04\\x55\\xc2\\x85\\xaf\\x23\\x7b\\x1f\\x40\\xa6\\x4f\\x20\\xa6\\\n\\x98\\x18\\xfe\\xe4\\xa7\\x70\\x0f\\xff\\x31\\x24\\x53\\xb1\\x18\\xee\\xc8\\x52\\\n\\x3b\\xc2\\xbc\\x92\\xcc\\x97\\x84\\xc7\\x7f\\x07\\xa6\\xf6\\x21\\x73\\x77\\xc2\\\n\\xc4\\x7e\\xc2\\xb9\\x7f\\x80\\xa5\\xa7\\xa1\\x50\\x78\\xf1\\x3f\\xd1\\xc3\\x6f\\\n\\xc7\\xca\\x3e\\x36\\x58\\x24\\x6c\\x9e\\x23\\x6c\\xbd\\x84\\x0d\\x2e\\x63\\xa3\\\n\\x0d\\x2c\\x1f\\x44\\x81\\xcc\\x62\\x5a\\xd7\\x2a\\xb7\\x9b\\x07\\x2b\\x90\\xbc\\\n\\x0f\\xc5\\x08\\x2b\\x47\\x20\\x1e\\xb3\\x02\\x23\\x80\\x1a\\x24\\x8a\\xce\\x2e\\\n\\xa0\\x9d\\xc3\\xd8\\xea\\xa9\\xe8\\xba\\xbb\\xee\\x44\\x3a\\xbb\\x09\\xff\\xf5\\\n\\xe9\\xf8\\x1d\\xaf\\xb0\\xc2\\x8e\\x18\\xb0\\x98\\xf2\\x9e\\xfe\\x73\\xb8\\xf6\\\n\\x5d\\x64\\xfe\\x7e\\x98\\xbb\\x93\\x70\\xf1\\x4b\\xb0\\xf8\\xdd\\x28\\xb9\\x1f\\\n\\x80\\xdf\\x84\\x54\\xb0\\xdd\\x77\\x13\\xce\\x9e\\x04\\x6f\\x10\\x0c\\x2b\\x03\\\n\\x08\\x08\\x0a\\x18\\x92\\x25\\xd0\\xaa\\xa1\\xed\\x36\\x61\\x6b\\x88\\x0d\\x73\\\n\\x24\\x35\\xc4\\x1c\\xd0\\xc0\\xd2\\x06\\x24\\x0e\\x91\\x40\\x08\\x43\\xa4\\xe8\\\n\\xa3\\x13\\x73\\xc8\\xfe\\x77\\x63\\xdf\\xf8\\x32\\x61\\x71\\x09\\x39\\xb8\\x80\\\n\\xfe\\xe4\\x87\\x90\\xac\\x8d\\x5d\\x39\\x05\\xd3\\x6f\\x42\\x8f\\x7f\\x0c\\x70\\\n\\xaf\\xb4\\x40\\xe5\\x3e\\x5b\\xcb\\xd8\\x85\\xaf\\xc2\\xec\\x02\\x4c\\x1d\\xc2\\\n\\x06\\x4b\\xb0\\xf4\\x2c\\x64\\x73\\x90\\x4c\\x83\\x34\\xb1\\xe0\\x60\\xb0\\x05\\\n\\xab\\x67\\x70\\x07\\xdf\\x8c\\x09\\x31\\x9b\\x08\\x60\\x42\\x28\\x2c\\x5a\\x2a\\\n\\x08\\xe6\\x41\\x9c\\x03\\x34\\x1a\\xbd\\x34\\xac\\x28\\x30\\x5f\\x62\\xbe\\x40\\\n\\xac\\xc0\\xac\\x40\\xc4\\xd0\\xc9\\x3d\\xd8\\xcc\\x09\\xfc\\x97\\xff\\x9a\\x70\\\n\\x69\\x19\\x50\\x6c\\xf1\\x22\\xe5\\x
bf\\xff\\x19\\x36\\xde\\x80\\xe9\\xdb\\xb1\\\n\\xcb\\x27\\xb1\\x8d\\x73\\x3b\\xf0\\xd6\\x4d\\x2e\\x14\\xf0\\x4f\\xfd\\x09\\x32\\\n\\x75\\x00\\x99\\x38\\x88\\x34\\x67\\xe0\\xdc\\xa3\\x20\\x1d\\x48\\x26\\x41\\x1b\\\n\\x11\\xb3\\x94\\x01\\xf3\\x01\\xe9\\xae\\x40\\xe8\\xa1\\x7b\\x8e\\x81\\x73\\x11\\\n\\x9f\\xf9\\x98\\x7a\\x83\\x2f\\xb1\\x32\\xa0\\x41\\x08\\x54\\x10\\x81\\x28\\x50\\\n\\x34\\x74\\x19\\x83\\x37\\x14\\xb1\\x40\\x36\\x26\\xb1\\xfa\\x1d\\xd8\\x57\\xff\\\n\\x95\\xd0\\x1f\\x41\\x70\\x98\\xa6\\xa0\\x35\\x64\\x98\\x63\\xdf\\xfe\\x37\\x68\\\n\\xcd\\x22\\x93\\xb7\\x61\\xff\\xfb\\xf9\\xa8\\xb0\\x4a\\x88\\x97\\x2d\\xd0\\x5b\\\n\\x82\\xee\\x39\\x64\\x22\\x16\\xa8\\xb0\\xf6\\x14\\xe1\\xea\\x12\\xb8\\x06\\x22\\\n\\x59\\xcc\\xe1\\xde\\x43\\xe9\\x21\\xf7\\x58\\x59\\xc2\\x95\\x33\\xe8\\xe4\\x2c\\\n\\x6e\\x76\\x2f\\xb8\\x14\\xc9\\x12\\x44\\x5d\\x14\\x28\\x51\\xbc\\x73\\x98\\x64\\\n\\x90\\x08\\xa6\\x60\\x22\\x98\\x09\\x46\\x81\\x85\\x3e\\xe6\\x7b\\x58\\x92\\x20\\\n\\x6e\\x17\\xe1\\x5b\\x8f\\x62\\xe3\\x12\\xb0\\x98\\x95\\x5c\\x1d\\x6a\\x6d\\x48\\\n\\x3b\\x84\\xab\\xab\\xd8\\xe5\\x53\\xc8\\xc4\\x01\\x28\\x36\\x08\\xeb\\xcf\\x55\\\n\\x9a\\xd8\\x16\\xc0\\x02\\xe1\\x85\\x7f\\x41\\x5a\\x7b\\x62\\x75\\xad\\x4d\\x62\\\n\\x17\\xbf\\x15\\x0f\\x1d\\x0c\\xf3\\x1e\\xca\\x31\\x56\\x8c\\x09\\xbd\\x31\\xfe\\\n\\x52\\x8e\\x0d\\x03\\x21\\x0f\\xd8\\x8b\\x4f\\xc0\\x9e\\xbb\\xd1\\x4e\\x0b\\x6a\\\n\\x09\\x52\\x77\\x58\\xaa\\x90\\x24\\x88\\x26\\x90\\xb6\\x21\\x51\\x70\\x0e\\x53\\\n\\x81\\xd4\\x21\\x99\\x83\\x24\\x45\\x5b\\x53\\xb8\\xda\\x1e\\xec\\xc9\\xff\\x46\\\n\\x8a\\x80\\x85\\x80\\x18\\x88\\x29\\x22\\x69\\x04\\x84\\x59\\x1d\\xd2\\x06\\xf6\\\n\\xd2\\xd3\\xd0\\x9e\\x83\\xf6\\x5e\\xe4\\xb9\\xbf\\xd9\\xe9\\x42\\x21\\xa6\\xce\\\n\\x95\\xef\\x40\\x6b\\x16\\xea\\x53\\xa0\\x0e\\x5b\\x7d\\x2e\\x9a\\xb7\\x1c\\x62\\\n\\xe3\\x2e\\x36\\xde\\x42\\x8a\\x01\\x9a\\x94\\xc8\\x34\\xd8\\xb5\\x12\\x56\\x8b\\\n\\xe8\\x52\\x17\\xbe\\x83\\xee\\xbf\\x1f\\x69\\xd4\\x90\\x46\\x8a\\xab\\xa7\\x50\\\n\\x4b\\x91\\x2c\\x83\\xfa\\x34\\x24\\x0e\\x4b\\x14\\xc9\\xe2\\xe1\\xa5\\x96\\xa2\\\n\\x9d\\x39\\x90\\x59\\xc2\\x33\\x4f\\x61\\x95\\x5b\\x5a\\x08\\x04\\x02\\xb6\\x0d\\\n\\x83\\xc4\\xc5\\x0a\\xef\\x52\\x6c\\x79\\x11\\xc2\\x00\\x5a\\xbb\\xb0\\xcd\\xb3\\\n\\x31\\xd3\\x11\\x2a\\x0b\\xf4\\xaf\\xc6\\xcc\\x51\\x9f\\x82\\x5a\\x1b\\xeb\\x9f\\\n\\x83\\x8d\\xcb\\x48\\xd8\\x82\\x7c\\x0d\\x46\\xab\\x48\\xbe\\x8e\\x95\\x03\\x2c\\\n\\x94\\x88\\x33\\x64\\xce\\xc1\\x84\\x12\\xc6\\x05\\x36\\x1e\\x60\\x1b\\x17\\x91\\\n\\xf9\\x7b\\x91\\x76\\x0b\\x6d\\xd7\\xa1\\x9e\\xa1\\x9d\\x49\\xb4\\x39\\x87\\xa4\\\n\\x09\\x2e\\x4d\\x70\\xb5\\x0c\\x6d\\xd6\\x09\\x53\\x87\\x30\\x6b\\xc2\\xf9\\x33\\\n\\x58\\x69\\x58\\x30\\x2c\\xf8\\x88\\xd2\\x0d\\x0c\\x03\\x3c\\x62\\x1e\\x11\\x43\\\n\\x44\\x23\\x7a\\xef\\x5e\\x43\\x6a\\x93\\x90\\xb5\\x08\\x2b\\xa7\\x76\\xb8\\xd0\\\n\\xda\\x33\\x90\\x75\\x20\\x6b\\x23\\x49\\x0d\\x49\\xdb\\xe0\\x0a\\xac\\xec\\x62\\\n\\xe3\\x6b\\x30\\xba\\x8a\\xe5\\xeb\\x98\\xef\\x03\\x45\\x55\\xad\\xb7\\x63\\x53\\\n\\x10\\x1f\\xb0\\x8d\\x4b\\x08\\x25\\x3a\\x7d\\x18\\xda\\x1d\\xa4\\xd9\\x86\\xd6\\\n\\x3c\\x52\\x9f\\x06\\xd7\\x24\\x34\\x5b\\xd0\\x9a\\x80\\x3d\\xf7\\xe1\\xfa\\x63\\\n\\x6c\\xf9\\x1c\\x56\\x7a\\x08\\x01\\x09\\x01\\x11\\xc1\\x9c\\xe0\\x1c\\x88\\xf8\\\n\\x68\\x7d\\x2b\\x22\\x04\\x37\\x0f\\x66\\x88\\xcf\\x63\\x5c\\x64\\x6d\\xb8\\xf6\\\n\\x24\\x98\\x91\\x40\\xc0\\xaf\\x9e\\x42\\xb2\\x06\\x24\\x8d\\x88\\x00\\x5b\\x47\\\n\\x91\\x7b\\xde\\x05\\x67\\xbf\\x4e\\xb0\\x12\\x0a\\x8f\\x78\\x1f\\x2b\\xa6\\x5a\\\n\\x94\\x5b\\x0c\\x44\\x11\\x95\\x28\\x88\\xf7\\xc8\\xd5\\xd3\\x84
\\x85\\xb7\\x83\\\n\\x5a\\x64\\x60\\xed\\xfd\\xd0\\xde\\x87\\x4c\\xee\\xc5\\x25\\x33\\x98\\x36\\xe1\\\n\\xc2\\x13\\x84\\xee\\x2a\\xe2\\xa3\\xd6\\x11\\xc3\\x2a\\xca\\x20\\x56\\xd5\\x2a\\\n\\x09\\x40\\x81\\xd9\\x18\\x8a\\x8a\\xed\\x85\\x22\\x82\\xc0\\xa4\\x0e\\x59\\x13\\\n\\xdb\\x38\\x0b\\x62\\x24\\x60\\xe8\\x68\\x0d\\x73\\x35\\x70\\x29\\xa8\\x62\\x2b\\\n\\x27\\xb1\\x95\\x27\\xb0\\x66\\x12\\x09\\x95\\x28\\x66\\x09\\x42\\x52\\xa5\\x46\\\n\\x83\\x3c\\x20\\xb9\\x8f\\x99\\x49\\x04\\x04\\x42\\x08\\xe8\\xe5\\xef\\x61\\x87\\\n\\x7f\\x0a\\xf1\\x63\\x64\\xe6\\x0d\\xd0\\xdc\\x87\\x1c\\xfe\\x45\\x42\\x7f\\x19\\\n\\x5e\\xfc\\x1a\\x41\\x14\\x49\\xab\\x18\\x31\\x8b\\xb5\\xa2\\xf4\\xe0\\x63\\x1a\\\n\\x8e\\x82\\x49\\xac\\xd2\\x7e\\x84\\x50\\x46\\x54\\x1c\\x0a\\x98\\x9a\\xaf\\x62\\\n\\xa2\\x86\\xf4\\x57\\x21\\x04\\x12\\x10\\x7c\\xb1\\x85\\xd6\\xdb\\x37\\x60\\xab\\\n\\xf5\\xcf\\x63\\xeb\\xab\\x98\\xba\\x97\\x31\\x8b\\xab\\x3c\\xd3\\x04\\x71\\x02\\\n\\x59\\x02\\xcd\\x46\\xdc\\x52\\x81\\x40\\x21\\x44\\xc6\\xb5\\xf6\\x12\\x72\\xc7\\\n\\x2f\\x40\\x36\\x41\\x30\\x43\\x92\\x46\\x74\\xa5\\xa9\\xbd\\x88\\x2f\\xc0\\x17\\\n\\x31\\x40\\x6b\\x25\\xe2\\x4b\\x28\\x02\\x56\\x94\\x48\\x19\\xb9\\xb6\\xfa\\x00\\\n\\x85\\x81\\x2f\\xa1\\xc8\\xc1\\x07\\xe4\\xe0\\xed\\xe8\\x9e\\xe3\\xd8\\xe6\\x72\\\n\\x64\\x79\\x36\\x02\\xd5\\x0a\\xcc\\xd9\\x8e\\x6a\\xbc\\x03\\x1b\\x89\\x24\\x31\\\n\\xbf\\x4b\\x65\\xe3\\x50\\xa0\\xc1\\x30\\x33\\x28\\x0d\\x92\\x82\\x90\\x38\\x44\\\n\\x04\\x9c\\x21\\x99\\xc3\\x3a\\x09\\x34\\x06\\x58\\xb9\\x08\\xb5\\x7b\\x11\\x15\\\n\\xc4\\x20\\x84\\x02\\x48\\x91\\x56\\x1b\\xc8\\x21\\x4d\\x20\\xe4\\x90\\x97\\xd1\\\n\\xfd\\x4a\\x8f\\x95\\x1e\\x29\\x02\\x38\\x10\\x4d\\x30\\xd7\\x42\\xb4\\x01\\x9d\\\n\\x39\\xdc\\x91\\x47\\x5e\\x26\\x43\\x80\\x59\\x00\\x2c\\x5a\\x40\\xb2\\x69\\xf0\\\n\\xbd\\x2a\\x60\\x2c\\x12\\x73\\x49\\x30\\x9f\\x80\\x6b\\xa0\\x64\\x31\\xd5\\x85\\\n\\xd1\\x8d\\xc0\\x92\\xb4\\x04\\x4b\\x08\\x45\\x89\\xba\\x00\\x22\\x84\\xa0\\x68\\\n\\x0a\\x52\\x2b\\x60\\x70\\x19\\x9a\\x77\\x21\\x96\\xc6\\xca\\x3d\\x1c\\x20\\x21\\\n\\x60\\x45\\x4c\\xbd\\x14\\x63\\x6c\\x38\\x42\\xc6\\x39\\x14\\x1e\\x7c\\xf4\\x75\\\n\\x2b\\x41\\x0a\\x8b\\xc5\\xb6\\xec\\x61\\x05\\xe0\\xcf\\xe1\\x4f\\x3f\\x4b\\xf2\\\n\\xd1\\x43\\xd1\\x65\\x83\\x8f\\xb0\\x1c\\x21\\xc1\\xc0\\xed\\x7e\\x13\\x61\\xf9\\\n\\x51\\x28\\x86\\x10\\x72\\x64\\xf2\\x8d\\x50\\x4b\\xa1\\x74\\x11\\x97\\xbb\\x3a\\\n\\x62\\x86\\x04\\x25\\x8c\\xfa\\x80\\x11\\x8a\\x00\\x79\\x8e\\x4b\\x15\\x5f\\x3a\\\n\\x9c\\x05\\xb4\\x2e\\x30\\xaa\\xc7\\x14\\xe9\\x07\\x48\\xf9\\x18\\x34\\x76\\xc1\\\n\\xb3\\x4f\\xc3\\xd5\\x6b\\x58\\x28\\xa1\\x9d\\x42\\xcb\\x63\\x56\\x11\\x9d\\xfe\\\n\\x08\\xc6\\x25\\xf8\\x80\\x85\\xaa\\x5d\\x63\\x0e\\xcc\\x61\\x21\\x8d\\x5e\\x20\\\n\\x25\\xb6\\xd1\\xc5\\x9f\\x79\\x0c\\xbd\\xed\\x04\\x94\\x63\\xa4\\x3e\\x5b\\x59\\\n\\x40\\x04\\xf6\\x9c\\x80\\xb3\\xff\\x1c\\x3b\\x05\\xc5\\x10\\x69\\x1c\\x42\\x67\\\n\\xf6\\x13\\x56\\xae\\xc4\\x3a\\xe7\\xa4\\x7a\\xd4\\x20\\x29\\xb1\\x32\\x47\\x2d\\\n\\x80\\x73\\x04\\x13\\xd4\\x59\\x2c\\x56\\x45\\x13\\xb1\\x16\\x36\\xec\\xc3\\xc6\\\n\\x19\\x68\\xd5\\x90\\xb9\\xe3\\xd8\\xf3\\xcf\\x62\\xbd\\x01\\xa2\\x86\\x6d\\x64\\\n\\x48\\x96\\x21\\x33\\x75\\x2c\\x49\\x62\\x06\\xcb\\x4b\\x18\\x7b\\xc4\\xc7\\x0c\\\n\\x67\\xa2\\x11\\xa9\\x26\\x8d\\xa8\\x63\\xf1\\x10\\x0c\\xfa\\xeb\\x91\\xbd\\x15\\\n\\x03\\x6c\\xe6\\x3e\\x84\\xed\\x18\\xe8\\xec\\x07\\x49\\xb1\\xc1\\x1a\\x32\\xea\\\n\\xc6\\xea\\xd9\\x39\\x88\\x5d\\x5d\\x46\\x7c\\x81\\x49\\x89\\x38\\x05\\x4b\\x50\\\n\\x97\\x10\\x7c\\x86\\x69\\x41\\x90\\x80\\x9a\\x20\\x89\\x43\\xa5\\x86\\x2f\\x6b\\\n\\xc8\\x
e6\\x75\\x44\\x20\\x24\\x82\\x90\\x61\\x53\\x01\\x1b\\x74\\x09\\xc3\\x21\\\n\\xe2\\x81\\x34\\x41\\x5a\\x8d\\x18\\xa0\\xed\\x3a\\xda\\x32\\xc8\\x8d\\x50\\x8e\\\n\\xb0\\xdc\\xc7\\x0c\\xe4\\x1c\\x9a\\xd6\\x62\\x8b\\x45\\x32\\x84\\x31\\x26\\x0e\\\n\\xd2\\x0c\\xc6\\x3d\\xc8\\xbb\\xe8\\xfc\\x03\\x6c\\x83\\x77\\xc0\\xa1\\x47\\x3f\\\n\\x00\\xbd\\xab\\x30\\x58\\xc5\\xf2\\x1e\\x32\\x7b\\x2f\\x22\\x01\\x24\\x47\\xb4\\\n\\xc0\\xc4\\x13\\x1c\\xe0\\x1c\\xa4\\x8a\\xa9\\x20\\x89\\x22\\x4d\\x87\\xd6\\x1a\\\n\\x58\\x09\\x6c\\x6d\\x41\\x39\\x26\\xe4\\x23\\xa4\\x18\\x57\\x8d\\xac\\xc8\\x7d\\\n\\xc5\\x02\\x58\\xb4\\x9e\\x0d\\x7a\\xd0\\xef\\x23\\x5b\\x23\\x18\\x34\\xa0\\xdd\\\n\\x86\\x46\\x0a\\x59\\x82\\xa8\\x02\\x0e\\x21\\x03\\xd2\\x8a\\x92\\x56\\x45\\x62\\\n\\x66\\xbe\\x22\\x4d\\x43\\x98\\xbe\\x6b\\x5b\\x80\\xea\\x81\\xdb\\xdf\\x8b\\x0c\\\n\\x7b\\x58\\xf7\\x32\\x0c\\xaf\\xa3\\x73\\x0f\\xa3\\x7b\\x16\\xc0\\xe7\\x58\\x31\\\n\\x46\\xfc\\x18\\xb5\\x02\\xc3\\x23\\x4e\\x10\\xa7\\x88\\x4b\\xd0\\x46\\x13\\xc6\\\n\\x63\\x64\\x34\\x82\\x72\\x8c\\x2f\\x73\\xd4\\x57\\x30\\xd9\\x35\\x60\\xf3\\x3a\\\n\\xb8\\x10\\xc9\\xab\\xc6\\x4a\\x65\\x65\\x49\\x18\\x0d\\xb1\\xfe\\x80\\xd0\\x1b\\\n\\x20\\xd6\\x44\\x26\\x5a\\x51\\x88\\x5a\\x8a\\x66\\x31\\x75\\x1b\\x20\\xc1\\x47\\\n\\xee\\x70\\xf0\\x36\\x74\\x72\\x01\\x06\\xab\\xe8\\xf4\\x3d\\x31\\x95\\x56\\x25\\\n\\x35\\xb2\\x1b\\xa9\\xc1\\x3d\\xbf\\x0c\\x9b\\x8b\\xb0\\x75\\x19\\xcb\\x47\\xc8\\\n\\x81\\x77\\xc4\\x97\\x86\\x21\\xe6\\x87\\x58\\x88\\x41\\x0e\\x01\\xcd\\x12\\x92\\\n\\x2c\\xc3\\xb6\\x7a\\x58\\x51\\x10\\x42\\xc5\\xc8\\x42\\xc0\\xaa\\x5f\\xd2\\x89\\\n\\xd8\\xd0\\x4a\\x04\\xd2\\xf8\\x1a\\x4d\\x24\\x56\\xda\\xb2\\x84\\xe1\\x10\\x19\\\n\\x0f\\xf1\\x1b\\x5b\\x90\\x4e\\x21\\xed\\x1a\\xd2\\x70\\x58\\x26\\x20\\x86\\x58\\\n\\xd5\\xb1\\x08\\x63\\xe4\\xde\\x77\\xc2\\xe0\\x1a\\x74\\xaf\\x20\\x6f\\xf8\\xe8\\\n\\x0d\\x56\\xb6\\x83\\xd0\\x38\\xe4\\xd0\\x23\\x80\\x8b\\x0d\\xa6\\xde\\x0a\\xba\\\n\\xfb\\x3d\\xe8\\xc2\\xfd\\x50\\xe6\\xe0\\xfb\\x50\\x0e\\x20\\x0c\\x23\\x64\\x2e\\\n\\x4b\\xfc\\xa0\\x87\\xe5\\x3e\\x56\\x50\\x51\\x24\\x48\\x84\\xe0\\xa5\\x21\\x1e\\\n\\x24\\xcd\\xb0\\xcd\\x25\\x24\\x55\\x2c\\x11\\x2c\\x01\\x8f\\x80\\x13\\x24\\x05\\\n\\x73\\x12\\xd3\\xa5\\x29\\xd6\\x1d\\x21\\xfb\\x8e\\x40\\xa6\\xd1\\x75\\x19\\x43\\\n\\x39\\xc0\\xf2\\x3e\\x7a\\xfc\\x04\\x3a\\x7d\\x2c\\x16\\xb1\\xe6\\x6e\\x98\\xbc\\\n\\xfd\\x95\\x8c\\x6c\\xbb\\xf7\\xa9\\xc8\\x9b\\x3f\\x05\\xeb\\x17\\xb1\\xcd\\x45\\\n\\x6c\\xb8\\x8e\\xbb\\xfb\\x93\\xe8\\xb1\\xfb\\x2b\\x70\\x35\\x42\\xa7\\x26\\xb1\\\n\\xde\\x10\\xc6\\x23\\x7c\\x6e\\xb1\\x31\\x4d\\x42\\xb0\\x34\\x56\\xd7\\x32\\x16\\\n\\x45\\x11\\x08\\x3e\\x44\\x9f\\x77\\x1a\\x15\\xa6\\x82\\x4b\\x40\\x52\\x87\\x25\\\n\\x2e\\x16\\x33\\x95\\xe8\\x07\\x02\\xb6\\x7c\\x05\\x39\\x76\\x17\\x86\\x8f\\x29\\\n\\xbd\\xe8\\x23\\x47\\x8e\\xa2\\xf7\\xfe\\x3c\\xd6\\x5d\\x84\\x8d\\x25\\xe4\\xbe\\\n\\x4f\\x56\\x28\\x52\\x5f\\x49\\x29\\xe3\\xad\\x4c\\x1c\\x81\\x83\\x8f\\xc0\\xfa\\\n\\x79\\xd8\\x5c\\xc2\\xf2\\x12\\x77\\xfc\\x33\\xb8\\x07\\x7e\\x0d\\x3d\\xfc\\x20\\\n\\xe1\\xda\\x2a\\x36\\x1a\\x42\\x29\\xb8\\x90\\x62\\xd2\\x46\\x74\\x02\\x71\\x6d\\\n\\x4c\\x6a\\x90\\x24\\x11\\x4f\\x89\\xa0\\x12\\x62\\xf6\\xca\\x04\\x4d\\x04\\x49\\\n\\x05\\xcb\\x14\\xab\\x39\\x68\\x24\\x50\\x77\\x48\\x4d\\x23\\x34\\x31\\x43\\x7c\\\n\\x81\\x8c\\x0c\\xbd\\xfd\\xee\\x58\\x8f\\x8e\\xde\\x8d\\x7b\\xe8\\xd7\\xa1\\x7f\\\n\\x0d\\xd6\\xce\\xc3\\xa1\\x87\\x91\\xce\\xc1\\x1b\\xfe\\xcf\\xab\\xbb\\xa7\\x0e\\\n\\xc4\\xa1\\xf7\\xfc\\x52\\x24\\xe5\\xeb\\xe7\\xa0\\xbb\\x0c\\xa3\\x1e\\x3a\\xf3\\\n\\x20\\x76\\xe1\\x09\\x6c\\x3c\\x82
\\x00\\x66\\x09\\xa6\\x4d\\xc4\\xb5\\x23\\x14\\\n\\x77\\x0d\\x48\\x5b\\x58\\x16\\x85\\x10\\x0c\\x0b\\x01\\xd2\\x0a\\x3b\\x69\\xd5\\\n\\x69\\x4f\\x04\\x49\\x35\\xae\\x2c\\x42\\x10\\x28\\x20\\xe4\\x58\\x59\\x60\\xcb\\\n\\xa7\\x49\\xde\\xf1\\x9b\\xe8\\x3b\\x3f\\x82\\x7b\\xf8\\x13\\x30\\xdc\\xc0\\xd6\\\n\\x5e\\x00\\xa9\\xa3\\x6f\\xfc\\xd5\\x9b\\x3a\\x12\\xdf\\x47\\x00\\xbb\\x21\\x88\\\n\\xbe\\xf5\\x33\\xb0\\xbe\\x8c\\xad\\x9d\\xc5\\xba\\xcb\\xe0\\xa6\\x90\\x3b\\x3f\\\n\\x5c\\x35\\x96\\xb6\\x97\\xc3\\x4c\\x10\\x33\\x50\\x87\\x24\\x09\\xea\\x32\\x42\\\n\\x05\\xb1\\x05\\xc0\\x55\\xfe\\x9f\\x41\\x48\\x88\\xc1\\xec\\x42\\x5c\\x8c\\xc1\\\n\\xfa\\xb1\\x55\\x93\\xaf\\x83\\xef\\x23\\x6f\\xf9\\x20\\x56\\xdb\\x87\\xde\\xf9\\\n\\x7e\\xd8\\xbc\\x84\\x5d\\x3b\\x0d\\x83\\x0d\\xf4\\xa7\\x3f\\x5b\\x6d\\xb6\\x1f\\\n\\xd4\\x5a\\xdc\\x31\\x25\\xa9\\xcf\\xc2\\x83\\xbf\\x8b\\x7d\\xfb\\xb3\\x91\\x23\\\n\\x88\\xe0\\x6e\\xfb\\x60\\xc4\\x3c\\xa7\\xfe\\x91\\x10\\x72\\x24\\x0c\\x51\\x09\\\n\\x31\\xcd\\x89\\x8b\\x1a\\x57\\x43\\xac\\xe2\\x08\\x49\\x0a\\x95\\xf6\\x49\\x04\\\n\\xc9\\x62\\xdb\\xf2\\xc6\\xc8\\x41\\x5f\\x6e\\x49\\xc9\\xd4\\x34\\x72\\xe2\\x57\\\n\\x70\\x87\\x7e\\x0e\\xdb\\xba\\x0a\\xdd\\x45\\x6c\\xf5\\x0c\\xf4\\xd6\\xd0\\x87\\\n\\x3e\\x1f\\x3b\\x23\\xc8\\x0f\\x6a\\x2d\\xbe\\xaa\\xef\\x8b\\x4e\\xbf\\x01\\xbb\\\n\\xef\\xe3\\xd8\\x93\\x5f\\xc4\\xcc\\xb0\\x50\\xa2\\x07\\xde\\x8f\\xb4\\x0f\\xa1\\\n\\x4f\\xfd\\x15\\xbe\\x7b\\x95\\x60\\x39\\x14\\x55\\x8e\\x34\\x30\\x72\\x74\\xe2\\\n\\x08\\xd6\\x3f\\x03\\x49\\x13\\x24\\x20\\x49\\x88\\xc1\\x5d\\x8b\\xbe\\x1e\\x19\\\n\\x56\\x75\\xdf\\x6c\\xa1\\xc7\\x1e\\xc6\\xdd\\xf5\\x61\\x4c\\xdb\\xd8\\xfa\\x05\\\n\\xd8\\xb8\\x88\\x5d\\x3f\\x07\\xa5\\xa1\\x3f\\xf3\\x45\\x48\\x3a\\xaf\\x39\\x2b\\\n\\xb8\\x85\\xf9\\x80\\xc7\\x2e\\x7f\\x03\\xfb\\xde\\x17\\x60\\xe6\\x08\\x32\\x73\\\n\\x04\\x3a\\xfb\\x62\\x3b\\x60\\xf1\\x4b\\x70\\xe1\\x6b\\x84\\xfe\\xf5\\xc8\\x23\\\n\\xb6\\x39\\x81\\x82\\xb4\\x26\\xe1\\x8e\\xf7\\xc1\\xd3\\xff\\x14\\x41\\x5c\\x91\\\n\\x47\\x24\\x69\\x91\\x20\\xc9\\xdc\\x31\\x64\\xfe\\x04\\xee\\xd0\\x23\\x90\\x4c\\\n\\x62\\x83\\x75\\xe8\\x5f\\xc5\\x36\\x2f\\xc2\\xfa\\x8b\\xd8\\xae\\x07\\x70\\xc7\\\n\\x3f\\xf6\\x43\\x67\\x04\\x3f\\x7c\\x42\\x53\\xf1\\x51\\xeb\\xbd\\x48\\xf8\\xc6\\\n\\x1f\\x20\\xed\\xaa\\xc1\\x34\\xb9\\x1f\\x69\\xec\\x42\\x1c\\x84\\x95\\x6f\\x62\\\n\\xd7\\xcf\\x60\\xfd\\xcb\\x91\\xe0\\xd4\\x77\\xa1\\x0b\\x3f\\x0b\\xcd\\x03\\x84\\\n\\xc5\\x47\\xa1\\x77\\x39\\x6a\\x3b\\x6d\\x41\\xe7\\x00\\x6e\\xea\\x18\\x34\\x0f\\\n\\x63\\xc5\\x00\\x86\\xdd\\x38\\x0c\\xdc\\xba\\x02\\xdd\\xa5\\x48\\x88\\x8e\\x7f\\\n\\x02\\x9d\\xbb\\xf7\\x96\\xa6\\x34\\xb7\\x38\\xe4\\xab\\xe6\\x05\\xe5\\x00\\xff\\\n\\x3f\\x9f\\x43\\x36\\x4e\\xc3\\xe4\\x21\\xa4\\x33\\x0f\\xad\\x39\\xa8\\xc7\\x4e\\\n\\x81\\x24\\xb5\\xa8\\x31\\x75\\x15\\xbb\\x0b\\x15\\x1d\\x8c\\x9c\\x36\\xce\\x10\\\n\\x46\\x50\\x0c\\xaa\\x41\\xdf\\x06\\x36\\x58\\x85\\xde\\x0a\\x8c\\xfb\\x70\\xec\\\n\\x23\\xe8\\xe1\\xf7\\x54\\x5a\\xbf\\xb5\\x39\\xd9\\x8f\\x30\\xa5\\xdc\\x31\\x52\\\n\\x5d\\x7b\\x86\\x70\\xea\\x4f\\xa1\\xdc\\x8c\\xd3\\x9a\\xd6\\x6c\\x6c\\xc9\\x64\\\n\\x2d\\x48\\x9b\\x71\\xc0\\xa7\\x49\\x7c\\xde\\x17\\x50\\x6e\\x1f\\xba\\x87\\xe5\\\n\\xbd\\x78\\xf8\\xd1\\x06\\x36\\xdc\\x44\\x1a\\x73\\xc8\\xb1\\x0f\\x21\\x7b\\x1f\\\n\\xac\\xf6\\xdc\\xda\\x6c\\xec\\x75\\x8e\\x59\\x77\\xd0\\xce\\x50\\x62\\xbd\\x0b\\\n\\x84\\xd3\\x7f\\x8b\\x5c\\x3f\\x0d\\x2e\\x81\\x7a\\x27\\x0a\\x91\\xd4\\xab\\xc3\\\n\\x50\\x0d\\x41\\xfa\\x71\\xac\\x5a\\x0e\\x22\\xe6\\x9a\\x38\\x84\\xee\\x7b\\x1b\\\n\\x32\\xff\\x36\\xd0\\x7a\\x65\\x2d\\xe5\\x55\\xad\\xfe\\x5b\\
x11\\xa0\\x2c\\x4b\\\n\\x33\\x33\\x42\\x08\\x37\\x2d\\xef\\x3d\\xde\\xfb\\x9b\\xee\\x6f\\xfe\\x5b\\x49\\\n\\x08\\x1e\\xef\\x4b\\xbc\\x2f\\xf0\\x5b\\x4b\\xd8\\xfa\\x79\\x6c\\xfd\\x79\\xca\\\n\\xfe\\x2a\\x58\\x4e\\x28\\xc6\\xf1\\x70\\xb3\\xf7\\xc0\\xfc\\x5b\\x63\\x93\\x4b\\\n\\x1c\\xaa\\x09\\xce\\x39\\x54\\x05\\xd5\\x04\\x55\\x87\\xaa\\xe2\\x9c\\xfb\\xbe\\\n\\x2b\\x49\\x12\\x54\\xf5\\xc6\\x35\\xee\\x55\\x54\\x15\\x19\\x8f\\xc7\\xb6\\xf3\\\n\\x90\\x65\\x59\\xde\\x58\\x45\\x51\\xdc\\x74\\xdd\\x5e\\xde\\x7b\\x8a\\x0a\\x81\\\n\\x7a\\x5f\\x54\\xfb\\xe2\\x7e\\xb3\\x78\\x6f\\xb6\\x4d\\x11\\xab\\xa4\\x2f\\x16\\\n\\xa7\\x31\\xe2\\x48\\x5c\\x82\\x3a\\x21\\x49\\xb2\\x1b\\x07\\x7c\\xe5\\x4a\\xd3\\\n\\xf4\\xc6\\xda\\xfe\\xbc\\xf3\\xff\\xdb\\xc2\\xfd\\x1f\\xf9\\xc5\\x9c\\x0f\\xe2\\\n\\xc6\\xe5\\x9e\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\"\n\nqt_resource_name = b\"\\\n\\x00\\x07\\\n\\x07\\x3b\\xe0\\xb3\\\n\\x00\\x70\\\n\\x00\\x6c\\x00\\x75\\x00\\x67\\x00\\x69\\x00\\x6e\\x00\\x73\\\n\\x00\\x0e\\\n\\x03\\x97\\x32\\xef\\\n\\x00\\x63\\\n\\x00\\x65\\x00\\x6e\\x00\\x73\\x00\\x6f\\x00\\x5f\\x00\\x73\\x00\\x65\\x00\\x67\\x00\\x6d\\x00\\x65\\x00\\x6e\\x00\\x74\\x00\\x6f\\\n\\x00\\x08\\\n\\x0a\\x61\\x5a\\xa7\\\n\\x00\\x69\\\n\\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\"\n\nqt_resource_struct_v1 = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x14\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\\n\\x00\\x00\\x00\\x36\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\"\n\nqt_resource_struct_v2 = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x14\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x36\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x01\\x7b\\xe0\\x34\\x94\\x5a\\\n\"\n\nqt_version = [int(v) for v in QtCore.qVersion().split('.')]\nif qt_version < [5, 8, 0]:\n rcc_version = 1\n qt_resource_struct = qt_resource_struct_v1\nelse:\n rcc_version = 2\n qt_resource_struct = qt_resource_struct_v2\n\ndef qInitResources():\n QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n", "id": "1815446", "language": "Python", "matching_score": 0.006981034763157368, "max_stars_count": 0, "path": "resources.py" }, { "content": "#\n# optimización\n#\n\n# fin de definiciones\n\n\nimport psycopg2\nimport operator\nimport time\n\nconn = psycopg2.connect(\n database = \"comuna11\",\n user = \"segmentador\",\n password = \"<PASSWORD>\",\n host = \"localhost\",\n port = \"5432\")\n\n# obtener prov, depto, frac que estan en segmentacion.conteos\nwith open('radios.sql') as file:\n sql = file.read()\ncur = conn.cursor()\ncur.execute(sql)\nradios = cur.fetchall()\n\ndef sql_where_fr(frac, radio):\n return (\"\\nwhere frac::integer = \" + str(frac)\n + \"\\n and radio::integer = \" + str(radio))\n\ndef sql_where_PPDDDLLLMMM(frac, radio, cpte, side):\n if type(cpte) is int:\n mza = cpte\n elif type(cpte) is tuple:\n (mza, lado) = cpte\n where_mza = (\"\\nwhere (mza\" + side 
+ \",9,2)::integer = \" + str(frac)\n + \"\\n and substr(mza\" + side + \",11,2)::integer = \" + str(radio)\n + \"\\n and substr(mza\" + side + \",13,3)::integer = \" + str(mza)\n )\n if type(cpte) is tuple:\n where_mza = (where_mza\n + \"\\n and lado\" + side + \"::integer = \" + str(lado))\n return where_mza\n\nfor frac, radio in radios:\n print\n print \"radio: \"\n print frac, radio\n cur = conn.cursor()\n \n sql = (\"select mza_comuna as mza, count(*) as conteo from comuna11\"\n + sql_where_fr(frac, radio)\n + \"\\ngroup by mza_comuna;\")\n cur.execute(sql)\n conteos_mzas = cur.fetchall()\n manzanas = [mza for mza, conteo in conteos_mzas]\n\n# print >> sys.stderr, \"conteos_mzas\"\n# print >> sys.stderr, conteos_mzas\n\n sql = (\"select mza_comuna as mza, lado, count(*) as conteo from comuna11\"\n + sql_where_fr(frac, radio)\n + \"\\ngroup by mza_comuna, lado;\")\n cur.execute(sql)\n result = cur.fetchall()\n conteos_lados = [((mza, lado), conteo) for mza, lado, conteo in result]\n lados = [(mza, lado) for mza, lado, conteo in result]\n\n# print >> sys.stderr, \"conteos_lados\"\n# print >> sys.stderr, conteos_lados\n\n\n sql = (\"select mza_comuna as mza, max(lado) from comuna11\"\n + sql_where_fr(frac, radio)\n + \"\\ngroup by mza_comuna;\")\n cur.execute(sql)\n mza_ultimo_lado = cur.fetchall()\n\n sql = (\"select mza, mza_ady from adyacencias_mzas\"\n + sql_where_fr(frac, radio)\n + \"\\ngroup by mza, mza_ady;\")\n cur.execute(sql)\n adyacencias_mzas_mzas = cur.fetchall()\n\n sql = (\"select mza, mza_ady, lado_ady from adyacencias_mzas\"\n + sql_where_fr(frac, radio)\n + \"\\n and mza != mza_ady\"\n + \";\")\n cur.execute(sql)\n result = cur.fetchall()\n adyacencias_mzas_lados = [(mza, (mza_ady, lado_ady)) for mza, mza_ady, lado_ady in result]\n\n sql = (\"select mza, lado, mza_ady from segmentacion.adyacencias\"\n + sql_where_pdfr(prov, depto, frac, radio)\n + \"\\n and mza != mza_ady\"\n + \";\")\n cur.execute(sql)\n result = cur.fetchall()\n adyacencias_lados_mzas= [((mza, lado), mza_ady) for mza, lado, mza_ady in result]\n\n sql = (\"select mza, lado, mza_ady, lado_ady from segmentacion.adyacencias\"\n + sql_where_pdfr(prov, depto, frac, radio)\n + \"\\n and mza != mza_ady\"\n + \";\")\n cur.execute(sql)\n result = cur.fetchall()\n lados_enfrentados = [((mza, lado), (mza_ady, lado_ady)) for mza, lado, mza_ady, lado_ady in result]\n\n lados_contiguos = []\n for mza, lado in lados:\n ultimo_lado = next(ultimo for mza, ultimo in mza_ultimo_lado)\n if lado == 1:\n lados_contiguos.append(((mza, lado),(mza, ultimo_lado)))\n lados_contiguos.append(((mza, lado),(mza, lado + 1)))\n elif lado == ultimo_lado:\n lados_contiguos.append(((mza, lado),(mza, lado - 1)))\n lados_contiguos.append(((mza, lado),(mza, 1)))\n else:\n lados_contiguos.append(((mza, lado),(mza, lado - 1)))\n lados_contiguos.append(((mza, lado),(mza, lado + 1)))\n\n conteos = conteos_mzas\n adyacencias = adyacencias_mzas_mzas\n\n conteos_excedidos = [(manzana, conteo) for (manzana, conteo) in conteos_mzas\n if conteo > cantidad_de_viviendas_maxima_deseada_por_segmento]\n mzas_excedidas = [mza for mza, conteo in conteos_excedidos]\n\n componentes = [mza for mza in manzanas if mza not in mzas_excedidas]\n conteos = [(mza, conteo) for (mza, conteo) in conteos if mza not in mzas_excedidas]\n adyacencias = [(mza, mza_ady) for (mza, mza_ady) in adyacencias\n if mza not in mzas_excedidas and mza_ady not in mzas_excedidas]\n # se eliminana manzanas excedidas\n\n componentes.extend([(mza, lado) for (mza, lado) in lados if mza in 
mzas_excedidas])\n conteos.extend([((mza, lado), conteo) for ((mza, lado), conteo) in conteos_lados\n if mza in mzas_excedidas])\n adyacencias.extend([((mza, lado), mza_ady) for (mza, lado), mza_ady in adyacencias_lados_mzas\n if mza in mzas_excedidas and mza_ady not in mzas_excedidas])\n adyacencias.extend([(mza, (mza_ady, lado_ady))\n for mza, (mza_ady, lado_ady) in adyacencias_mzas_lados\n if mza not in mzas_excedidas and mza_ady in mzas_excedidas])\n adyacencias.extend([((mza, lado), (mza_ady, lado_ady))\n for (mza, lado), (mza_ady, lado_ady) in lados_enfrentados\n if mza in mzas_excedidas and mza_ady in mzas_excedidas])\n adyacencias.extend([((mza, lado), (mza_ady, lado_ady))\n for (mza, lado), (mza_ady, lado_ady) in lados_contiguos])\n # se agregan los lados correspondientes a esas manzanas\n\n# print >> sys.stderr, \"componentes\"\n# print >> sys.stderr, componentes\n\n#---- hasta acá\n\n if adyacencias:\n start = time.time()\n# print adyacencias\n\n # crea los dictionary\n componentes_en_adyacencias = list(set([cpte for cpte, cpte_ady in adyacencias]))\n todos_los_componentes = list(set(componentes + componentes_en_adyacencias))\n\n # print \"no están en listado\", manzanas_sin_viviendas\n # hay que ponerle 0 viviendas\n viviendas = dict()\n for cpte in componentes:\n viviendas[cpte] = 0\n for cpte, conteo in conteos:\n viviendas[cpte] = int(conteo)\n\n componentes_no_en_adyacencias = list(set(todos_los_componentes) - set(componentes_en_adyacencias))\n # print \"no están en cobertura\", manzanas_no_en_adyacencias\n # hay que ponerle nula la lista de adyacencias\n adyacentes = dict()\n for cpte in todos_los_componentes:\n adyacentes[cpte] = list([])\n for cpte, adyacente in adyacencias:\n adyacentes[cpte] = adyacentes[cpte] + [adyacente]\n# for manzana in sorted(adyacentes.iterkeys()):\n# print manzana, adyacentes[manzana]\n\n # optimización\n\n ##############################\n # soluciones iniciales\n soluciones_iniciales = []\n # iniciando de un extremo de la red de segmentaciones: segmento único igual a todo el radio\n todos_juntos = [componentes]\n soluciones_iniciales.append(todos_juntos)\n # iniciando del otro extremo de la red de segmentaciones: un segmento por manzana\n todos_separados = [[cpte] for cpte in componentes]\n soluciones_iniciales.append(todos_separados)\n ##############################\n\n # TODO: cargar el segmento de la segmentación anterior sgm en segmentacio.conteos para el caso de lados\n\n costo_minimo = float('inf')\n for solucion in soluciones_iniciales:\n # algoritmo greedy\n vecinos = list(vecindario(solucion))\n costo_actual = costo_segmentacion(solucion)\n costos_vecinos = map(costo_segmentacion, vecinos)\n\n while min(costos_vecinos) < costo_actual: # se puede mejorar\n min_id, mejor_costo = min(enumerate(costos_vecinos), key=operator.itemgetter(1))\n solucion = vecinos[min_id] # greedy\n# print >> sys.stderr, mejor_costo\n vecinos = list(vecindario(solucion))\n costo_actual = mejor_costo\n costos_vecinos = map(costo_segmentacion, vecinos)\n if costo_actual < costo_minimo:\n costo_minimo = costo_actual\n mejor_solucion = solucion\n\n #muestra warnings\n if componentes_no_en_adyacencias:\n print \"Cuidado: \"\n print\n print \"no están en adyacencias, cobertura con errores, quizás?\", componentes_no_en_adyacencias\n print \"no se les asignó componentes adyacentes y quedaron aisladas\"\n print\n\n # muestra solución\n print \"---------\"\n print \"mínimo local\"\n print \"costo\", costo_minimo\n for s, segmento in enumerate(mejor_solucion):\n print 
[\"segmento\", s+1,\n \"carga\", carga(segmento),\n \"costo\", costo(segmento),\n \"componentes\", segmento]\n\n print \"deseada: %d, máxima: %d, mínima: %d\" % (cantidad_de_viviendas_deseada_por_segmento,\n cantidad_de_viviendas_maxima_deseada_por_segmento,\n cantidad_de_viviendas_minima_deseada_por_segmento)\n\n\n\n end = time.time()\n print str(end - start) + \" segundos\"\n\n # actualiza los valores de segmento en la tabla de polygons para representar graficamente\n segmentos = {}\n for s, segmento in enumerate(solucion):\n for cpte in segmento:\n segmentos[cpte] = s + 1\n\n # por ahora solo junin de los andes buscar la tabla usando una relacion prov, depto - aglomerado\n\n#------\n# update shapes.eAAAAa (usando lados)\n#------\n for cpte in componentes:\n sql = (\"update shapes.\" + _table + \"a\"\n + \" set segi = \" + str(segmentos[cpte])\n + sql_where_PPDDDLLLMMM(prov, depto, frac, radio, cpte, 'i')\n + \"\\n;\")\n cur.execute(sql)\n sql = (\"update shapes.\" + _table + \"a\"\n + \" set segd = \" + str(segmentos[cpte])\n + sql_where_PPDDDLLLMMM(prov, depto, frac, radio, cpte, 'd')\n + \"\\n;\")\n cur.execute(sql)\n conn.commit()\n# raw_input(\"Press Enter to continue...\")\n else:\n print \"sin adyacencias\"\n# else:\n# print \"radio Null\"\n\nconn.close()\n\n", "id": "9440949", "language": "Python", "matching_score": 3.472398519515991, "max_stars_count": 1, "path": "segmentador/optimzacion_con_consultas.py" }, { "content": "import psycopg2\nconn = psycopg2.connect(\n database = \"comuna11\",\n user = \"segmentador\",\n password = \"<PASSWORD>\",\n host = \"172.26.67.239\",\n port = \"5432\")\n\nwith open('listado.sql') as file:\n sql = file.read()\nprint sql\n\ncur = conn.cursor()\ncur.execute(sql)\n\nresult = cur.fetchall()\nprint result\n\n", "id": "5816949", "language": "Python", "matching_score": 0.34238865971565247, "max_stars_count": 1, "path": "comuna11/consultas.py" }, { "content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# este programa segmenta un circuito (manzana, o secuencia de lados) con el recorrido ordenado\n# sacado de una tabla sql\n\n# hecho para ejemplo comuna11,\n# TODO: generalizar\n\nimport sys\nimport psycopg2\n\nTablaListado = 'listado'\n# cotas de semgmentos\nMinimo, Maximo = 17, 23\nif len(sys.argv) > 3:\n Minimo, Maximo = int(sys.argv[1]), int(sys.argv[2])\n\nSQLConnect = psycopg2.connect(\n database = comuna11,\n user = segmentador,\n password = password,\n host = localhost\n)\n\n\"\"\"\nalgunos select para ir probando...\n\nselect comunas, frac_comun, radio_comu, mza_comuna, count(*)\nfrom listado\n-- factible TODO: revisar para caso deferente de Minimo, Maximo = 17, 23\ngroup by comunas, frac_comun, radio_comu, mza_comuna\nhaving count(*) >= 17\nand count(*) not between 24 and 33\norder by comunas, frac_comun, radio_comu, mza_comuna\n;\n\n# TODO: son tipo char, no integer, hacer ::integer de todos los campos pertinentes\n# comunas, frac_comun, radio_comu, mza_comuna, clado, hn (con CASE...), hp tabién CASE x PB -> 0\n\nwith posibles as (\n select comunas, frac_comun, radio_comu, mza_comuna\n from listado\n -- factible TODO: revisar para caso deferente de Minimo, Maximo = 17, 23\n group by comunas, frac_comun, radio_comu, mza_comuna\n having count(*) >= 17\n and count(*) not between 24 and 33\n order by comunas, frac_comun, radio_comu::integer, mza_comuna::integer\n )\nselect comunas, frac_comun, radio_comu, mza_comuna, clado, cnombre, hn, hp, hd, row_number() \n over (\n partition by comunas, frac_comun, radio_comu::integer, 
mza_comuna::integer\n order by comunas, frac_comun, radio_comu::integer, mza_comuna::integer, \n clado, \n case \n when hn::integer % 2 = 1 then hn::integer\n else -hn::integer \n end, \n cnombre, \n -- cuerpo, !!!! FALTA ESTE DATO Y ES IMPRESCINDIBLE EN TORRES Y CONJUNTOS DE MONOBLOCKS\n hp\n )\nfrom listado\nwhere (comunas, frac_comun, radio_comu, mza_comuna) in (select * from posibles)\n;\n\n\"\"\"\n\n\"\"\"\n--- usnado windows para ayudar a calcular cortes\ncreate or replace view segmentando_facil as \nwith separados as (\n SELECT frac_comun, radio_comu::integer, mza_comuna::integer, clado, hn, hp, hd,\n row_number() OVER w as row, rank() OVER w as rank\n FROM listado\n WINDOW w AS (PARTITION BY comunas, frac_comun, radio_comu::integer, mza_comuna::integer\n ORDER BY comunas, frac_comun, radio_comu::integer, mza_comuna::integer, clado,\n case \n when hn::integer % 2 = 1 then hn::integer\n else -hn::integer \n end, \n hp)\n ),\n sumados as (\n select frac_comun, radio_comu::integer, mza_comuna::integer, count(*) as cant\n from listado\n group by comunas, frac_comun, radio_comu::integer, mza_comuna::integer\n ),\n parejo as (\n select ceil(cant/20)*20 as redondo\n from sumados\n )\nselect frac_comun, radio_comu, mza_comuna, clado, hn, hp, ceil(rank/20) + 1 as segmento_manzana\nfrom separados\nleft join sumados\nusing(frac_comun, radio_comu, mza_comuna)\norder by frac_comun, radio_comu::integer, mza_comuna::integer, clado,\n case \n when hn::integer % 2 = 1 then hn::integer\n else -hn::integer \n end, \n hp\n;\n\nalter table listado add column segmento_en_manzana integer;\nupdate listado l\nset segmento_en_manzana = segmento_manzana\nfrom segmentando_facil f\nwhere (l.frac_comun, l.radio_comu::integer, l.mza_comuna::integer, l.clado, l.hn, case when l.hp is Null then '' else l.hp end) = \n (f.frac_comun, f.radio_comu, f.mza_comuna, f.clado, f.hn, case when f.hp is Null then '' else f.hp end)\n;\n\nselect frac_comun, radio_comu, mza_comuna, clado, hn, hp, segmento_en_manzana \nfrom listado\norder by frac_comun, radio_comu, mza_comuna, clado,\n case \n when hn::integer % 2 = 1 then hn::integer\n else -hn::integer \n end, \n hp\n;\n\n\"\"\"\n", "id": "3632349", "language": "Python", "matching_score": 1.7198201417922974, "max_stars_count": 1, "path": "segmentar listado.py" }, { "content": "#!/usr/bin/python\n\n# este programa segmenta un circuito (manzana, o secuencia de lados) con el recorrido ordenado\n\n# lee el cvs con el recorrido\nimport csv\nimport sys\n\nlistados = sys.argv[1]\nlistado = []\nwith open( listados, \"rb\" ) as csvFile:\n reader = csv.DictReader( csvFile )\n for line in reader:\n listado.append(line)\n\ncircuitList, circuits = [], []\ndepto, frac, radio = listado[0]['depto'], listado[0]['frac'], listado[0]['radio']\nmanzana = listado[0]['mnza']\nfor line in listado:\n if (line['depto'] == depto and line['frac'] == frac and line['radio'] == radio\n and line[\"mnza\"] == manzana):\n circuitList.append(int(line[\"count\"]))\n else:\n circuits.append({'blocks':[manzana], 'circuitList':circuitList,\n 'depto':depto, 'frac':frac, 'radio':radio})\n circuitList = [int(line[\"count\"])]\n depto, frac, radio = line['depto'], line['frac'], line['radio']\n manzana = line[\"mnza\"]\ncircuits.append({'blocks':[manzana], 'circuitList':circuitList,\n 'depto':depto, 'frac':frac, 'radio':radio})\n\ndepto, frac, radio = circuits[0]['depto'], circuits[0]['frac'], circuits[0]['radio']\nCircuitosDelRadio = []\nCircuitosPorRadio = []\nfor circuit in circuits:\n if (depto, frac, radio) == 
(circuit['depto'], circuit['frac'], circuit['radio']):\n DatosRadio = {'depto':depto, 'frac':frac, 'radio':radio}\n CircuitosDelRadio.append(circuit['circuitList'])\n else:\n depto, frac, radio = circuit['depto'], circuit['frac'], circuit['radio']\n DatosRadio['circuitos'] = CircuitosDelRadio\n CircuitosPorRadio.append(CircuitosDelRadio)\n CircuitosDelRadio = []\n# print DatosRadio\nif ((circuits[-1]['depto'], circuits[-1]['frac'], circuits[-1]['radio']) == (depto, frac, radio)):\n # la ultima linea es igual, no fue apendeada la ultima porque no cambia\n DatosRadio['circuitos'] = CircuitosDelRadio\n CircuitosPorRadio.append(CircuitosDelRadio)\n \n\n\n#print \"cantidad de manzanas: \", len(circuits)\n#print\n#print \"cantidad de paquetes (pisos) indivisibles por manzana\"\ncircuitLists = []\nfor circuit in circuits:\n circuitLists.append(circuit['circuitList'])\n#print map(len, circuitLists), sum(map(len, circuitLists))\n#print \"cantidad de paquetes (pisos) indivisibles: \", len(listado)\n#print\n#print \"cantidad de viviendas por manzana\"\n#print map(sum, circuitLists), sum(map(sum, circuitLists))\n#print \"cantidad de viviendas: \", sum([int(line[\"count\"]) for line in listado])\n#print\nprint [circuit['blocks'] for circuit in circuits]\n\nfrom segMakerDynamic import segMaker, NoFactiblePartirBloque\n# cotas de semgmentos\nn, m = 17, 23\nif len(sys.argv) > 3:\n n, m = int(sys.argv[2]), int(sys.argv[3])\n\nSegmentacionPorRadio = {}\nSegmentacionDDDFFRR = {}\nfor circuit in circuits:\n if circuit['depto'] not in SegmentacionDDDFFRR:\n SegmentacionDDDFFRR[circuit['depto']] = {}\n if circuit['frac'] not in SegmentacionDDDFFRR[circuit['depto']]:\n SegmentacionDDDFFRR[circuit['depto']][circuit['frac']] = {}\n if circuit['radio'] not in SegmentacionDDDFFRR[circuit['depto']][circuit['frac']]:\n SegmentacionDDDFFRR[circuit['depto']][circuit['frac']][circuit['radio']] = True\n \n if NoFactiblePartirBloque(circuit['circuitList'],n,m):\n print NoFactiblePartirBloque(circuit['circuitList'],n,m)\n SegmentacionPorRadio[circuit['depto']+'.'+circuit['frac']+'.'+circuit['radio']] = 'no se puede segmentar'\n SegmentacionDDDFFRR[circuit['depto']][circuit['frac']][circuit['radio']] = None \n\n segmtsCircuit = segMaker(circuit['circuitList'],n,m)\n if circuit['depto']+'.'+circuit['frac']+'.'+circuit['radio'] not in SegmentacionPorRadio:\n SegmentacionPorRadio[circuit['depto']+'.'+circuit['frac']+'.'+circuit['radio']] = 'pudo segmentar'\n if segmtsCircuit:\n circuit['segmtsCircuit'] = segmtsCircuit\n else: \n circuit['segmtsCircuit'] = '* ' + str(sum(circuit['circuitList'])) + '*'\n if SegmentacionPorRadio[circuit['depto']+'.'+circuit['frac']+'.'+circuit['radio']] != 'no se puede segmentar':\n SegmentacionPorRadio[circuit['depto']+'.'+circuit['frac']+'.'+circuit['radio']] = 'no pudo segmentar'\n SegmentacionDDDFFRR[circuit['depto']][circuit['frac']][circuit['radio']] = False\n\nsegmntsList = [circuit['segmtsCircuit'] for circuit in circuits]\n#print segmntsList \n#print [caso for caso in SegmentacionPorRadio]\nPudo = [caso for caso in SegmentacionPorRadio if SegmentacionPorRadio[caso] == 'pudo segmentar']\nNoPudo = [caso for caso in SegmentacionPorRadio if SegmentacionPorRadio[caso] == 'no pudo segmentar']\nNoSePuede = [caso for caso in SegmentacionPorRadio if SegmentacionPorRadio[caso] == 'no se puede segmentar']\nprint '-----------------------------------------------------------------'\nprint 'No es posible segmentar ' + str(len(NoSePuede)) + ' radios'\nprint 'No pudo segmentar ' + str(len(NoPudo)) + ' 
radios'\nprint 'Se segmentaron ' + str(len(Pudo)) + ' radios'\nprint '-----------------------------------------------------------------'\nprint\n\nRadiosSegmentados = {}\nRadiosNoSegmentados = {}\nRadiosNoSegmentables = {}\nfor Comuna in SegmentacionDDDFFRR:\n RadiosSegmentados[Comuna] = 0\n RadiosNoSegmentados[Comuna] = 0\n RadiosNoSegmentables[Comuna] = 0\n for Frac in SegmentacionDDDFFRR[Comuna]:\n for Radio in SegmentacionDDDFFRR[Comuna][Frac]:\n if SegmentacionDDDFFRR[Comuna][Frac][Radio]:\n RadiosSegmentados[Comuna] += 1\n elif SegmentacionDDDFFRR[Comuna][Frac][Radio] is None:\n RadiosNoSegmentables[Comuna] += 1\n else:\n RadiosNoSegmentados[Comuna] += 1\n\nprint 'Discriminados por Comuna'\nprint 'Segmentados'\nprint RadiosSegmentados\nprint 'No segmentados'\nprint RadiosNoSegmentados\nprint 'No segmentables'\nprint RadiosNoSegmentables\n\nprint '-----------------------------------------------------------------'\n\n\n\nprint 'Cantidad de manzanas: ' + str(len(segmntsList))\nprint ' segmentadas: ' + str(len([sgmnts for sgmnts in segmntsList if type(sgmnts) is list]))\nprint ' con problemas: ' + str(len([sgmnts for sgmnts in segmntsList if type(sgmnts) is not list]))\n\nprint '-----------------------------------------------------------------'\nprint\n\n\nradiosCircuits = []\nfor circuit in circuits:\n depto, frac, radio = circuit['depto'], circuit['frac'], circuit['radio']\n\n\n#for i, load in enumerate(circuit['segmtsCircuit']):\n# print i, load\n\nprint '-----------------------------------------------------------------'\nprint \n\nj = 0\nline = listado[j]\nfor circuit in circuits:\n print\n print 'R3 ->', ' depto: ', circuit['depto'], ' fraccion: ', circuit['frac'], ' radio: ', circuit['radio']\n print\n manzana = line['mnza']\n if circuit['segmtsCircuit']:\n# print circuit['blocks'], circuit['segmtsCircuit']\n for i, load in enumerate(circuit['segmtsCircuit']):\n print 'segmento: ', i+1, 'manzana: ', manzana, ' cantidad de viviendas: ', load\n direccion = listado[j]\n print 'desde: ', direccion['nombre'], ' ', direccion['numero'], ' ', direccion['cuerpo'], ' ', direccion['piso']\n s = 0\n while j < len(listado)-1 and s < load:\n s += int(listado[j]['count'])\n j += 1\n direccion = listado[j-1]\n print 'hasta: ', direccion['nombre'], ' ', direccion['numero'], ' ', direccion['cuerpo'], ' ', direccion['piso']\n else:\n while j < len(listado) and listado[j] == manzana:\n j += 1\n if circuit['segmtsCircuit'] is tuple:\n print \"No se puede segmentar con metodo segMaker entre \" + str(n) + \" y \" + str(m)\n manzana = listado[j-1]['mnza']\n\n if j < len(listado):\n line = listado[j]\n\n", "id": "6145499", "language": "Python", "matching_score": 0.6186772584915161, "max_stars_count": 1, "path": "segmentCircuitDynamic.py" }, { "content": "# definicón del vecindario de una segmentacíon para definir y recorrer la red de segementaciones\n# vecindario devuelve array de vecinos usando extraer y transferir\ndef vecindario(segmentacion):\n # devuelve array de vecinos\n vecindario = []\n # extracciones\n for segmento in segmentacion:\n sgms = list(segmentacion)\n sgms.remove(segmento) # el resto no considerado de la segmentación\n if len(segmento) == 2: # segmento binario se parte, no se analizan los 2 casos, ya que son el mismo\n este = segmento[0]; ese = segmento[1]\n vecino = [[este], [ese]] + sgms\n vecindario.append(vecino)\n elif len(segmento) > 2:\n for este in segmento:\n vecino = [[este]] + extraer(este, segmento) + sgms\n vecindario.append(vecino)\n # transferencias\n if len(segmentacion) 
>= 2: # se puede hacer una transferencia\n for i, este in enumerate(segmentacion):\n esa = list(segmentacion) # copia para preservar la original\n esa.remove(este) # elimino de la copia de la segmentacion a este segmento\n for j, ese in enumerate(esa): # busco otro segmento\n aquella = list(esa) # copia de para eliminar a ese\n aquella.remove(ese) # copia de segmentacion sin este ni ese\n if len(este) == 1 and len(ese) == 1 and i < j:\n pass # si no se repiten cuando este y ese se permuten\n else:\n for cada in este:\n transferencia = transferir(cada, este, ese)\n if transferencia: # se pudo hacer\n vecino = transferencia + aquella\n vecindario.append(vecino)\n # fusión de 2 segmentos evitando repeticiones\n #(cuando alguno es una solo elemento la fusion es considerada en la transferencia)\n if len(este) > 1 and len(ese) > 1 and conectados(este + ese):\n vecino = [este + ese] + aquella\n vecindario.append(vecino) # analizar fusiones\n return vecindario\n# no devuelve repeticiones\n", "id": "2626442", "language": "Python", "matching_score": 1.8915486335754395, "max_stars_count": 1, "path": "segmentador/definicion_de_vecindario_de_segmentaciones.py" }, { "content": "/*\ntitulo: calcular_soluciones_adyacentes.py\ndescripción: define las funciones de operacion entre soluciones\npara calcular las segmentaciones/soluciones adyacentes\nautor: -h\nfecha: 2019-05-09 Ju\n\n#definición de funciones de adyacencia y operaciones sobre manzanas\n\n*/\n\n\ndef son_adyacentes(este, aquel):\n return aquel in adyacentes[este]\n\n# calcula el componente conexo que contiene a este,\n# para calcular las componentes conexas o contiguas luego de una extracción\ndef clausura_conexa(este, esos):\n # se puede ir de este a ese para todo ese en esos\n if este not in esos:\n return [] # caso seguro\n else:\n clausura = [este] # al menos contiene a este\n i = 0\n while i < len(clausura): # i es el puntero lo que que falta expandir\n # i se incrementa de a 1 expandiendo de a 1 las adyacencias\n # hasta que la variable clausura no se expande más,\n # queda en un puntos fijo, i.e. es una clausura\n adyacentes_i = [ese for ese in adyacentes[clausura[i]] if ese in esos]\n # los adyacentes a la i-ésimo elemento de la clausura que están en la coleccion\n nuevos = [ese for ese in adyacentes_i if ese not in clausura] # no agragados aún\n clausura.extend(nuevos) # se agregan al final las adyacencias no agregadas\n i = i + 1\n return clausura\n\ndef conectados(estos):\n # True si coleccion es conexo, no hay partes separadas,\n if not estos: # es vacio\n return True\n else:\n este = estos[0] # este es cualquiera, se elije el primero\n return len(clausura_conexa(este, estos)) == len(estos)\n\n# extraer un componente\ndef extraer(este, estos):\n # devuelve la lista de partes conexas resultado de remover la manzana m del segmento\n if este not in estos:\n return []\n else:\n esos = list(estos) # copia para no modificar el original\n esos.remove(este)\n partes = []\n while esos: # es no vacia\n ese = esos[0] # se elige uno cualquiera, se usa el 1ro\n clausura_de_ese_en_esos = clausura_conexa(ese, esos)\n for aquel in clausura_de_ese_en_esos:\n if aquel not in esos: # (?) 
cómo puede ser?????\n # pass\n raise Exception(\"elemento \" + str(aquel) + \" no está en \" + str(esos)\n + \"\\nclausura_de_ese_en_esos \" + str(clausura_de_ese_en_esos))\n else: # para que no se rompa acá....\n esos.remove(aquel) # en esos queda el resto no conexo a aquel\n partes.append(clausura_de_ese_en_esos)\n return partes\n\n# transferir un componente de un conjunto a otro\ndef transferir(este, estos, esos):\n # transferir este del segmento origen al segmento destino\n # devuelve una lista con 2 elementoe ... los nuevos estos y esos\n if not conectados(esos + [este]): # no puedo transferir\n return False\n elif len(estos) == 1: # no queda resto, se fusiona origen con destino\n return [estos + esos]\n else:\n return extraer(este, estos) + [esos + [este]]\n\ndef carga(estos):\n conteos = [viviendas[este] for este in estos]\n return sum(conteos)\n", "id": "2388138", "language": "Python", "matching_score": 1.618078589439392, "max_stars_count": 1, "path": "segmentador/calcular_soluciones_adyacentes.py" }, { "content": "from segmentaciones import *\nfrom random import *\n\n\n\"\"\"\nfor i in range(3):\n componentes.append(Componente(i, randrange(10)))\nfor c_i in componentes:\n for c_j in componentes:\n if c_i.id != c_j.id and random() < 0.2:\n c_i.agregar_adyacencia(c_j)\n\"\"\"\n\n#componentes = [1, 2, 3, 4, 5]\n#adyacencias = [(5,4), (1,2), (2,3), (3,4)]\n\n\nc1 = Componente(1, randrange(10))\nc2 = Componente(2, randrange(10))\nc3 = Componente(3, randrange(10))\nc4 = Componente(4, randrange(10))\nc5 = Componente(5, randrange(10))\n\"\"\"\nc1 = Componente(1, 8)\nc2 = Componente(2, 5)\nc3 = Componente(3, 7)\nc4 = Componente(4, 10)\nc5 = Componente(5, 10)\n\"\"\"\nc2.agregar_adyacencia(c1)\nc2.agregar_adyacencia(c3)\nc2.agregar_adyacencia(c4)\nc5.agregar_adyacencia(c4)\n\ncomps = Componentes([c1, c2, c3, c4, c5])\nprint ('-----------------------comps--------------------------------')\nprint (comps)\nprint ('-----------------------componentes.componentes()------------')\nprint (comps.componentes())\n\nsgms = comps.segmentos()\nrecs = comps.recorridos()\nprint ('-----------------------comps.segmentos() iterado------------')\n\nfor s in sgms:\n sgm = Segmento(s)\n print (sgm)\n\nprint ('-----------------------sgms---------------------------------')\nprint(sgms)\n\nprint ('-----------------------comps.recorridos() iterado------------')\n\nfor s in recs:\n sgm = Segmento(s)\n print (sgm)\n\nprint ('-----------------------recs---------------------------------')\nprint(recs)\n\n\ntodos = Segmentos(sgms)\ntodos.ordenar()\nprint ('-----------------------todos-ordenados por costo -----------')\nprint (todos)\n\nsg1 = Segmento([c1, c2])\nprint ('-----------------------sg1.componentes()--------------------')\nprint (sg1.componentes())\n\nsg2 = Segmento([c3])\nunos = Segmentos([sg1, sg2])\n\n\nprint ('-----------------------unos---------------------------------')\nprint (unos)\nprint ('-----------------------unos.componentes()-------------------')\nprint (unos.componentes())\nprint ('-----------------------unos.componentes()[0]----------------')\nprint (unos.componentes()[0])\nprint ('-----------------------unos[0][0]---------------------------')\nprint(unos[0][0])\nprint ('-----------------------unos[0][0] is c1---------------------')\nprint(unos[0][0] is c1)\nprint ('-----------------------unos.componentes()[0] is c1----------')\nprint(unos.componentes()[0] is c1)\nprint ('-----------------------unos.componentes().ids()-------------')\nprint (unos.componentes().ids())\n\nresto = 
Componentes(set(comps) - set(unos.componentes()))\nprint ('---resto = Componentes(set(comps) - set(unos.componentes()))')\nprint ('-----------------------resto---------------------------------')\nprint (resto)\n\nprint ('-----------------------sgms , resto -----------------')\nlista = []\nfor s in todos:\n lista.append([Segmento(s), Componentes(set(comps) - set(s))]) \nfor [s, r] in lista:\n print (str(s) + ' - ' + str(r))\n\n[s, r] = lista[0]\nprint ('-----------------------r.segmentos()------------------------')\nprint (r.segmentos())\nprint ('-----------------------2da vuelta con el 1ro----------------')\nfor n in r.segmentos():\n quedan = Componentes(set(r) - set(n))\n print (str(s) + ' + ' + str(n) + ' - ' + str(quedan)) \n\nprint ('-----------------------segmenta----------------')\nsoluciones = []\nprint (segmenta(Segmentos(), comps, soluciones))\nprint ('-----------------------soluciones----------------')\nfor s in soluciones:\n print(s)\n\nprint ('-----------------------unicas-------------------')\nss = []\nsols = []\nfor sol in soluciones:\n en_set = set(map(tuple, sol))\n if en_set not in ss:\n ss.append(en_set)\n sols.append(sol)\nfor sol in sols:\n print (sol)\n", "id": "5380111", "language": "Python", "matching_score": 2.4634275436401367, "max_stars_count": 1, "path": "sandbox/tests.py" }, { "content": "from segmentaciones import *\nfrom random import *\nfrom sys import *\n\nn = int(argv[1])\ncomponentes = Componentes()\nfor i in range(n):\n componentes.append(Componente(i, randrange(10)))\nfor c_i in componentes:\n r = int(random()*3) + 1\n while r > 0:\n s = int(random()*len(componentes))\n if c_i != componentes[s] and componentes[s] not in c_i.adyacentes:\n c_i.agregar_adyacencia(componentes[s])\n r = r - 1\n\n\nprint ('-----------------------componentes-------------')\nprint ('-----------------------adyacencias-------------')\nfor c in componentes:\n print (c.id, c.vivs)\n adys = [] \n for a in c.adyacentes:\n adys.append(a.id)\n print (' ', adys)\n\nprint ('-----------------------segmenta----------------')\nsoluciones = []\nsegmenta(Segmentos(), componentes, soluciones)\n\"\"\"\nprint ('-----------------------soluciones----------------')\nfor s in soluciones:\n print(s)\n\n\"\"\"\nprint ('-----------------------unicas-------------------')\nss = []\nsols = []\nfor sol in soluciones:\n en_set = set(map(tuple, sol))\n if en_set not in ss:\n ss.append(en_set)\n sols.append(sol)\n#for sol in sols:\n# print (sol)\nprint (sols[0])\n\n", "id": "9044348", "language": "Python", "matching_score": 2.0358078479766846, "max_stars_count": 1, "path": "sandbox/test_eficiencia.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nejemplo de 2 x 2 manzanas\ncon componentes lados\n\n +--11--+ +--21--+\n | | | |\n 14 12 24 22\n | | | |\n +--13--+ +--23--+\n\n +--31--+ +--41--+\n | | | |\n 34 32 44 42\n | | | |\n +--33--+ +--43--+\n\n\"\"\"\nfrom segmentaciones import *\nfrom random import *\n\nc11 = Componente(11, randrange(20))\nc12 = Componente(12, randrange(20))\nc13 = Componente(13, randrange(20))\nc14 = Componente(14, randrange(20))\nc21 = Componente(21, randrange(20))\nc22 = Componente(22, randrange(20))\nc23 = Componente(23, randrange(20))\nc24 = Componente(24, randrange(20))\nc31 = Componente(31, randrange(20))\nc32 = Componente(32, randrange(20))\nc33 = Componente(33, randrange(20))\nc34 = Componente(34, randrange(20))\nc41 = Componente(41, randrange(20))\nc42 = Componente(42, randrange(20))\nc43 = Componente(43, randrange(20))\nc44 = Componente(44, randrange(20))\n\n# 
doblar\nc11.agregar_adyacencia(c12)\nc12.agregar_adyacencia(c13)\nc13.agregar_adyacencia(c14)\nc14.agregar_adyacencia(c11)\n\nc21.agregar_adyacencia(c22)\nc22.agregar_adyacencia(c23)\nc23.agregar_adyacencia(c24)\nc24.agregar_adyacencia(c21)\n\nc31.agregar_adyacencia(c32)\nc32.agregar_adyacencia(c33)\nc33.agregar_adyacencia(c34)\nc34.agregar_adyacencia(c31)\n\nc41.agregar_adyacencia(c42)\nc42.agregar_adyacencia(c43)\nc43.agregar_adyacencia(c44)\nc44.agregar_adyacencia(c41)\n\n# volver\nc12.agregar_adyacencia(c24)\nc24.agregar_adyacencia(c12)\n\nc13.agregar_adyacencia(c31)\nc31.agregar_adyacencia(c13)\n\nc23.agregar_adyacencia(c41)\nc41.agregar_adyacencia(c23)\n\nc32.agregar_adyacencia(c44)\nc44.agregar_adyacencia(c32)\n\n# cruzar\nc11.agregar_adyacencia(c21)\nc23.agregar_adyacencia(c13)\nc31.agregar_adyacencia(c41)\nc43.agregar_adyacencia(c33)\n\nc34.agregar_adyacencia(c14)\nc12.agregar_adyacencia(c32)\nc44.agregar_adyacencia(c24)\nc22.agregar_adyacencia(c42)\n\ncomponentes = Componentes([\n c11, c21,\nc14, c12, c24, c22,\n c13, c23,\n\n c31, c41,\nc34, c32, c44, c42,\n c33, c43,\n])\n\nset_segmentacion_deseada(40)\n\nprint ('---------------componentes-con-adyacencias---')\nfor c in componentes:\n adys = Componentes()\n for a in c.adyacentes:\n adys.append(a.id)\n print (c.id, '(', c.vivs,')', adys)\n\nprint ('-----------------------segmenta----------------')\nsoluciones = Segmentaciones()\nsegmenta(Segmentacion(), componentes, soluciones)\nprint ('\\n---------------------soluciones-------')\nfor s in soluciones:\n print(s)\nprint ('\\n---------------------diferentes-------')\nunicas = soluciones.diferentes()\nfor u in unicas:\n print(u)\n\n\n\n", "id": "9533719", "language": "Python", "matching_score": 2.794752597808838, "max_stars_count": 1, "path": "sandbox/2x2-random.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"\nejemplo de 2 x 2 manzanas\ncon componentes lados\n\n +--11--+ +--21--+\n | | | |\n 14 12 24 22\n | | | |\n +--13--+ +--23--+\n\n +--31--+ +--41--+\n | | | |\n 34 32 44 42\n | | | |\n +--33--+ +--43--+\n\n\"\"\"\nfrom segmentaciones import *\nfrom random import *\n\nc11 = Componente(11, 10)\nc12 = Componente(12, 10)\nc13 = Componente(13, 10)\nc14 = Componente(14, 0)\nc21 = Componente(21, 0)\nc22 = Componente(22, 0)\nc23 = Componente(23, 10)\nc24 = Componente(24, 0)\nc31 = Componente(31, 0)\nc32 = Componente(32, 0)\nc33 = Componente(33, 0)\nc34 = Componente(34, 10)\nc41 = Componente(41, 0)\nc42 = Componente(42, 10)\nc43 = Componente(43, 10)\nc44 = Componente(44, 10)\n\n# doblar\nc11.agregar_adyacencia(c12)\nc12.agregar_adyacencia(c13)\nc13.agregar_adyacencia(c14)\nc14.agregar_adyacencia(c11)\n\nc21.agregar_adyacencia(c22)\nc22.agregar_adyacencia(c23)\nc23.agregar_adyacencia(c24)\nc24.agregar_adyacencia(c21)\n\nc31.agregar_adyacencia(c32)\nc32.agregar_adyacencia(c33)\nc33.agregar_adyacencia(c34)\nc34.agregar_adyacencia(c31)\n\nc41.agregar_adyacencia(c42)\nc42.agregar_adyacencia(c43)\nc43.agregar_adyacencia(c44)\nc44.agregar_adyacencia(c41)\n\n# volver\nc12.agregar_adyacencia(c24)\nc24.agregar_adyacencia(c12)\n\nc13.agregar_adyacencia(c31)\nc31.agregar_adyacencia(c13)\n\nc23.agregar_adyacencia(c41)\nc41.agregar_adyacencia(c23)\n\nc32.agregar_adyacencia(c44)\nc44.agregar_adyacencia(c32)\n\n# cruzar\nc11.agregar_adyacencia(c21)\nc23.agregar_adyacencia(c13)\nc31.agregar_adyacencia(c41)\nc43.agregar_adyacencia(c33)\n\nc34.agregar_adyacencia(c14)\nc12.agregar_adyacencia(c32)\nc44.agregar_adyacencia(c24)\nc22.agregar_adyacencia(c42)\n\ncomponentes = Componentes([\n c11, c21,\nc14, 
c12, c24, c22,\n c13, c23,\n\n c31, c41,\nc34, c32, c44, c42,\n c33, c43,\n])\n\nset_segmentacion_deseada(20)\n\nprint ('---------------componentes-con-adyacencias---')\nfor c in componentes:\n adys = Componentes()\n for a in c.adyacentes:\n adys.append(a.id)\n print (c.id, '(', c.vivs,')', adys)\n\nprint ('-----------------------segmenta----------------')\nsoluciones = Segmentaciones()\nsegmenta(Segmentacion(), componentes, soluciones)\nprint ('\\n---------------------soluciones-------')\nfor s in soluciones:\n print(s)\nprint ('\\n---------------------diferentes-------')\nunicas = soluciones.diferentes()\nfor u in unicas:\n print(u)\n\n\n\n", "id": "8276505", "language": "Python", "matching_score": 0.6943835616111755, "max_stars_count": 1, "path": "sandbox/2x2-lados_con_10_o_0.py" }, { "content": "\"\"\"\ntítulo: conjuntos_factibles.py\ndescripción: calcula la suma de conjuntos generados por adyacencias y la intersecta con las particiones\nquedan todas las particiones de los componentes que respetan las secuencias de adyacencias\nautor: -h\nfecha: 2019-06\n\"\"\"\n\nimport particiones\nimport conjuntos_adyacentes\n\n\ncomponentes = [1, 2, 3, 4, 5]\nadyacencias = [(5,4), (1,2), (2,3), (3,4)]\nfactibles = []\n\nc_adys = conjuntos_adyacentes.conjuntos_adyacentes(componentes, adyacencias)\nfor c in c_adys:\n c.sort()\nfor p in particiones.partition(componentes):\n incluida = True\n for c_p in p:\n if c_p not in c_adys:\n incluida = False\n break\n if incluida:\n factibles.append(p)\n\nfor c in c_adys:\n print(c)\n\nprint('---------------------')\n\nfor p in factibles:\n print(p)\n\n", "id": "246133", "language": "Python", "matching_score": 2.059931993484497, "max_stars_count": 1, "path": "sandbox/conjuntos_factibles.py" }, { "content": "\"\"\"\ntítulo: conjuntos_adyacentes.py\ndescripción: calcula todos los conjuntos que se pueden generar siguiendo la relación de adyacencias\n(no reflexiva)\nautor: -h\nfecha: 2019-06\n\"\"\"\n\ndef conjuntos_adyacentes(componentes, adyacencias):\n conjuntos = []\n for cmpt in componentes:\n conjuntos.append([cmpt])\n for (i, j) in adyacencias:\n conjuntos.append([i, j])\n cantidad = 0\n while cantidad < len(conjuntos):\n cantidad = len(conjuntos)\n for c in conjuntos:\n for (i, j) in adyacencias:\n if i in c and j not in c:\n b = list(c)\n b.append(j)\n b.sort()\n if b not in conjuntos:\n conjuntos.append(b)\n return conjuntos\n\n\n\n#componentes = [1, 2, 3, 4, 5]\n#adyacencias = [(5,4), (1,2), (2,3), (3,4)]\n\n#print(conjuntos_adyacentes(componentes, adyacencias))\n\n \n", "id": "10397277", "language": "Python", "matching_score": 0.06994937360286713, "max_stars_count": 1, "path": "sandbox/conjuntos_adyacentes.py" }, { "content": "non_segmentable_tails = []\n\ndef segMaker(seq, n, m, d=20):\n def pref(s, t):\n x, y = s[1][0], t[1][0]\n return abs(y-d) - abs(x-d) \n if sum(seq) < n: # too short\n non_segmentable_tails.append(seq)\n return None # base case returns not segmentable\n if n <= sum(seq) <= m: # right size\n return [sum(seq)] # base case returns the segment length\n else:\n i, s, heads = 0, 0, [] # init variables\n while i < len(seq) and s < n: # get upto sgm len lower bound\n s += seq[i]\n i += 1\n while i < len(seq) and n <= s <= m: # while feasible\n heads.append((i, [s])) # add candidates to explore\n s += seq[i]\n i += 1\n # call a function to sort heads with heuristic\n heads.sort(pref)\n while heads:\n i, candidate = heads.pop()\n tail = seq[i:]\n if tail not in non_segmentable_tails:\n sgms = segMaker(tail,n,m)\n if sgms:\n 
candidate.extend(sgms)\n return candidate\n else:\n non_segmentable_tails.append(tail)\nimport math\n\ndef NoFactibleCantidad(seq, n, m):\n v = sum(seq)\n for s in range(0, int(math.ceil((n - 1)/(m - n))) + 1):\n if s*m < v < (s + 1)*n:\n return s + 1\n return False\n\ndef NoFactiblePartirBloque(seq, n, m):\n if len(seq) > 1:\n for i, b in enumerate(seq):\n if b > m:\n return [b] \n if b < n:\n if 0 < i < len(seq) - 1:\n if seq[i - 1] + b > m and seq[i + 1] + b > m:\n return seq[i - 1:i + 2]\n elif i == 0:\n if seq[1] + b > m:\n return seq[0:2]\n else:\n if seq[i - 1] + b > m:\n return seq[i - 1:i + 1]\n return False\n\n\n\n", "id": "7136865", "language": "Python", "matching_score": 3.8374505043029785, "max_stars_count": 1, "path": "segMakerDynamic.py" }, { "content": "\"\"\"\r\ndeprecado\r\nalgoritmo base que fue usado en presentación del método en varias conferencias\r\n\"\"\"\r\ndef segMaker(seq,n,m):\r\n if sum(seq) < n: # too short\r\n return None # base case returns not segmentable\r\n if n <= sum(seq) <= m: # right size\r\n return [sum(seq)] # base case returns the segment length\r\n else:\r\n i, s, heads = 0, 0, [] # init variables\r\n # crear una lista de ramas ya exploradas\r\n while i < len(seq) and s < n: # get upto sgm len lower bound\r\n i, s = i+1, s+seq[i]\r\n while i < len(seq) and n <= s <= m: # while feasible\r\n # chequear que el candidato no haya sido ya explorado\r\n heads.append((i, [s])) # add candidates to explore\r\n i, s = i+1, s+seq[i]\r\n # call a function to sort heads with heuristic\r\n while heads:\r\n i, candidate = heads.pop()\r\n tail = seq[i:]\r\n sgms = segMaker(tail,n,m)\r\n if sgms:\r\n candidate.extend(sgms)\r\n return candidate\r\n", "id": "8552860", "language": "Python", "matching_score": 3.292311668395996, "max_stars_count": 1, "path": "segMaker.py" }, { "content": "#!/usr/bin/python\n\nimport sys\n\nn, m = 5, 8\n\ndef segMaker(seq,n=n,m=m):\n\tif sum(seq) < n:\t\t# too short\n\t\treturn None\t\t# base case returns not segmentable\n\tif n <= sum(seq) <= m:\t\t# right size\n\t\treturn [seq]\t# base case returns the segment \n\telse:\n\t\ti, s, heads = 0, [], []\t\t\t# init variables\n\t\twhile i < len(seq) and sum(s) < n:\t\t# get upto sgm len lower bound\n\t\t\ts.append(seq[i])\n\t\t\ti = i+1\n\t\twhile i < len(seq) and n <= sum(s) <= m:\t# while feasible\n\t\t\theads.append((i, s[:]))\t\t# add candidates to explore\n\t\t\ts.append(seq[i])\n\t\t\ti = i+1\n\n# sort heads with heuristic\n\t\ttemp = []\n\t\tif len(sys.argv) >= 2:\n\t\t\tif sys.argv[1] == 'reverse': \n\t\t\t\theads.reverse()\n\t\twhile heads:\n\t\t\ti, candidate = heads.pop()\n\t\t\ttail = seq[i:]\n\t\t\tsgms = segMaker(tail)\n\t\t\tif sgms:\n\t\t\t\ttemp.append(candidate)\n\t\t\t\ttemp.extend(sgms)\n\t\t\t\treturn temp \n\n\ncases = [\n\t[2,8,3,4,1,1,3],\n\t[2,2,3,4,8,1,3],\n\t[2,3,3,4,1,1,3],\n\t[2,3,4,1,1,3],\n\t[2,4,1,1,3],\n\t[1,3,1,2,2,2,2,1,3,2,1,2,3,1,3,4,2,4,3,4,5,1,1,4,4,4,1,1,2,3,1,3 ],\n\t[4,5,4,5,4],\n\t[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]\n\t]\n\nfor case in cases:\n\tprint ('-- --') \n\tprint (case, ' => ', segMaker(case))\n", "id": "11949102", "language": "Python", "matching_score": 3.606095790863037, "max_stars_count": 1, "path": "segmentMaker.py" } ]
1.668949
biezhi
[ { "content": "\"\"\"\n使用方法:命令行模式\n\npython control_cpu_usage.py -c 20 -t 0.1\n\n-c 指定cpu核数:不指定-c参数默认为所有核数。\n-t 数值越大,cpu使用率越低。\n\"\"\"\nimport argparse\nimport time\nfrom multiprocessing import Process\nfrom multiprocessing import cpu_count\n\n\ndef exec_func(bt):\n while True:\n for i in range(0, 9600000):\n pass\n try:\n time.sleep(bt)\n except:\n break\n\nif __name__ == \"__main__\":\n parse = argparse.ArgumentParser(description='runing')\n parse.add_argument(\n \"-c\",\n \"--count\",\n default=cpu_count(),\n help='cpu count'\n )\n\n parse.add_argument(\n \"-t\",\n \"--time\",\n default=0.01,\n help='cpu time'\n )\n\n args = parse.parse_args()\n\n cpu_logical_count = int(args.count)\n\n cpu_sleep_time = args.time\n\n try:\n cpu_sleep_time = int(args.time)\n except ValueError:\n try:\n cpu_sleep_time = float(args.time)\n except ValueError as ex:\n raise ValueError(ex)\n\n print('\\n====================占用CPU核数{}.===================='.format(cpu_logical_count))\n print('\\n资源浪费starting......')\n print(cpu_sleep_time)\n\n try:\n p = Process(target=exec_func, args=(\"bt\",))\n ps_list = []\n for i in range(0, cpu_logical_count):\n ps_list.append(Process(target=exec_func, args=(cpu_sleep_time,)))\n\n for p in ps_list:\n p.start()\n\n for p in ps_list:\n p.join()\n except KeyboardInterrupt:\n print(\"手工结束!\")\n", "id": "2273803", "language": "Python", "matching_score": 0, "max_stars_count": 6, "path": "control_cpu_usage.py" }, { "content": "import json\nimport os\nimport sys\nfrom functools import partial\n\nimport psutil\nfrom PySide6.QtCore import QTimer, QCoreApplication, QSettings\nfrom PySide6.QtGui import QIcon, QAction\nfrom PySide6.QtWidgets import QApplication, QWidget, QSystemTrayIcon, QMenu\n\n\nclass TrayIcon(QSystemTrayIcon):\n\n def __init__(self, parent=None):\n super(TrayIcon, self).__init__(parent)\n\n self.current_index = 0\n self.current_icons = []\n\n self.init_setting()\n self.init_ui()\n\n self.cpu_timer = QTimer(self)\n self.cpu_timer.setInterval(3000)\n self.cpu_timer.timeout.connect(self.cpu_tick)\n self.cpu_timer.start()\n\n self.animate_timer = QTimer(self)\n self.animate_timer.setInterval(200)\n self.animate_timer.timeout.connect(self.update_animate)\n self.animate_timer.start()\n\n def init_setting(self):\n self.settings = QSettings('runcat.ini', QSettings.IniFormat)\n if not self.settings.contains(\"current_theme\"):\n self.settings.setValue('current_theme', '白猫')\n\n self.themes_path = dict()\n\n scan_path = self.resource_path(os.path.join('resources', 'themes'))\n g = os.walk(scan_path)\n for path, dir_list, file_list in g:\n for dir_name in dir_list:\n self.themes_path[dir_name] = self.resource_path(os.path.join(path, dir_name))\n\n with open(self.resource_path(os.path.join('resources', 'themes', 'speed.json')), 'r', encoding='utf-8') as f:\n speed_data = f.read()\n self.speed = json.loads(speed_data)\n\n def init_ui(self):\n self.tp_menu = QMenu()\n self.theme_menu = QMenu()\n self.theme_menu.setTitle('切换主题')\n self.themes_action = dict()\n\n for item in self.themes_path:\n action = QAction(item, self)\n action.setCheckable(True)\n action.triggered.connect(partial(self.change_theme, action))\n self.themes_action[item] = action\n self.theme_menu.addAction(action)\n\n self.tp_menu.addMenu(self.theme_menu, )\n\n self.auto_startup_act = QAction('开机启动')\n self.auto_startup_act.setCheckable(True)\n auto_startup_bool = self.settings.value(\"auto_startup\", True, type=bool)\n self.auto_startup_act.setChecked(auto_startup_bool)\n self.settings.setValue('auto_startup', 
auto_startup_bool)\n self.settings.sync()\n\n self.auto_startup_act.triggered[bool].connect(self.auto_startup)\n\n self.exit_act = QAction('退出', self, triggered=self.quit_app)\n self.tp_menu.addAction(self.auto_startup_act)\n self.tp_menu.addAction(self.exit_act)\n\n self.setIcon(QIcon(self.resource_path(os.path.join('runcat.ico'))))\n self.setToolTip(u'CPU使用率 ' + str(psutil.cpu_percent(None)) + '%')\n\n self.change_theme(None)\n self.setContextMenu(self.tp_menu)\n\n def update_animate(self):\n if len(self.current_icons) == 0:\n return\n if len(self.current_icons) <= self.current_index:\n self.current_index = 0\n self.setIcon(self.current_icons[self.current_index])\n self.current_index = (self.current_index + 1) % len(self.current_icons)\n\n def cpu_tick(self):\n cpu_usage = self.get_cpu_usage()\n self.setToolTip(cpu_usage[1])\n\n interval = 200.0 / max(1, min(20, cpu_usage[0] / 5))\n\n current_theme = self.settings.value('current_theme')\n speed_rate = 1\n if current_theme in self.speed.keys():\n speed_rate = self.speed[current_theme]\n\n speed_fps = int(interval * speed_rate)\n\n self.animate_timer.stop()\n self.animate_timer.setInterval(speed_fps)\n self.animate_timer.start()\n\n def change_theme(self, action):\n new_theme = self.settings.value('current_theme')\n if action is not None:\n new_theme = action.text()\n\n self.settings.setValue('current_theme', new_theme)\n self.settings.sync()\n\n for (name, _action) in self.themes_action.items():\n _action.setChecked(new_theme == name)\n\n g = os.walk(self.resource_path(self.themes_path[new_theme]))\n self.current_icons = []\n for path, dir_list, file_list in g:\n for file_name in file_list:\n icon_path = self.resource_path(os.path.join(path, file_name))\n self.current_icons.append(QIcon(icon_path))\n\n def get_cpu_usage(self):\n percent = psutil.cpu_percent(None)\n return (percent, str(percent) + '%',)\n\n def auto_startup(self, checked):\n self.auto_startup_act.setChecked(checked)\n self.settings.setValue('auto_startup', checked)\n self.settings.sync()\n\n def quit_app(self):\n self.cpu_timer.stop()\n self.animate_timer.stop()\n QCoreApplication.instance().quit()\n self.setVisible(False)\n\n def resource_path(self, relative_path):\n if getattr(sys, 'frozen', False): # 是否Bundle Resource\n base_path = sys._MEIPASS\n else:\n # base_path = os.path.abspath(\".\")\n base_path = os.path.dirname(os.path.abspath(__file__))\n return os.path.join(base_path, relative_path)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n QApplication.setQuitOnLastWindowClosed(False)\n\n w = QWidget()\n tray = TrayIcon(w)\n tray.show()\n\n sys.exit(app.exec())\n", "id": "12357831", "language": "Python", "matching_score": 0, "max_stars_count": 6, "path": "run_cat.py" } ]
0
13625025773
[ { "content": "\"\"\"Routes registered when the kill switch is enabled.\"\"\"\nfrom pyramid.response import Response\nfrom pyramid.view import notfound_view_config\n\n\n@notfound_view_config()\ndef not_found(_exc, _request):\n \"\"\"Handle any request we get with the shortest possible response.\"\"\"\n return Response(status=\"429 Offline\", headerlist=[])\n\n\ndef includeme(config):\n config.scan(__name__)\n", "id": "3981642", "language": "Python", "matching_score": 1.4496550559997559, "max_stars_count": 0, "path": "h/streamer/kill_switch_views.py" }, { "content": "from h.streamer import streamer, views\n\n\ndef test_websocket_view_adds_auth_state_to_environ(pyramid_config, pyramid_request):\n pyramid_config.testing_securitypolicy(\"ragnar\", groupids=[\"foo\", \"bar\"])\n pyramid_request.get_response = lambda _: None\n\n views.websocket_view(pyramid_request)\n env = pyramid_request.environ\n\n assert env[\"h.ws.authenticated_userid\"] == \"ragnar\"\n assert env[\"h.ws.effective_principals\"] == pyramid_request.effective_principals\n\n\ndef test_websocket_view_adds_registry_reference_to_environ(pyramid_request):\n pyramid_request.get_response = lambda _: None\n\n views.websocket_view(pyramid_request)\n env = pyramid_request.environ\n\n assert env[\"h.ws.registry\"] == pyramid_request.registry\n\n\ndef test_websocket_view_adds_work_queue_to_environ(pyramid_request):\n pyramid_request.get_response = lambda _: None\n\n views.websocket_view(pyramid_request)\n env = pyramid_request.environ\n\n assert env[\"h.ws.streamer_work_queue\"] == streamer.WORK_QUEUE\n", "id": "2005044", "language": "Python", "matching_score": 1.8899883031845093, "max_stars_count": 0, "path": "tests/h/streamer/views_test.py" }, { "content": "# -*- coding: utf-8 -*-\n\nimport copy\nimport json\nimport logging\nimport weakref\nfrom collections import namedtuple\n\nimport jsonschema\nfrom gevent.queue import Full\nfrom ws4py.websocket import WebSocket as _WebSocket\n\nfrom h import storage\nfrom h.streamer.filter import FILTER_SCHEMA, SocketFilter\n\nlog = logging.getLogger(__name__)\n\n# Mapping incoming message type to handler function. 
Handlers are added inline\n# below.\nMESSAGE_HANDLERS = {}\n\n\n# An incoming message from a WebSocket client.\nclass Message(namedtuple(\"Message\", [\"socket\", \"payload\"])):\n def reply(self, payload, ok=True):\n \"\"\"\n Send a response to this message.\n\n Sends a reply message back to the client, with the passed `payload`\n and reporting status `ok`.\n \"\"\"\n reply_to = self.payload.get(\"id\")\n # Short-circuit if message is missing an ID or has a non-numeric ID.\n if not isinstance(reply_to, (int, float)):\n return\n data = copy.deepcopy(payload)\n data[\"ok\"] = ok\n data[\"reply_to\"] = reply_to\n self.socket.send_json(data)\n\n\nclass WebSocket(_WebSocket):\n # All instances of WebSocket, allowing us to iterate over open websockets\n instances = weakref.WeakSet()\n\n # Instance attributes\n client_id = None\n filter = None\n query = None\n\n def __init__(self, sock, protocols=None, extensions=None, environ=None):\n super(WebSocket, self).__init__(\n sock,\n protocols=protocols,\n extensions=extensions,\n environ=environ,\n heartbeat_freq=30.0,\n )\n\n self.authenticated_userid = environ[\"h.ws.authenticated_userid\"]\n self.effective_principals = environ[\"h.ws.effective_principals\"]\n self.registry = environ[\"h.ws.registry\"]\n\n self._work_queue = environ[\"h.ws.streamer_work_queue\"]\n\n def __new__(cls, *args, **kwargs):\n instance = super(WebSocket, cls).__new__(cls)\n cls.instances.add(instance)\n return instance\n\n def received_message(self, msg):\n try:\n payload = json.loads(msg.data)\n except ValueError:\n self.close(reason=\"invalid message format\")\n return\n try:\n self._work_queue.put(Message(socket=self, payload=payload), timeout=0.1)\n except Full:\n log.warning(\n \"Streamer work queue full! Unable to queue message from \"\n \"WebSocket client having waited 0.1s: giving up.\"\n )\n\n def closed(self, code, reason=None):\n try:\n self.instances.remove(self)\n except KeyError:\n pass\n\n def send_json(self, payload):\n if not self.terminated:\n self.send(json.dumps(payload))\n\n\ndef handle_message(message, session=None):\n \"\"\"\n Handle an incoming message from a client websocket.\n\n Receives a :py:class:`~h.streamer.websocket.Message` instance, which holds\n references to the :py:class:`~h.streamer.websocket.WebSocket` instance\n associated with the client connection, as well as the message payload.\n\n It updates state on the :py:class:`~h.streamer.websocket.WebSocket`\n instance in response to the message content.\n\n It may also passed a database session which *must* be used for any\n communication with the database.\n \"\"\"\n payload = message.payload\n type_ = payload.get(\"type\")\n\n # FIXME: This code is here to tolerate old and deprecated message formats.\n if type_ is None:\n if \"messageType\" in payload and payload[\"messageType\"] == \"client_id\":\n type_ = \"client_id\"\n if \"filter\" in payload:\n type_ = \"filter\"\n\n # N.B. 
MESSAGE_HANDLERS[None] handles both incorrect and missing message\n # types.\n handler = MESSAGE_HANDLERS.get(type_, MESSAGE_HANDLERS[None])\n handler(message, session=session)\n\n\ndef handle_client_id_message(message, session=None):\n \"\"\"A client telling us its client ID.\"\"\"\n if \"value\" not in message.payload:\n message.reply(\n {\n \"type\": \"error\",\n \"error\": {\"type\": \"invalid_data\", \"description\": '\"value\" is missing'},\n },\n ok=False,\n )\n return\n message.socket.client_id = message.payload[\"value\"]\n\n\nMESSAGE_HANDLERS[\"client_id\"] = handle_client_id_message # noqa: E305\n\n\ndef handle_filter_message(message, session=None):\n \"\"\"A client updating its streamer filter.\"\"\"\n if \"filter\" not in message.payload:\n message.reply(\n {\n \"type\": \"error\",\n \"error\": {\"type\": \"invalid_data\", \"description\": '\"filter\" is missing'},\n },\n ok=False,\n )\n return\n\n filter_ = message.payload[\"filter\"]\n try:\n jsonschema.validate(filter_, FILTER_SCHEMA)\n except jsonschema.ValidationError:\n message.reply(\n {\n \"type\": \"error\",\n \"error\": {\n \"type\": \"invalid_data\",\n \"description\": \"failed to parse filter\",\n },\n },\n ok=False,\n )\n return\n\n if session is not None:\n # Add backend expands for clauses\n _expand_clauses(session, filter_)\n\n SocketFilter.set_filter(message.socket, filter_)\n\n\nMESSAGE_HANDLERS[\"filter\"] = handle_filter_message # noqa: E305\n\n\ndef handle_ping_message(message, session=None):\n \"\"\"A client requesting a pong.\"\"\"\n message.reply({\"type\": \"pong\"})\n\n\nMESSAGE_HANDLERS[\"ping\"] = handle_ping_message # noqa: E305\n\n\ndef handle_whoami_message(message, session=None):\n \"\"\"A client requesting information on its auth state.\"\"\"\n message.reply({\"type\": \"whoyouare\", \"userid\": message.socket.authenticated_userid})\n\n\nMESSAGE_HANDLERS[\"whoami\"] = handle_whoami_message # noqa: E305\n\n\ndef handle_unknown_message(message, session=None):\n \"\"\"Message type missing or not recognised.\"\"\"\n type_ = json.dumps(message.payload.get(\"type\"))\n message.reply(\n {\n \"type\": \"error\",\n \"error\": {\n \"type\": \"invalid_type\",\n \"description\": \"invalid message type: \" \"{:s}\".format(type_),\n },\n },\n ok=False,\n )\n\n\nMESSAGE_HANDLERS[None] = handle_unknown_message # noqa: E305\n\n\ndef _expand_clauses(session, filter_):\n for clause in filter_[\"clauses\"]:\n if \"field\" in clause and clause[\"field\"] == \"/uri\":\n _expand_uris(session, clause)\n\n\ndef _expand_uris(session, clause):\n uris = clause[\"value\"]\n expanded = set()\n\n if not isinstance(uris, list):\n uris = [uris]\n\n for item in uris:\n expanded.update(storage.expand_uri(session, item))\n\n clause[\"value\"] = list(expanded)\n", "id": "76076", "language": "Python", "matching_score": 1.0087158679962158, "max_stars_count": 0, "path": "h/streamer/websocket.py" }, { "content": "# -*- coding: utf-8 -*-\n\"\"\"Functions for updating the search index.\"\"\"\n\nimport logging\nimport time\nfrom collections import namedtuple\n\nimport sqlalchemy as sa\nfrom elasticsearch import helpers as es_helpers\nfrom sqlalchemy.orm import subqueryload\n\nfrom h import models, presenters\nfrom h.events import AnnotationTransformEvent\nfrom h.util.query import column_windows\n\nlog = logging.getLogger(__name__)\n\nES_CHUNK_SIZE = 100\nPG_WINDOW_SIZE = 2000\n\n\nclass Window(namedtuple(\"Window\", [\"start\", \"end\"])):\n pass\n\n\ndef index(es, annotation, request, target_index=None):\n \"\"\"\n Index an annotation 
into the search index.\n\n A new annotation document will be created in the search index or,\n if the index already contains an annotation document with the same ID as\n the given annotation then it will be updated.\n\n :param es: the Elasticsearch client object to use\n :type es: h.search.Client\n\n :param annotation: the annotation to index\n :type annotation: h.models.Annotation\n\n :param target_index: the index name, uses default index if not given\n :type target_index: unicode\n \"\"\"\n presenter = presenters.AnnotationSearchIndexPresenter(annotation, request)\n annotation_dict = presenter.asdict()\n\n event = AnnotationTransformEvent(request, annotation, annotation_dict)\n request.registry.notify(event)\n\n if target_index is None:\n target_index = es.index\n\n es.conn.index(\n index=target_index,\n doc_type=es.mapping_type,\n body=annotation_dict,\n id=annotation_dict[\"id\"],\n )\n\n\ndef delete(es, annotation_id, target_index=None, refresh=False):\n \"\"\"\n Mark an annotation as deleted in the search index.\n\n This will write a new body that only contains the ``deleted`` boolean field\n with the value ``true``. It allows us to rely on Elasticsearch to complain\n about dubious operations while re-indexing when we use `op_type=create`.\n\n :param es: the Elasticsearch client object to use\n :type es: h.search.Client\n\n :param annotation_id: the annotation id whose corresponding document to\n delete from the search index\n :type annotation_id: str\n\n :param target_index: the index name, uses default index if not given\n :type target_index: unicode\n\n :param refresh: Force this deletion to be immediately visible to search operations\n :type refresh: bool\n\n \"\"\"\n\n if target_index is None:\n target_index = es.index\n\n es.conn.index(\n index=target_index,\n doc_type=es.mapping_type,\n body={\"deleted\": True},\n id=annotation_id,\n refresh=refresh,\n )\n\n\nclass BatchIndexer:\n \"\"\"\n A convenience class for reindexing all annotations from the database to\n the search index.\n \"\"\"\n\n def __init__(self, session, es_client, request, target_index=None, op_type=\"index\"):\n self.session = session\n self.es_client = es_client\n self.request = request\n self.op_type = op_type\n\n # By default, index into the open index\n if target_index is None:\n self._target_index = self.es_client.index\n else:\n self._target_index = target_index\n\n def index(\n self, annotation_ids=None, windowsize=PG_WINDOW_SIZE, chunk_size=ES_CHUNK_SIZE\n ):\n \"\"\"\n Reindex annotations.\n\n :param annotation_ids: a list of ids to reindex, reindexes all when `None`.\n :type annotation_ids: collection\n :param windowsize: the number of annotations to index in between progress log statements\n :type windowsize: integer\n :param chunk_size: the number of docs in one chunk sent to ES\n :type chunk_size: integer\n\n :returns: a set of errored ids\n :rtype: set\n \"\"\"\n if annotation_ids is None:\n annotations = _all_annotations(session=self.session, windowsize=windowsize)\n else:\n annotations = _filtered_annotations(\n session=self.session, ids=annotation_ids\n )\n\n # Report indexing status as we go\n annotations = _log_status(annotations, log_every=windowsize)\n\n indexing = es_helpers.streaming_bulk(\n self.es_client.conn,\n annotations,\n chunk_size=chunk_size,\n raise_on_error=False,\n expand_action_callback=self._prepare,\n )\n errored = set()\n for ok, item in indexing:\n if not ok:\n status = item[self.op_type]\n\n was_doc_exists_err = \"document already exists\" in status[\"error\"]\n if 
self.op_type == \"create\" and was_doc_exists_err:\n continue\n\n errored.add(status[\"_id\"])\n return errored\n\n def _prepare(self, annotation):\n action = {\n self.op_type: {\n \"_index\": self._target_index,\n \"_type\": self.es_client.mapping_type,\n \"_id\": annotation.id,\n }\n }\n data = presenters.AnnotationSearchIndexPresenter(\n annotation, self.request\n ).asdict()\n\n event = AnnotationTransformEvent(self.request, annotation, data)\n self.request.registry.notify(event)\n\n return (action, data)\n\n\ndef _all_annotations(session, windowsize=2000):\n # This is using a windowed query for loading all annotations in batches.\n # It is the most performant way of loading a big set of records from\n # the database while still supporting eagerloading of associated\n # document data.\n windows = column_windows(\n session=session,\n column=models.Annotation.updated, # implicit ASC\n windowsize=windowsize,\n where=_annotation_filter(),\n )\n query = _eager_loaded_annotations(session).filter(_annotation_filter())\n\n for window in windows:\n for a in query.filter(window):\n yield a\n\n\ndef _filtered_annotations(session, ids):\n annotations = (\n _eager_loaded_annotations(session)\n .execution_options(stream_results=True)\n .filter(_annotation_filter())\n .filter(models.Annotation.id.in_(ids))\n )\n\n for a in annotations:\n yield a\n\n\ndef _annotation_filter():\n \"\"\"Default filter for all search indexing operations.\"\"\"\n return sa.not_(models.Annotation.deleted)\n\n\ndef _eager_loaded_annotations(session):\n return session.query(models.Annotation).options(\n subqueryload(models.Annotation.document).subqueryload(\n models.Document.document_uris\n ),\n subqueryload(models.Annotation.document).subqueryload(models.Document.meta),\n subqueryload(models.Annotation.moderation),\n subqueryload(models.Annotation.thread).load_only(\"id\"),\n )\n\n\ndef _log_status(stream, log_every=1000):\n i = 0\n then = time.time()\n for item in stream:\n yield item\n i += 1\n if i % log_every == 0:\n now = time.time()\n delta = now - then\n then = now\n rate = log_every / delta\n log.info(\"indexed {:d}k annotations, rate={:.0f}/s\".format(i // 1000, rate))\n", "id": "303406", "language": "Python", "matching_score": 1.5095900297164917, "max_stars_count": 0, "path": "h/search/index.py" }, { "content": "import datetime\nfrom unittest import mock\n\nimport pytest\nfrom h_matchers import Any\n\nfrom h.tasks import indexer\n\n\nclass FakeSettingsService:\n def __init__(self):\n self._data = {}\n\n def get(self, key):\n return self._data.get(key)\n\n def put(self, key, value):\n self._data[key] = value\n\n\nclass TestAddAnnotation:\n def test_it_fetches_the_annotation(self, storage, annotation, celery):\n id_ = \"test-annotation-id\"\n storage.fetch_annotation.return_value = annotation\n\n indexer.add_annotation(id_)\n\n storage.fetch_annotation.assert_called_once_with(celery.request.db, id_)\n\n def test_it_calls_index_with_annotation(self, storage, annotation, index, celery):\n id_ = \"test-annotation-id\"\n storage.fetch_annotation.return_value = annotation\n\n indexer.add_annotation(id_)\n\n index.assert_any_call(celery.request.es, annotation, celery.request)\n\n def test_it_skips_indexing_when_annotation_cannot_be_loaded(\n self, storage, index, celery\n ):\n storage.fetch_annotation.return_value = None\n\n indexer.add_annotation(\"test-annotation-id\")\n\n assert index.called is False\n\n def test_during_reindex_adds_to_current_index(\n self, storage, annotation, index, celery, settings_service\n ):\n 
settings_service.put(\"reindex.new_index\", \"hypothesis-xyz123\")\n storage.fetch_annotation.return_value = annotation\n\n indexer.add_annotation(\"test-annotation-id\")\n\n index.assert_any_call(\n celery.request.es,\n annotation,\n celery.request,\n target_index=\"hypothesis-xyz123\",\n )\n\n def test_during_reindex_adds_to_new_index(\n self, storage, annotation, index, celery, settings_service\n ):\n settings_service.put(\"reindex.new_index\", \"hypothesis-xyz123\")\n storage.fetch_annotation.return_value = annotation\n\n indexer.add_annotation(\"test-annotation-id\")\n\n index.assert_any_call(\n celery.request.es,\n annotation,\n celery.request,\n target_index=\"hypothesis-xyz123\",\n )\n\n def test_it_indexes_thread_root(self, storage, reply, delay):\n storage.fetch_annotation.return_value = reply\n\n indexer.add_annotation(\"test-annotation-id\")\n\n delay.assert_called_once_with(\"root-id\")\n\n @pytest.fixture\n def annotation(self):\n return mock.Mock(spec_set=[\"is_reply\"], is_reply=False)\n\n @pytest.fixture\n def reply(self):\n return mock.Mock(\n spec_set=[\"is_reply\", \"thread_root_id\"],\n is_reply=True,\n thread_root_id=\"root-id\",\n )\n\n @pytest.fixture\n def delay(self, patch):\n return patch(\"h.tasks.indexer.add_annotation.delay\")\n\n\nclass TestDeleteAnnotation:\n def test_it_deletes_from_index(self, delete, celery):\n id_ = \"test-annotation-id\"\n indexer.delete_annotation(id_)\n\n delete.assert_any_call(celery.request.es, id_)\n\n def test_during_reindex_deletes_from_current_index(\n self, delete, celery, settings_service\n ):\n settings_service.put(\"reindex.new_index\", \"hypothesis-xyz123\")\n\n indexer.delete_annotation(\"test-annotation-id\")\n\n delete.assert_any_call(\n celery.request.es, \"test-annotation-id\", target_index=\"hypothesis-xyz123\"\n )\n\n def test_during_reindex_deletes_from_new_index(\n self, delete, celery, settings_service\n ):\n settings_service.put(\"reindex.new_index\", \"hypothesis-xyz123\")\n\n indexer.delete_annotation(\"test-annotation-id\")\n\n delete.assert_any_call(\n celery.request.es, \"test-annotation-id\", target_index=\"hypothesis-xyz123\"\n )\n\n\nclass TestReindexUserAnnotations:\n def test_it_creates_batch_indexer(self, BatchIndexer, annotation_ids, celery):\n userid = list(annotation_ids.keys())[0]\n\n indexer.reindex_user_annotations(userid)\n\n BatchIndexer.assert_any_call(\n celery.request.db, celery.request.es, celery.request\n )\n\n def test_it_reindexes_users_annotations(self, BatchIndexer, annotation_ids):\n userid = list(annotation_ids.keys())[0]\n\n indexer.reindex_user_annotations(userid)\n\n args, _ = BatchIndexer.return_value.index.call_args\n actual = args[0]\n expected = annotation_ids[userid]\n assert sorted(expected) == sorted(actual)\n\n @pytest.fixture\n def annotation_ids(self, factories):\n userid1 = \"acct:<EMAIL>\"\n userid2 = \"acct:<EMAIL>\"\n\n return {\n userid1: [\n a.id for a in factories.Annotation.create_batch(3, userid=userid1)\n ],\n userid2: [\n a.id for a in factories.Annotation.create_batch(2, userid=userid2)\n ],\n }\n\n\nclass TestReindexAnnotationsInDateRange:\n def test_it(self, BatchIndexer, celery, matching_annotations_ids):\n indexer.reindex_annotations_in_date_range(\n datetime.datetime.utcnow() - datetime.timedelta(days=7),\n datetime.datetime.utcnow(),\n )\n\n BatchIndexer.assert_called_once_with(\n celery.request.db, celery.request.es, celery.request,\n )\n BatchIndexer.return_value.index.assert_called_once_with(Any())\n indexed_annotations = 
list(BatchIndexer.return_value.index.call_args[0][0])\n assert sorted(indexed_annotations) == sorted(matching_annotations_ids)\n\n @pytest.fixture(autouse=True)\n def matching_annotations_ids(self, factories):\n \"\"\"Annotations that're within the timeframe that we're reindexing.\"\"\"\n return [\n annotation.id\n for annotation in factories.Annotation.create_batch(\n 3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=3)\n )\n ]\n\n @pytest.fixture(autouse=True)\n def not_matching_annotations(self, factories):\n \"\"\"Annotations that're outside the timeframe that we're reindexing.\"\"\"\n before_annotations = factories.Annotation.build_batch(\n 3, updated=datetime.datetime.utcnow() - datetime.timedelta(days=14)\n )\n after_annotations = factories.Annotation.build_batch(\n 3, updated=datetime.datetime.utcnow() + datetime.timedelta(days=14)\n )\n return before_annotations + after_annotations\n\n\npytestmark = pytest.mark.usefixtures(\"settings_service\")\n\n\[email protected](autouse=True)\ndef BatchIndexer(patch):\n return patch(\"h.tasks.indexer.BatchIndexer\")\n\n\[email protected](autouse=True)\ndef celery(patch, pyramid_request):\n cel = patch(\"h.tasks.indexer.celery\")\n cel.request = pyramid_request\n return cel\n\n\[email protected](autouse=True)\ndef delete(patch):\n return patch(\"h.tasks.indexer.delete\")\n\n\[email protected](autouse=True)\ndef index(patch):\n return patch(\"h.tasks.indexer.index\")\n\n\[email protected]\ndef pyramid_request(pyramid_request):\n pyramid_request.es = mock.Mock()\n return pyramid_request\n\n\[email protected]\ndef settings_service(pyramid_config):\n service = FakeSettingsService()\n pyramid_config.register_service(service, name=\"settings\")\n return service\n\n\[email protected](autouse=True)\ndef storage(patch):\n return patch(\"h.tasks.indexer.storage\")\n", "id": "45399", "language": "Python", "matching_score": 3.197004556655884, "max_stars_count": 0, "path": "tests/h/tasks/indexer_test.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom h import models, storage\nfrom h.celery import celery, get_task_logger\nfrom h.models import Annotation\nfrom h.search.index import BatchIndexer, delete, index\n\nlog = get_task_logger(__name__)\n\n\[email protected]\ndef add_annotation(id_):\n annotation = storage.fetch_annotation(celery.request.db, id_)\n if annotation:\n index(celery.request.es, annotation, celery.request)\n\n # If a reindex is running at the moment, add annotation to the new index\n # as well.\n future_index = _current_reindex_new_name(celery.request, \"reindex.new_index\")\n if future_index is not None:\n index(\n celery.request.es, annotation, celery.request, target_index=future_index\n )\n\n if annotation.is_reply:\n add_annotation.delay(annotation.thread_root_id)\n\n\[email protected]\ndef delete_annotation(id_):\n delete(celery.request.es, id_)\n\n # If a reindex is running at the moment, delete annotation from the\n # new index as well.\n future_index = _current_reindex_new_name(celery.request, \"reindex.new_index\")\n if future_index is not None:\n delete(celery.request.es, id_, target_index=future_index)\n\n\[email protected]\ndef reindex_user_annotations(userid):\n ids = [\n a.id\n for a in celery.request.db.query(models.Annotation.id).filter_by(userid=userid)\n ]\n\n indexer = BatchIndexer(celery.request.db, celery.request.es, celery.request)\n errored = indexer.index(ids)\n if errored:\n log.warning(\"Failed to re-index annotations into ES6 %s\", errored)\n\n\[email protected]\ndef 
reindex_annotations_in_date_range(start_date, end_date, max_annotations=250000):\n \"\"\"Re-index annotations from Postgres to Elasticsearch in a date range.\n\n :param start_date: Begin at this time (greater or equal)\n :param end_date: End at this time (less than or equal)\n :param max_annotations: Maximum number of items to process overall\n\n \"\"\"\n log.info(f\"Re-indexing from {start_date} to {end_date}...\")\n\n indexer = BatchIndexer(celery.request.db, celery.request.es, celery.request)\n errored = indexer.index(\n annotation.id\n for annotation in celery.request.db.query(Annotation.id)\n .filter(Annotation.updated >= start_date)\n .filter(Annotation.updated <= end_date)\n .limit(max_annotations)\n )\n\n if errored:\n log.warning(\"Failed to re-index annotations into ES6 %s\", errored)\n\n log.info(\n \"Re-index from %s to %s complete.\", start_date, end_date,\n )\n\n\ndef _current_reindex_new_name(request, new_index_setting_name):\n settings = celery.request.find_service(name=\"settings\")\n new_index = settings.get(new_index_setting_name)\n\n return new_index\n", "id": "1055736", "language": "Python", "matching_score": 2.397664785385132, "max_stars_count": 0, "path": "h/tasks/indexer.py" }, { "content": "from dateutil.parser import isoparse\nfrom pyramid.view import view_config, view_defaults\n\nfrom h.tasks.indexer import reindex_annotations_in_date_range\n\n\n@view_defaults(\n route_name=\"admin.search\", permission=\"admin_search\",\n)\nclass SearchAdminViews:\n def __init__(self, request):\n self.request = request\n\n @view_config(\n request_method=\"GET\", renderer=\"h:templates/admin/search.html.jinja2\",\n )\n def get(self):\n return {}\n\n @view_config(\n request_method=\"POST\",\n request_param=\"reindex_date\",\n require_csrf=True,\n renderer=\"h:templates/admin/search.html.jinja2\",\n )\n def reindex_date(self):\n start_date = isoparse(self.request.params[\"start\"].strip())\n end_date = isoparse(self.request.params[\"end\"].strip())\n\n task = reindex_annotations_in_date_range.delay(start_date, end_date)\n self.request.session.flash(\n f\"Began reindexing from {start_date} to {end_date}\", \"success\"\n )\n\n return {\"indexing\": True, \"task_id\": task.id}\n", "id": "919285", "language": "Python", "matching_score": 3.160126209259033, "max_stars_count": 0, "path": "h/views/admin/search.py" }, { "content": "import datetime\n\nimport pytest\n\nfrom h.views.admin.search import SearchAdminViews\n\n\nclass TestSearchAdminViews:\n def test_get(self, views):\n assert views.get() == {}\n\n def test_reindex_date(\n self, views, reindex_annotations_in_date_range, pyramid_request\n ):\n pyramid_request.params = {\n \"start\": \"2020-09-09\",\n \"end\": \"2020-10-10\",\n }\n\n template_variables = views.reindex_date()\n\n reindex_annotations_in_date_range.delay.assert_called_once_with(\n datetime.datetime(2020, 9, 9, 0, 0), datetime.datetime(2020, 10, 10, 0, 0)\n )\n assert pyramid_request.session.peek_flash(\"success\") == [\n \"Began reindexing from 2020-09-09 00:00:00 to 2020-10-10 00:00:00\"\n ]\n assert template_variables == {\"indexing\": True, \"task_id\": 23}\n\n @pytest.fixture\n def views(self, pyramid_request):\n return SearchAdminViews(pyramid_request)\n\n\[email protected](autouse=True)\ndef reindex_annotations_in_date_range(patch):\n reindex_annotations_in_date_range = patch(\n \"h.views.admin.search.reindex_annotations_in_date_range\"\n )\n reindex_annotations_in_date_range.delay.return_value.id = 23\n return reindex_annotations_in_date_range\n", "id": "9222363", 
"language": "Python", "matching_score": 2.466956615447998, "max_stars_count": 0, "path": "tests/h/views/admin/search_test.py" }, { "content": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom unittest import mock\n\nimport pytest\nfrom pyramid import httpexceptions\nfrom webob.multidict import MultiDict\n\nfrom h.views.badge import Blocklist, badge\n\n\nclass TestBlocklist:\n @pytest.mark.parametrize(\"domain\", Blocklist.BLOCKED_DOMAINS)\n @pytest.mark.parametrize(\"prefix\", (\"http://\", \"https://\", \"httpx://\", \"//\"))\n @pytest.mark.parametrize(\"suffix\", (\"\", \"/\", \"/path?a=b\"))\n def test_it_blocks_bad_domains(self, domain, prefix, suffix):\n assert Blocklist.is_blocked(f\"{prefix}{domain}{suffix}\")\n\n @pytest.mark.parametrize(\"scheme\", Blocklist.BLOCKED_SCHEMES)\n @pytest.mark.parametrize(\"suffix\", (\"://about\", \"://newtab\"))\n def test_it_blocks_bad_schema(self, scheme, suffix):\n assert Blocklist.is_blocked(f\"{scheme}://{suffix}\")\n\n @pytest.mark.parametrize(\n \"acceptable_url\",\n (\n \"http://example.com/this/is/fine\",\n \"http://example.com//facebook.com\",\n \"http://facebook.com.om.nom\",\n \"file://c/my/magical_file.pdf\",\n \"chrome-extension://blah\",\n ),\n )\n def test_it_allows_non_blocked_items(self, acceptable_url):\n assert not Blocklist.is_blocked(acceptable_url)\n\n def test_regex_golden_master(self):\n\n # This is a golden master test intended to facilitate refactoring\n # It just states that the regex is what it last was, this allows you\n # to change how it's generated and test if you have changed what is\n # generated\n assert Blocklist._PATTERN.pattern == (\n r\"^(?:(?:chrome)://)|(?:(?:http[sx]?:)?//\"\n r\"(?:(?:facebook\\.com)|(?:www\\.facebook\\.com)|(?:mail\\.google\\.com))\"\n r\"(?:/|$))\"\n )\n\n def test_its_fast(self):\n # Check any modifications haven't made this significantly slower\n reps = 10000\n\n start = datetime.utcnow()\n for _ in range(reps):\n Blocklist.is_blocked(\"http://example.com/this/is/fine\")\n\n diff = datetime.utcnow() - start\n\n seconds = diff.seconds + diff.microseconds / 1000000\n calls_per_second = int(reps // seconds)\n\n # Handy to know while tinkering\n # print(\n # f\"Calls per second: {calls_per_second}, \"\n # f\"{1000000 / calls_per_second:.03f} μs/call\"\n # )\n\n # It should be above this number by quite a margin (20x), but we\n # don't want flaky tests\n assert calls_per_second > 50000\n\n\nclass TestBadge:\n def test_it_returns_0_if_blocked(\n self, badge_request, Blocklist, search_run,\n ):\n result = badge_request(\"http://example.com\", annotated=True, blocked=True)\n\n Blocklist.is_blocked.assert_called_with(\"http://example.com\")\n search_run.assert_not_called()\n assert result == {\"total\": 0}\n\n def test_it_sets_cache_headers_if_blocked(\n self, badge_request, Blocklist, pyramid_request\n ):\n badge_request(\"http://example.com\", annotated=True, blocked=True)\n\n cache_control = pyramid_request.response.cache_control\n\n assert cache_control.prevent_auto\n assert cache_control.public\n assert cache_control.max_age > 0\n\n def test_it_returns_0_if_uri_never_annotated(self, badge_request, search_run):\n result = badge_request(\"http://example.com\", annotated=False, blocked=False)\n\n search_run.assert_not_called()\n assert result == {\"total\": 0}\n\n def test_it_returns_number_from_search(self, badge_request, search_run):\n result = badge_request(\"http://example.com\", annotated=True, blocked=False)\n\n search_run.assert_called_once_with(\n MultiDict({\"uri\": 
\"http://example.com\", \"limit\": 0})\n )\n assert result == {\"total\": search_run.return_value.total}\n\n def test_it_raises_if_no_uri(self):\n with pytest.raises(httpexceptions.HTTPBadRequest):\n badge(mock.Mock(params={}))\n\n @pytest.fixture\n def badge_request(self, pyramid_request, factories, Blocklist):\n def caller(uri, annotated=True, blocked=False):\n if annotated:\n factories.DocumentURI(uri=uri)\n pyramid_request.db.flush()\n\n Blocklist.is_blocked.return_value = blocked\n\n pyramid_request.params[\"uri\"] = uri\n return badge(pyramid_request)\n\n return caller\n\n @pytest.fixture(autouse=True)\n def Blocklist(self, patch):\n return patch(\"h.views.badge.Blocklist\")\n\n @pytest.fixture(autouse=True)\n def search_run(self, patch):\n search_lib = patch(\"h.views.badge.search\")\n\n search_run = search_lib.Search.return_value.run\n search_run.return_value = mock.Mock(total=29)\n return search_run\n", "id": "7348548", "language": "Python", "matching_score": 1.4652386903762817, "max_stars_count": 0, "path": "tests/h/views/badge_test.py" } ]
1.889988
takaoh
[ { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nThis is ALL SENSOR use node.\nMainly echo sensor value in tarminal.\nPlease Use for your script base.\n\nby <NAME> @dashimaki360\n'''\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import Imu\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import JointState\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import String\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport numpy as np\nimport math\nimport random\nimport tf\nimport actionlib\nfrom move_base_msgs.msg import MoveBaseAction, MoveBaseGoal\nimport actionlib_msgs\nimport time\nimport json\nfrom std_msgs.msg import String\n\n#LiDARの閾値\nDETECT_POINT_NUM = 7\n\n#SEARCHの初期化時間\nSEARCH_INIT_TIME = 3\nclass MainState():\n \"\"\"\n 1. NAVIにてフィールドマーカを取得\n 2. 取得後,SEARCHを行い敵を捜索\n 2-1. 敵を見つけた場合\n ->相手がいる方向に回転(ATTACK)し,距離を走った(MOVE)後,手順2に遷移\n 2-2. 敵を見つけてない場合(一度でもATTACKになったことがない)\n 2-2-1. 手順1に遷移  \n 2-3. 敵を見つけてない場合(一度でもATTACKになったことがある)\n 2-3-1. 得点が負けている場合は手順1に遷移\n 2-3-1. 得点が勝っている場合は手順2に遷移\n \"\"\"\n INIT = 0 # 初期状態\n NAVI = 1 # Navigationによる移動\n SEARCH = 2 # 停止してLiDARによる検索\n ATTACK = 3 # 相手方向に回転\n DEFFENCE = 4 # Opencvによるディフェンス\n STOP = 5 # 停止(degug)\n MOVE = 6 # 相手方向に直進\n \n\nclass NaviTarget():\n \"\"\"\n 移動する先のフィールドのマーカをemunで定義している \n \"\"\"\n INIT = 0\n LEFTLOWER_S = 1 \n LEFTLOWER_N = 2\n LEFTUPPER_S = 3 \n LEFTUPPER_N = 4 \n CENTER_N = 5 \n CENTER_S = 6 \n CENTER_E = 7 \n CENTER_W = 8 \n RIGHTLOWER_S = 9 \n RIGHTLOWER_N = 10\n RIGHTUPPER_S = 11 \n RIGHTUPPER_N = 12\n\nclass AllSensorBot(object):\n def __init__(self, \n use_lidar=False, use_camera=False, use_imu=False,\n use_odom=False, use_joint_states=False):\n\n # velocity publisher\n self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)\n\n # State\n self.main_state = MainState.INIT # メイン状態\n self.prev_main_state = MainState.INIT # 前回メイン状態\n self.next_state = MainState.INIT # 次状態\n\n # Navigation\n self.navi_target = NaviTarget.INIT # 目指すターゲット\n self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)\n\n #Score\n self.myColor = None\n self.myScore = 0\n self.enemyColor = None\n self.enemyScore = None\n self.warState = None\n self.wartime = 0\n self.score_sub = rospy.Subscriber('/war_state', String, self.warStateCallback, queue_size=1)\n\n # lidar scan subscriber\n if use_lidar:\n self.scan = LaserScan()\n self.scanned = LaserScan()\n self.RadarRatio = 50\n self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)\n \n #LiDAR検知用グローバル変数\n self.npScanRanges = np.array(self.scan.ranges)\n self.npSubRanges = np.array(self.scan.ranges)\n\n # camera subscribver\n # please uncoment out if you use camera\n if use_camera:\n # for convert image topic to opencv obj\n self.img = None\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)\n\n # imu subscriber\n if use_imu:\n self.imu_sub = rospy.Subscriber('imu', Imu, self.imuCallback)\n\n # odom subscriber\n if use_odom:\n self.odom_sub = rospy.Subscriber('odom', Odometry, self.odomCallback)\n\n # joint_states subscriber\n if use_joint_states:\n self.odom_sub = rospy.Subscriber('joint_states', JointState, self.jointstateCallback)\n\n ####################################\n #MOVINGで使用する関数\n ####################################\n # Navigation\n def setGoal(self,x,y,yaw):\n self.client.wait_for_server()\n\n goal = MoveBaseGoal()\n goal.target_pose.header.frame_id = \"map\"\n goal.target_pose.header.stamp = 
rospy.Time.now()\n goal.target_pose.pose.position.x = x\n goal.target_pose.pose.position.y = y\n\n # Euler to Quartanion\n q=tf.transformations.quaternion_from_euler(0,0,yaw) \n goal.target_pose.pose.orientation.x = q[0]\n goal.target_pose.pose.orientation.y = q[1]\n goal.target_pose.pose.orientation.z = q[2]\n goal.target_pose.pose.orientation.w = q[3]\n\n self.client.send_goal(goal)\n wait = self.client.wait_for_result()\n if not wait:\n rospy.logerr(\"Action server not available!\")\n rospy.signal_shutdown(\"Action server not available!\")\n else:\n return self.client.get_result()\n\n\n ####################################\n #SEARCHで使用する関数\n ####################################\n def is_detect_ememy_by_LiDAR(self):\n \"\"\"\n LiDARで敵を検知したかを確認する\n ・閾値以上の距離が移動している点が3つ以上ある場合,検知したと判断する\n \n 戻り値 True:検知/False:未検知\n \"\"\"\n count = sum(self.npSubRanges)\n if count > DETECT_POINT_NUM:\n print(\"[Ture]Detect_EMEMY:%d \" % count)\n return True\n else:\n print(\"[Flase]Detect_EMEMY:%d \" % count)\n return False\n \n ####################################\n #得点管理\n ####################################\n #以下を参考にさせていただきました\n #https://raw.githubusercontent.com/rhc-ipponmanzoku/burger_war/master/burger_war/scripts/testRun.py\n def warStateCallback(self,data):\n warState = data\n jsonWarState = json.loads(warState.data)\n self.warState = jsonWarState[\"scores\"]\n\n # which team?\n if jsonWarState[\"players\"][\"r\"] == \"you\":\n self.myColor = \"r\"\n self.enemyColor = \"b\"\n else:\n self.myColor = \"b\"\n self.enemyColor = \"r\"\n\n #update myScore\n self.myScore = jsonWarState[\"scores\"][self.myColor]\n\n #update enemyScore\n self.enemyScore = jsonWarState[\"scores\"][self.enemyColor]\n\n #update war time\n self.wartime = jsonWarState[\"time\"]\n \n print('=================================')\n print('myScore: {0}'.format(self.myScore))\n print('enemyScore: {0}'.format(self.enemyScore))\n print('wartime: {0}'.format(self.wartime))\n print('=================================')\n\n ####################################\n #状態処理関数\n ####################################\n def func_state_init(self):\n self.next_state = MainState.NAVI\n return\n\n def func_state_navigation(self):\n \"\"\"\n 1. 
順番にマーカを取得しに行く\n \"\"\"\n if self.navi_target == NaviTarget.INIT: \n self.navi_target = NaviTarget.LEFTLOWER_S\n return\n\n if self.navi_target == NaviTarget.LEFTLOWER_S: \n self.setGoal(-0.9,0.5,0)\n self.navi_target = NaviTarget.LEFTLOWER_N\n self.next_state = MainState.SEARCH\n return\n\n if self.navi_target == NaviTarget.LEFTLOWER_N:\n self.setGoal(-0.2,0.4,3.1415)\n self.navi_target = NaviTarget.CENTER_W\n self.next_state = MainState.SEARCH\n return\n\n if self.navi_target == NaviTarget.CENTER_W:\n self.setGoal(-0.2,0.4,-3.1415/2)\n self.navi_target = NaviTarget.LEFTUPPER_S\n self.next_state = MainState.SEARCH\n return\n\n if self.navi_target == NaviTarget.LEFTUPPER_S: \n self.setGoal(-0.2,0.4,0)\n self.navi_target = NaviTarget.RIGHTLOWER_N\n self.next_state = MainState.SEARCH\n return\n\n if self.navi_target == NaviTarget.RIGHTLOWER_N: \n self.setGoal(-0.2,-0.4,3.1415)\n self.navi_target = NaviTarget.CENTER_E\n self.next_state = MainState.SEARCH\n return\n\n if self.navi_target == NaviTarget.CENTER_E: \n self.setGoal(-0.2,-0.4,3.1415/2)\n self.navi_target = NaviTarget.RIGHTUPPER_S\n self.next_state = MainState.SEARCH\n return\n\n if self.navi_target == NaviTarget.RIGHTUPPER_S: \n self.setGoal(-0.2,-0.4,0)\n self.navi_target = NaviTarget.RIGHTLOWER_S\n self.next_state = MainState.SEARCH\n return\n\n if self.navi_target == NaviTarget.RIGHTLOWER_S:\n self.setGoal(-0.9,-0.5,0)\n #取得したいマーカをすべて獲得したので停止\n self.navi_target = NaviTarget.INIT\n self.next_state = MainState.SEARCH\n return\n \n def func_state_search(self):\n \"\"\"\n 1. WAITを入れて移動していたときのLiDARの取得情報を初期化\n 2. 閾値以上の距離が移動している点が3つ以上ある場合\n -> ATTACKモードに移行\n 閾値以上の距離が移動している点が3つ以上ない場合\n -> NAVIモードに移行 or SEARCHモードに移行\n \"\"\"\n \n time_count = 0\n\n #移動していたときのLiDARの取得情報を初期化\n while time_count <= SEARCH_INIT_TIME:\n \n # Radarの情報を取得\n if len(self.scan.ranges) != 0:\n bot.Radar()\n \n # 1秒Wait\n rospy.sleep(1)\n time_count = time_count + 1\n\n if self.is_detect_ememy_by_LiDAR():\n self.next_state = MainState.ATTACK\n return\n\n if self.prev_main_state == MainState.NAVI:\n self.next_state = MainState.NAVI\n else:\n if self.myScore > self.enemyScore:\n self.next_state = MainState.SEARCH\n else:\n self.next_state = MainState.NAVI\n\n def func_state_attack(self):\n \n #変数初期化\n npMaskedRanges = self.npScanRanges*self.npSubRanges\n total_angle = 0\n count = 0\n \n for i in range(len(npMaskedRanges)):\n if npMaskedRanges[i] != 0: \n total_angle = total_angle + i\n count = count + 1\n if count >= DETECT_POINT_NUM:\n break\n\n #出現数をカウント\n #count = sum(self.npSubRanges)\n #敵がいる角度の計算\n angle_deg = total_angle / count\n print(\"Ememy Position Angle(deg):%f \" % angle_deg)\n \n angle_rad = np.deg2rad(angle_deg) \n print(\"Ememy Position Angle(rad):%f \" % angle_rad)\n \n print \"**************\"\n print \"ATTACK!!!!!!!!!\"\n print \"**************\"\n\n #旋回開始\n twist = Twist()\n twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0\n twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = angle_rad\n \n # publish twist topic\n self.vel_pub.publish(twist)\n self.next_state = MainState.STOP\n return\n\n def func_state_defence(self):\n return\n\n def func_state_stop(self):\n print \"**************\"\n print \"STOP!!!!!!!!!\"\n print \"**************\"\n twist = Twist()\n twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0\n twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0\n # publish twist topic\n self.vel_pub.publish(twist)\n if self.prev_main_state == MainState.ATTACK:\n self.next_state = MainState.MOVE\n else:\n 
self.next_state = MainState.SEARCH\n return\n \n def func_state_moving(self):\n print \"**************\"\n print \"MOVING!!!!!!!!!\"\n print \"**************\"\n twist = Twist()\n twist.linear.x = 20; twist.linear.y = 0; twist.linear.z = 0\n twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0\n # publish twist topic\n self.vel_pub.publish(twist)\n\n self.next_state = MainState.STOP\n return\n\n def strategy(self):\n\n r = rospy.Rate(1)\n \n while not rospy.is_shutdown():\n # メイン状態処理を行う\n if self.main_state == MainState.INIT:\n # 初期化時\n self.func_state_init()\n elif self.main_state == MainState.NAVI:\n # 移動\n self.func_state_navigation()\n elif self.main_state == MainState.SEARCH:\n # 敵捜索\n self.func_state_search()\n elif self.main_state == MainState.ATTACK:\n # 敵に向けて回転\n self.func_state_attack()\n elif self.main_state == MainState.DEFFENCE:\n # 守り\n self.func_state_defence()\n elif self.main_state == MainState.STOP:\n # 停止\n self.func_state_stop()\n elif self.main_state == MainState.MOVE:\n # 敵に向けて全身\n self.func_state_moving()\n else:\n pass\n\n # DEBUG Print\n print('main_state = ',self.main_state)\n print('next_state = ',self.next_state)\n\n # メイン状態を次の状態に更新\n self.prev_main_state = self.main_state \n self.main_state = self.next_state\n # 1秒Wait\n r.sleep()\n\n def Radar(self):\n \"\"\"\n Radar map from LIDAR\n \"\"\"\n print \"Radar func\"\n if len(self.scanned.ranges) == 0:\n self.scanned.ranges = self.scan.ranges[:]\n self.npScanRanges = np.array(self.scan.ranges)\n npScannedRanges = np.array(self.scanned.ranges)\n\n #LiDAR検知用グローバル変数に変更\n self.npSubRanges = abs(self.npScanRanges - npScannedRanges)\n for i in range(len(self.npSubRanges)):\n if self.npSubRanges[i] < 0.15:\n self.npSubRanges[i] = 0\n else:\n self.npSubRanges[i] = 1\n \n npMaskedRanges = self.npScanRanges*self.npSubRanges\n\n \"\"\"\n if self.npSubRanges[i] != 0:\n print \"i=%d Range=%f\" %(i,self.npSubRanges[i])\n print self.npSubRanges\n \"\"\"\n \"\"\"\n Create blank image with 701x701[pixel]\n \"\"\"\n height = int(self.scan.range_max * self.RadarRatio * 2 + 1)\n width = int(self.scan.range_max * self.RadarRatio * 2 + 1)\n radar = np.ones((height,width,3),np.uint8)*40\n origin_x = int(self.scan.range_max * self.RadarRatio)\n origin_y = int(self.scan.range_max * self.RadarRatio)\n #radar.itemset((origin_x,origin_y,2),255)\n #radar[origin_x,origin_y] = [255,255,255]\n \n for n in range(0,width):\n radar.itemset((origin_y,n,2),255)\n radar.itemset((n,origin_x,2),255)\n \n \"\"\"\n for i in range(len(npMaskedRanges)):\n if npMaskedRanges[i] != 0:\n if i <= 90:\n ang = np.deg2rad(90 - i)\n x = origin_x - int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))\n y = origin_y - int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n elif i > 90 and i <= 180:\n ang = np.deg2rad(i - 90)\n x = origin_x - int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))\n y = origin_y + int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n elif i > 180 and i <= 270:\n ang = np.deg2rad(270 - i)\n x = origin_x + int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))\n y = origin_y + int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n elif i > 270 and i <= 359:\n ang = np.deg2rad(i - 270)\n x = origin_x + int(self.RadarRatio * npMaskedRanges[i] * 
math.cos(ang))\n y = origin_y - int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n #print \"ang:%f x:%d y:%d\" %(np.rad2deg(ang),x,y)\n radar.itemset((y,x,1),255)\n \"\"\"\n #cv2.imshow('Radar',radar)\n cv2.waitKey(1)\n self.scanned.ranges = self.scan.ranges[:]\n return\n\n # lidar scan topic call back sample\n # update lidar scan state\n def lidarCallback(self, data):\n self.scan = data\n #print self.scan.range_min\n #rospy.loginfo(self.scan)\n\n # camera image call back sample\n # comvert image topic to opencv object and show\n def imageCallback(self, data):\n try:\n self.img = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n rospy.logerr(e)\n\n cv2.imshow(\"Image window\", self.img)\n cv2.waitKey(1)\n\n # imu call back sample\n # update imu state\n def imuCallback(self, data):\n self.imu = data\n rospy.loginfo(self.imu)\n\n # odom call back sample\n # update odometry state\n def odomCallback(self, data):\n self.pose_x = data.pose.pose.position.x\n self.pose_y = data.pose.pose.position.y\n rospy.loginfo(\"odom pose_x: {}\".format(self.pose_x))\n rospy.loginfo(\"odom pose_y: {}\".format(self.pose_y))\n\n # jointstate call back sample\n # update joint state\n def jointstateCallback(self, data):\n self.wheel_rot_r = data.position[0]\n self.wheel_rot_l = data.position[1]\n rospy.loginfo(\"joint_state R: {}\".format(self.wheel_rot_r))\n rospy.loginfo(\"joint_state L: {}\".format(self.wheel_rot_l))\n\nif __name__ == '__main__':\n rospy.init_node('all_sensor_sample')\n bot = AllSensorBot(use_lidar=True, use_camera=False, use_imu=False,\n use_odom=False, use_joint_states=False)\n bot.strategy()\n\n\n", "id": "10915710", "language": "Python", "matching_score": 7.540803909301758, "max_stars_count": 1, "path": "burger_war_dev/scripts/main.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nThis is ALL SENSOR use node.\nMainly echo sensor value in tarminal.\nPlease Use for your script base.\n\nby <NAME> @dashimaki360\n'''\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import Imu\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import JointState\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import String\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport numpy as np\nimport math\n\n\nclass AllSensorBot(object):\n def __init__(self, \n use_lidar=False, use_camera=False, use_imu=False,\n use_odom=False, use_joint_states=False):\n\n # velocity publisher\n self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)\n\n # lidar scan subscriber\n if use_lidar:\n self.scan = LaserScan()\n self.scanned = LaserScan()\n self.RadarRatio = 50\n self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)\n\n # camera subscribver\n # please uncoment out if you use camera\n if use_camera:\n # for convert image topic to opencv obj\n self.img = None\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)\n\n # imu subscriber\n if use_imu:\n self.imu_sub = rospy.Subscriber('imu', Imu, self.imuCallback)\n\n # odom subscriber\n if use_odom:\n self.odom_sub = rospy.Subscriber('odom', Odometry, self.odomCallback)\n\n # joint_states subscriber\n if use_joint_states:\n self.odom_sub = rospy.Subscriber('joint_states', JointState, self.jointstateCallback)\n\n def strategy(self):\n '''\n calc Twist and publish cmd_vel topic\n 
'''\n r = rospy.Rate(1)\n\n while not rospy.is_shutdown():\n # update twist\n twist = Twist()\n twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0\n twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0\n if len(self.scan.ranges) != 0:\n bot.Radar()\n # publish twist topic\n self.vel_pub.publish(twist)\n r.sleep()\n\n def Radar(self):\n \"\"\"\n Radar map from LIDAR\n \"\"\"\n print \"Radar func\"\n if len(self.scanned.ranges) == 0:\n self.scanned.ranges = self.scan.ranges[:]\n npScanRanges = np.array(self.scan.ranges)\n npScannedRanges = np.array(self.scanned.ranges)\n npSubRanges = abs(npScanRanges - npScannedRanges)\n for i in range(len(npSubRanges)):\n if npSubRanges[i] < 0.15:\n npSubRanges[i] = 0\n else:\n npSubRanges[i] = 1\n npMaskedRanges = npScanRanges*npSubRanges\n \"\"\"\n if npSubRanges[i] != 0:\n print \"i=%d Range=%f\" %(i,npSubRanges[i])\n print npSubRanges\n \"\"\"\n \"\"\"\n Create blank image with 701x701[pixel]\n \"\"\"\n height = int(self.scan.range_max * self.RadarRatio * 2 + 1)\n width = int(self.scan.range_max * self.RadarRatio * 2 + 1)\n radar = np.ones((height,width,3),np.uint8)*40\n origin_x = int(self.scan.range_max * self.RadarRatio)\n origin_y = int(self.scan.range_max * self.RadarRatio)\n #radar.itemset((origin_x,origin_y,2),255)\n #radar[origin_x,origin_y] = [255,255,255]\n \n for n in range(0,width):\n radar.itemset((origin_y,n,2),255)\n radar.itemset((n,origin_x,2),255)\n \n \n for i in range(len(npMaskedRanges)):\n if npMaskedRanges[i] != 0:\n if i <= 90:\n ang = np.deg2rad(90 - i)\n x = origin_x - int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))\n y = origin_y - int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n elif i > 90 and i <= 180:\n ang = np.deg2rad(i - 90)\n x = origin_x - int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))\n y = origin_y + int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n elif i > 180 and i <= 270:\n ang = np.deg2rad(270 - i)\n x = origin_x + int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))\n y = origin_y + int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n elif i > 270 and i <= 359:\n ang = np.deg2rad(i - 270)\n x = origin_x + int(self.RadarRatio * npMaskedRanges[i] * math.cos(ang))\n y = origin_y - int(self.RadarRatio * npMaskedRanges[i] * math.sin(ang))\n print \"i:%d ang:%f x:%d y:%d range:%f\" %(i, np.rad2deg(ang),x,y,npMaskedRanges[i])\n #print \"ang:%f x:%d y:%d\" %(np.rad2deg(ang),x,y)\n radar.itemset((y,x,1),255)\n \n cv2.imshow('Radar',radar)\n cv2.waitKey(1)\n self.scanned.ranges = self.scan.ranges[:]\n return\n\n # lidar scan topic call back sample\n # update lidar scan state\n def lidarCallback(self, data):\n self.scan = data\n #print self.scan.range_min\n #rospy.loginfo(self.scan)\n\n # camera image call back sample\n # comvert image topic to opencv object and show\n def imageCallback(self, data):\n try:\n self.img = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n rospy.logerr(e)\n\n cv2.imshow(\"Image window\", self.img)\n cv2.waitKey(1)\n\n # imu call back sample\n # update imu state\n def imuCallback(self, data):\n self.imu = data\n rospy.loginfo(self.imu)\n\n # odom call back sample\n # update odometry state\n def odomCallback(self, data):\n self.pose_x = 
data.pose.pose.position.x\n self.pose_y = data.pose.pose.position.y\n rospy.loginfo(\"odom pose_x: {}\".format(self.pose_x))\n rospy.loginfo(\"odom pose_y: {}\".format(self.pose_y))\n\n # jointstate call back sample\n # update joint state\n def jointstateCallback(self, data):\n self.wheel_rot_r = data.position[0]\n self.wheel_rot_l = data.position[1]\n rospy.loginfo(\"joint_state R: {}\".format(self.wheel_rot_r))\n rospy.loginfo(\"joint_state L: {}\".format(self.wheel_rot_l))\n\nif __name__ == '__main__':\n rospy.init_node('all_sensor_sample')\n bot = AllSensorBot(use_lidar=True, use_camera=False, use_imu=False,\n use_odom=False, use_joint_states=False)\n bot.strategy()\n\n\n", "id": "8237402", "language": "Python", "matching_score": 6.638664245605469, "max_stars_count": 1, "path": "burger_war_dev/scripts/Radar.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nThis is ALL SENSOR use node.\nMainly echo sensor value in tarminal.\nPlease Use for your script base.\n\nby <NAME> @dashimaki360\n'''\n\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Image\nfrom sensor_msgs.msg import Imu\nfrom sensor_msgs.msg import LaserScan\nfrom sensor_msgs.msg import JointState\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import String\nfrom cv_bridge import CvBridge, CvBridgeError\nimport cv2\nimport numpy as np\n\nclass AllSensorBot(object):\n def __init__(self, \n use_lidar=False, use_camera=False, use_imu=False,\n use_odom=False, use_joint_states=False):\n\n # velocity publisher\n self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)\n\n # lidar scan subscriber\n if use_lidar:\n self.scan = LaserScan()\n self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)\n\n # camera subscribver\n # please uncoment out if you use camera\n if use_camera:\n # for convert image topic to opencv obj\n self.img1 = None\n self.img2 = None\n self.img3 = None\n self.bridge = CvBridge()\n self.image_sub = rospy.Subscriber('image_raw', Image, self.imageCallback)\n\n # imu subscriber\n if use_imu:\n self.imu_sub = rospy.Subscriber('imu', Imu, self.imuCallback)\n\n # odom subscriber\n if use_odom:\n self.odom_sub = rospy.Subscriber('odom', Odometry, self.odomCallback)\n\n # joint_states subscriber\n if use_joint_states:\n self.odom_sub = rospy.Subscriber('joint_states', JointState, self.jointstateCallback)\n\n def strategy(self):\n '''\n calc Twist and publish cmd_vel topic\n '''\n r = rospy.Rate(1)\n\n while not rospy.is_shutdown():\n # update twist\n twist = Twist()\n twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0\n twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = 0.0\n\n # publish twist topic\n self.vel_pub.publish(twist)\n if self.img3 is not None:\n bot.Move_Track()\n r.sleep()\n\n\n # lidar scan topic call back sample\n # update lidar scan state\n def lidarCallback(self, data):\n self.scan = data\n rospy.loginfo(self.scan)\n\n # camera image call back sample\n # comvert image topic to opencv object and show\n def imageCallback(self, data):\n # Temp. 
to get 2 images.\n try:\n self.img3 = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n \n except CvBridgeError as e:\n rospy.logerr(e)\n\n def Move_Track(self):\n if self.img2 is None:\n self.img2 = self.img3\n if self.img1 is None:\n self.img1 = self.img2\n\n\n # make diff image\n gray1 = cv2.cvtColor(self.img1, cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(self.img2, cv2.COLOR_BGR2GRAY)\n gray3 = cv2.cvtColor(self.img3, cv2.COLOR_BGR2GRAY)\n diff1 = cv2.absdiff(gray2, gray1)\n diff2 = cv2.absdiff(gray3, gray2)\n # get and image\n #im = cv2.bitwise_and(diff1,diff2)\n # apply thresh\n diff1_th = cv2.threshold(diff1, 60, 255, cv2.THRESH_BINARY)[1]\n diff2_th = cv2.threshold(diff2, 60, 255, cv2.THRESH_BINARY)[1]\n diff_and = cv2.bitwise_and(diff1_th,diff2_th)\n\n contours1, hierarchy1 = cv2.findContours(diff1_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1:]\n contours2, hierarchy2 = cv2.findContours(diff2_th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1:]\n contours_and, hierarchy_and = cv2.findContours(diff_and, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[1:]\n\n contours1 = list(filter(lambda x: cv2.contourArea(x) > 200, contours1))\n contours2 = list(filter(lambda x: cv2.contourArea(x) > 200, contours2))\n contours_and = list(filter(lambda x: cv2.contourArea(x) > 200, contours_and))\n #print(\"Contorus Num: \" + str(len(contours1)) + str(len(contours2)))\n #print(\"Contorus Num: \" + str(len(contours_and)))\n big_inx = 0 \n big_area = 0\n big_area_found = False\n for x in range(0, len(contours_and)):\n if len(contours_and[x]>0):\n cont_area = cv2.contourArea(contours_and[x])\n #print('x=%d area=%d' %(x,cont_area))\n if cont_area > big_area:\n big_area = cont_area\n big_inx = x\n big_area_found = True\n #print('big_inx=%d big_area=%d' %(big_inx,big_area))\n if big_area_found == True: \n rect_and = contours_and[big_inx]\n x,y,w,h = cv2.boundingRect(rect_and)\n #print(\"x=%x,y=%d,w=%d,h=%d\" %(x,y,w,h))\n cv2.rectangle(self.img2,(x,y),(x+w,y+h),(0,255,0),5)\n\n cv2.imshow(\"Image window1\", self.img2)\n #cv2.imshow(\"Image window2\", self.img3)\n\n #cv2.imshow(\"Diff Image window1\", diff1_th)\n #cv2.imshow(\"Diff Image window2\", diff2_th)\n #cv2.imshow(\"Diff Image window1\", diff_and)\n cv2.waitKey(1)\n self.img1 = self.img2\n self.img2 = self.img3\n\n # imu call back sample\n # update imu state\n def imuCallback(self, data):\n self.imu = data\n rospy.loginfo(self.imu)\n\n # odom call back sample\n # update odometry state\n def odomCallback(self, data):\n self.pose_x = data.pose.pose.position.x\n self.pose_y = data.pose.pose.position.y\n rospy.loginfo(\"odom pose_x: {}\".format(self.pose_x))\n rospy.loginfo(\"odom pose_y: {}\".format(self.pose_y))\n\n # jointstate call back sample\n # update joint state\n def jointstateCallback(self, data):\n self.wheel_rot_r = data.position[0]\n self.wheel_rot_l = data.position[1]\n rospy.loginfo(\"joint_state R: {}\".format(self.wheel_rot_r))\n rospy.loginfo(\"joint_state L: {}\".format(self.wheel_rot_l))\n\nif __name__ == '__main__':\n rospy.init_node('all_sensor_sample')\n bot = AllSensorBot(use_lidar=False, use_camera=True, use_imu=False,\n use_odom=False, use_joint_states=False)\n bot.strategy()\n\n\n", "id": "7753387", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "burger_war_dev/scripts/MoveTracking.py" }, { "content": "#make test file\n", "id": "7204837", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "burger_war_dev/scripts/nakamura_test.py" } ]
3.319332
gracesco
[ { "content": "import datetime as dt\nimport numpy as np\nimport pandas as pd\nimport os\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n# Database setup\nengine = create_engine(os.path.join(\"sqlite:///Resources/hawaii.sqlite\"))\n# reflect \nBase = automap_base()\nBase.prepare(engine, reflect=True)\n# Save reference to the table\nMeasurement = Base.classes.measurement \nStation = Base.classes.station\n\n# Flask setup\napp = Flask(__name__)\n\n# Flask Routes\[email protected](\"/\")\ndef welcome():\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/start/START_DATE(YYYY-MM-DD)<br/>\"\n f\"/api/v1.0/start/end/START_DATE(YYYY-MM-DD)/END_DATE(YYYY-MM-DD)\"\n )\n\n# Precipitation Route\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n results = session.query(Measurement.date, Measurement.prcp).all()\n session.close()\n\n prcp = {}\n for row in results:\n if row[0] in prcp:\n prcp[row[0]].append(row[1])\n else:\n prcp[row[0]]=[row[1]]\n\n return jsonify(prcp)\n\n# Stations Route\[email protected](\"/api/v1.0/stations\")\ndef stations():\n session = Session(engine)\n results = session.query(Station.name, Station.station).all()\n session.close()\n\n station_dict={}\n for row in results:\n station_dict[row[0]]=row[1]\n\n return jsonify(station_dict)\n\n# tobs Route\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n session = Session(engine)\n results = session.query(Measurement.tobs).filter(Measurement.date <= '2017-08-23').filter(Measurement.date >= '2016-08-23').all()\n session.close()\n\n tobs_12_mos = list(np.ravel(results))\n return jsonify(tobs_12_mos)\n\n# Start Date Route\[email protected](\"/api/v1.0/start/<start_date>\")\ndef start(start_date):\n session = Session(engine)\n results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start_date).all()\n session.close()\n\n return jsonify(results)\n\n# Start and End Date Route\[email protected](\"/api/v1.0/start/end/<start_date>/<end_date>\")\ndef start_end(start_date, end_date):\n session = Session(engine)\n results = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n session.close()\n\n return jsonify(results)\n\nif __name__ == \"__main__\":\n app.run(debug=True)\nprint ('works')", "id": "3104700", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Resources/app.py" }, { "content": "# OpenWeatherMap API Key\nweather_api_key = \"YOUR API KEY HERE!\"\n\n# Google API Key\ng_key = \"YOUR API Key HERE! \"\n", "id": "8432284", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "starter_code/api_keys.py" } ]
0
techmod
[ { "content": "adad=int(input())\nprint(int((str(adad % 10)+str((adad//10) % 10)+str(adad // 100)))*2)\n", "id": "3991589", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-1/Inverse.py" }, { "content": "adad=input()\nadad=adad.split()\nfor i in range(len(adad)):\n adad[i]=float(adad[i])\nprint(int(max(adad)-min(adad)))\n\n", "id": "7126274", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-3/Nowruz.py" }, { "content": "adad=int(input())\nadad2=10 - (adad % 10)\nprint(adad2+adad)", "id": "5298209", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-1/Multiple.py" }, { "content": "adad_aval=int(input())\nadad_dovom=int(input())\nif adad_aval>=adad_dovom:\n print(adad_aval)\nelse:\n print(adad_dovom)\n", "id": "11384350", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "chapter-1/Larger.py" }, { "content": "tedad=int(input())\nsherkat_karde= input()\nsherkat_karde=sherkat_karde.split()\nfor i in range(len(sherkat_karde)):\n sherkat_karde[i]=int(sherkat_karde[i])\nsherkat_karde=[i for i in sherkat_karde if i<=2]\nsherkat_karde.sort()\nsherkat_konande=sherkat_karde[:3]\nghabel_sherkat_kardan=int(len(sherkat_karde)/3)\nprint(ghabel_sherkat_kardan)\n\n", "id": "7043839", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-3/Kabaddi.py" }, { "content": "import math\nnumber=int(input())\ntedad=0\nlist1=[]\nwhile tedad != number :\n x=int(input())\n x=abs(x)\n tedad +=1\n a=math.sqrt(x)\n b=('%.15f'%a)\n list1.append(b[:-11])\nfor i in range (len(list1)):\n print(list1[i])", "id": "5411400", "language": "Python", "matching_score": 0.2959790527820587, "max_stars_count": 0, "path": "chapter-4/Square.py" }, { "content": "my_input=int(input())\ntedad=0\nclass_list = dict()\nwhile tedad != my_input:\n data = input()\n tedad +=1\n temp = data.split(' ')\n class_list[temp[0]] = (temp[1])\n\n\njomle = input()\nlst = jomle.split()\nj = ''\n\nfor i in lst:\n if i in class_list:\n j = j + class_list.get(i) + ' '\n else:\n j = j + i + ' '\n\nprint(j)", "id": "5143600", "language": "Python", "matching_score": 1.569983959197998, "max_stars_count": 0, "path": "chapter-3/Translator.py" }, { "content": "import collections\nmy_input=int(input())\ntedad=0\nara={}\nvorodi=[]\nwhile tedad!=my_input:\n y=input()\n tedad+=1\n vorodi.append(y)\nfor letter in vorodi:\n if letter in ara:\n ara[letter]+=1\n else:\n ara[letter]=1\nfor letter in sorted(ara):\n print(letter,ara[letter])", "id": "10672806", "language": "Python", "matching_score": 0.969979465007782, "max_stars_count": 0, "path": "chapter-3/Vote.py" }, { "content": "input_str = input()\nif 'AB' in input_str and 'BA' in input_str:\n x = input_str.index('AB')\n y = input_str.index('BA')\n if abs(y - x) >= 2:\n print('YES')\n elif input_str=='ABABAB':\n print('YES')\n else:\n print('NO')\nelse:\n print('NO')", "id": "4335180", "language": "Python", "matching_score": 1.4142135381698608, "max_stars_count": 0, "path": "chapter-3/Substring.py" }, { "content": "inpt=input()\nh1=inpt.find('h')\nh2=inpt.find('e',h1+1)\nh3=inpt.find('l',h2+1)\nh4=inpt.find('l',h3+1)\nh5=inpt.find('o',h4+1)\nTF=h1>=0 and h2>h1 and h3>h2 and h4>h3 and h5>h4\nif TF:\n print('YES')\nelse:\n print('NO')", "id": "1606848", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "chapter-3/Greets.py" }, { "content": "num=input()\nnum2=int(num)\nlst=[]\nfor i in range(num2):\n lap = list(map(int, input().split()))\n 
lst.append(lap)\n\nminimumesh = (min(lst))\nmaximumesh = (max(lst))\nif minimumesh[-1] >= maximumesh[-1]:\n print('happy irsa')\nelse:\n print('poor irsa')\n\n", "id": "3898085", "language": "Python", "matching_score": 1.166439175605774, "max_stars_count": 0, "path": "chapter-3/Laptops.py" }, { "content": "num=0\nlst = []\nsecond=[]\nwhile num != -1:\n num = int(input())\n lst.append(num)\nls2=lst.copy()\nls2.remove(max(ls2))\nprint(max(lst),max(ls2))\n\n\n", "id": "10634502", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-2/Candidate.py" }, { "content": "wins = 0\ntotal = 0\nmosavi=0\nfor i in range(30):\n num = int(input())\n if num == 3:\n wins += 1\n total += num\n elif num == 1:\n mosavi += 1\n total += num\nprint(total, wins)\n", "id": "6121040", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-2/Points.py" }, { "content": "num=0\nbozorg=0\nwhile num!=-1:\n num = int(input())\n if num>bozorg:\n bozorg=num\nprint(bozorg)\n\n", "id": "7070911", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-2/Parliament.py" }, { "content": "num = int(input())\nif num > 1:\n for i in range(2, num):\n if (num % i) == 0:\n print('not prime')\n break\n else:\n print('prime')\nelse:\n print('not prime')\n", "id": "4495501", "language": "Python", "matching_score": 0.4449537396430969, "max_stars_count": 0, "path": "chapter-2/Prime.py" }, { "content": "#def divisor is from:\n#https://www.w3resource.com/python-exercises/basic/python-basic-1-exercise-24.php\n\n\ndef divisor(n):\n for i in range(n):\n x = len([i for i in range(1, n+1) if not n % i])\n return x\n\n\nnums = []\ni = 0\nwhile i < 20:\n preNum = int(input())\n if(preNum > 0):\n nums.append([divisor(preNum), preNum])\n i += 1\n \n\nnums.sort()\n\nf=nums[len(nums)-1]\nx=f.copy()\ny = (x[::-1])\nprint(*y, sep=\" \")\n\n", "id": "65367", "language": "Python", "matching_score": 0.9778176546096802, "max_stars_count": 0, "path": "chapter-2/Divisor.py" }, { "content": "let = (str(input()))\nnum_1=let.count('1')\nnum_2=let.count('2')\nnum_3=let.count('3')\nnum_1=num_1*(\"1+\")\nnum_2=num_2*(\"2+\")\nnum_3=num_3*(\"3+\")\nprint(num_1+num_2+num_3[:-1])\n\n", "id": "10937760", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-3/Sum.py" }, { "content": "let = (str(input())).lower()\nlet = let.replace(\"a\", '', -1)\nlet = let.replace(\"i\", '', -1)\nlet = let.replace(\"e\", '', -1)\nlet = let.replace(\"o\", '', -1)\nlet = let.replace(\"u\", '', -1)\nlet = let.replace('', '.')\nlet=let[:-1]\nprint(let)\n", "id": "11170617", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "chapter-3/Strings.py" }, { "content": "import random\na=1\nb=99\nhads=random.randint(a,b)\nprint(hads)\njavab=str(input('please enter your javab: '))\nwhile javab!='d':\n if javab=='b':\n a=hads\n hads = random.randint(a, b)\n print(hads)\n javab = str(input('please enter your javab: '))\n elif javab =='k':\n b=hads\n hads = random.randint(a, b)\n print(hads)\n javab = str(input('please enter your javab: '))\n\n", "id": "4336538", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "chapter-2/game.py" }, { "content": "word=str(input())\nupper=0\nlower=0\nfor i in word:\n if i.isupper():\n upper=upper+1\n elif i.islower():\n lower=lower+1\nif upper>lower:\n print(word.upper())\nelse:\n print(word.lower())", "id": "1131956", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "chapter-3/Lowercase.py" 
}, { "content": "first=int(input())\nsecond=int(input())\nprint(first*second)\n", "id": "1655876", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "chapter-1/Rectangle.py" }, { "content": "age=int(input())\nif age>0 and age<6:\n print('khordsal')\nelif age>=6 and age<10:\n print('koodak')\nelif age>=10 and age<14:\n print('nojavan')\nelif age>=14 and age<24:\n print('javan')\nelif age>=24 and age<40:\n print('bozorgsal')\nelif age>=40:\n print('miansal')\n", "id": "2422894", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "chapter-1/Determine.py" }, { "content": "import pandas as pd\nimport numpy as np\nimport requests\nimport subprocess\nimport os\nimport json\n\npost_url = \"http://192.168.115.153:8000/api/news/\"\nheaders = {'Content-type': 'application/json'}\n##################### delete previous new_files\nfiles_in_directory = os.listdir('C:/telegram-messages-dump-master/new_NEWS')\nfiltered_files = [file for file in files_in_directory if file.endswith(\".csv\")]\nfor file in filtered_files:\n\tpath_to_file = os.path.join('C:/telegram-messages-dump-master/new_NEWS', file)\n\tos.remove(path_to_file)\n##########################recieve manifest###########################\nurl='http://192.168.115.153:8000/api/manifest/'\nr=requests.get(url)\ndata = r.json()\nprint(data['count'])\nt=data['count']\nprint(t)\nl = data['results'][t-1]['resources']\ntelegarm_channel=[d['name'] for d in l if 'name' in d]\nrecive_manifest=[d['name'] for d in l if 'name' in d]\nprint(telegarm_channel)\n######################################################################\n############################\n\ntelegram_query=[]\ntelegram_query_1=[]\nfor i in recive_manifest:\n #print(\";telegram-messages-dump -p 98910******* -o C:\"+\"\\\\\"+\"telegram-messages-dump-master\\\\{}.csv --continue\".format(i))\n \n telegram_query.append(\";telegram-messages-dump -p 98910******* -o C:\"+\"\\\\\"+\"telegram-messages-dump-master\\\\update\\\\{}.csv --continue\".format(i)) #rename phone number!!!\ntelegram_query_1=''.join(telegram_query) \nprint(telegram_query_1)\nprint(len(telegram_query))\n\n\n#text_file = open(\"/home/armin/Documents/update.ps1\", \"w\")######## please modify aderss\ntext_file = open(\"C:/telegram-messages-dump-master/update.ps1\", \"w\")\ntext_file.write(telegram_query_1)\ntext_file.close()\ntelegram_query.clear()\n\n#crawler batch file\n#subprocess.call([r'/home/armin/Documents/update.bat'])\nsubprocess.call([r'C:/telegram-messages-dump-master/update.bat'])######## please modify aderss\n\n\n################################recieve resources############\nurl='http://192.168.115.153:8000/api/resource/'\nr=requests.get(url)\nresources= r.json()\nt=len(resources['results'])\n\nresource_name=[]\nfor j in range(len(resources['results'])):\n resource_name.append(resources['results'][j]['name'])\nprint(resource_name)\n###############################################################\n\n######################################read base csv########################\nfor i in range(len(telegarm_channel)):\n print(i)\n print(telegarm_channel)\n resource_name[i]= pd.read_csv (r'C:/telegram-messages-dump-master/base/{}.csv'.format(telegarm_channel[i]))#first csv crawled ######## please modify aderss\n\nprint(resource_name[0])\n\n\n########################################## read updated csv ################\nfor i in range(len(telegarm_channel)):\n print(i)\n print(telegarm_channel[i])\n telegarm_channel[i]= pd.read_csv 
(r'C:/telegram-messages-dump-master/update/{}.csv'.format(telegarm_channel[i])) ######## please modify aderss\n\nprint(telegarm_channel[0])\ndf1=pd.DataFrame()\nfor i in range(len(telegarm_channel)):\n number_of_new_News=(len(telegarm_channel[i].iloc[:,0].values.tolist()))-(len(resource_name[i].iloc[:,0].values.tolist()))\n platform=[]\n platform_name='تلگرام'\n for j in range(number_of_new_News):\n platform.append(platform_name)\n \n link_number=telegarm_channel[i].iloc[:,0].values.tolist()[len(resource_name[i].iloc[:,0].values.tolist()):len(telegarm_channel[i].iloc[:,0].values.tolist())]\n \n News_link=[]\n for k in range(number_of_new_News):\n News_link.append('https://t.me/'+str(recive_manifest[i])+'/'+str(link_number[k])) \n\n json_file = {'Platform':platform,'Message_Link':News_link,'Message_Time':telegarm_channel[i].iloc[:,1].values.tolist()[len(resource_name[i].iloc[:,0].values.tolist()):len(telegarm_channel[i].iloc[:,0].values.tolist())],\n 'Sender_Name':telegarm_channel[i].iloc[:,2].values.tolist()[len(resource_name[i].iloc[:,0].values.tolist()):len(telegarm_channel[i].iloc[:,0].values.tolist())],\n 'Message':telegarm_channel[i].iloc[:,4].values.tolist()[len(resource_name[i].iloc[:,0].values.tolist()):len(telegarm_channel[i].iloc[:,0].values.tolist())]}\n df=pd.DataFrame(json_file)\n pd.DataFrame(json_file).to_csv('C:/telegram-messages-dump-master/new_NEWS/{}.csv'.format(recive_manifest[i]),index=0) ######## please modify aderss\n telegarm_channel[i].to_csv('C:/telegram-messages-dump-master/base/{}.csv'.format(recive_manifest[i]),index=0) ######## please modify aderss\n df1=pd.concat([df1,df])\n print(number_of_new_News)\ndf1.to_csv('C:/telegram-messages-dump-master/new_NEWS/append_News.csv',index=0)#####added\n\n#print(number_of_new_News)\n#print(len(telegarm_channel))\n \n #News_link_number=telegarm_channel[i].iloc[:,0].values.tolist()[len(resource_name[i].iloc[:,0].values.tolist()):len(telegarm_channel[i].iloc[:,0].values.tolist())\n \nfor i in range(len(df1.iloc[:,0])):\n data={'Platform':df1.iloc[i,0],'Message_Link':df1.iloc[i,1],'Message_Time':df1.iloc[i,2],'Sender_Name':df1.iloc[i,3],'Message':df1.iloc[i,4]}\n print(df1.iloc[i,4])\n r = requests.post(post_url, data=json.dumps(data), headers=headers)", "id": "2679285", "language": "Python", "matching_score": 1.3830901384353638, "max_stars_count": 1, "path": "shenasa.py" }, { "content": "from lxml import html #\nimport requests #\nimport re #for filtering digits\nimport arabic_reshaper #pip install arabic_reshaper \nfrom bidi.algorithm import get_display #pip install python-bidi\nimport csv\n\n\n#page id\nsample_id='omidtvclub'\n\n#scrape page\npage = requests.get('http://sapp.ir/'+sample_id)\ntree = html.fromstring(page.text)\nname = tree.xpath('/html/body/div/div/div[1]/h1')\nfollower = tree.xpath('//html/body/div/div/div[1]/h4')\n\n#encode and decode to utf-8\nname_ascii = name[0].text\nname_encode = name_ascii.encode('utf-8')\nname_decode = name_encode.decode(\"utf-8\")\n\n#reshape persian text for viewing in editor\nreshaped_text = arabic_reshaper.reshape(name_decode)\npersian_name = get_display(reshaped_text)\nprint(persian_name)\n\n#encode and decode to utf-8\nfollower_ascii = follower[0].text\nfollwer_encode = follower_ascii.encode('utf-8')\nfollwer_decode = follwer_encode.decode(\"utf-8\")\n\n#filter digits\nfollowers=int(re.sub('\\D', '', follwer_decode))\nprint(followers)\n\n\nscraped_data = [name_decode,followers]\nprint(scraped_data)\nwith open('%s-channel.csv'%(sample_id), 'w',encoding='utf-8-sig', 
errors='replace') as myfile:\n wr = csv.writer(myfile)\n wr.writerow(scraped_data)\n", "id": "371934", "language": "Python", "matching_score": 0.6810032725334167, "max_stars_count": 0, "path": "soroush.py" }, { "content": "from statistics import mean\nfrom collections import OrderedDict\nimport collections\nimport numpy as np\nimport csv\n#input_file_name=('C://source.csv')\n#output_file_name=('D://1.csv')\n\ndef calculate_averages(input_file_name, output_file_name):\n mydict = OrderedDict()\n rows=[]\n with open(input_file_name, mode='r') as input_file_name1:\n reader = csv.reader(input_file_name1)\n mydict = {rows[0]: rows[1:] for rows in reader}\n \n #print(mydict.values())\n mydict = dict((k, [int(s) for s in v]) for k,v in mydict.items())\n #print(mydict)\n with open(output_file_name, 'w', newline='') as output_file_name1:\n writer = csv.writer(output_file_name1,dialect='excel',delimiter =' ')\n for st,vals in mydict.items():\n row=(\"{},{}\".format(st,mean(vals)))\n #writer.writerows([row])\n print(row)\n writer.writerows([i.strip().split(' ') for i in [row]])\n \n\n#calculate_averages('C://source.csv','D://1.csv') \n\n\ndef calculate_sorted_averages(input_file_name, output_file_name):\n mydict = OrderedDict()\n rows=[]\n with open(input_file_name, mode='r') as input_file_name11:\n reader = csv.reader(input_file_name11)\n mydict = {rows[0]: rows[1:] for rows in reader}\n #print(mydict.values())\n mydict = dict((k, [int(s) for s in v]) for k,v in mydict.items())\n #print(mydict) \n with open(output_file_name, 'w', newline='') as output_file_name11:\n writer = csv.writer(output_file_name11,dialect='excel',delimiter =' ') \n od = collections.OrderedDict(sorted(mydict.items()))\n student_averages = ((sum(scores) / len(scores), s) for s, scores in od.items())\n for average, student in sorted(student_averages, reverse=False):\n row=(\"{},{}\".format(student,average))\n writer.writerows([i.strip().split(' ') for i in [row]])\n\n \n#calculate_sorted_averages('C://source.csv','D://1.csv') \n\ndef calculate_three_best(input_file_name, output_file_name):\n mydict = OrderedDict()\n rows=[]\n with open(input_file_name, mode='r') as input_file_name111:\n reader = csv.reader(input_file_name111)\n mydict = {rows[0]: rows[1:] for rows in reader}\n #print(mydict.values())\n mydict = dict((k, [int(s) for s in v]) for k,v in mydict.items())\n #print(mydict)\n with open(output_file_name, 'w', newline='') as output_file_name111:\n writer = csv.writer(output_file_name111,dialect='excel',delimiter =' ')\n od2 = collections.OrderedDict(sorted(mydict.items()))\n student_averages = ((sum(scores) / len(scores), s) for s, scores in od2.items())\n student_averages=sorted(student_averages, reverse=True)\n student_averages=student_averages[:3]\n for average, student in student_averages:\n row=(\"{},{}\".format(student,average))\n writer.writerows([i.strip().split(' ') for i in [row]])\n \n#calculate_three_best('C://source.csv','D://1.csv')\n\ndef calculate_three_worst(input_file_name, output_file_name):\n mydict = OrderedDict()\n rows=[]\n with open(input_file_name, mode='r') as input_file_name1111:\n reader = csv.reader(input_file_name1111)\n mydict = {rows[0]: rows[1:] for rows in reader}\n #print(mydict.values())\n mydict = dict((k, [int(s) for s in v]) for k,v in mydict.items())\n #print(mydict)\n with open(output_file_name, 'w', newline='') as output_file_name1111:\n writer = csv.writer(output_file_name1111,dialect='excel',delimiter =' ')\n student_averages = ((sum(scores) / len(scores), s) for s, scores in 
mydict.items())\n student_averages=sorted(student_averages, reverse=False)\n student_averages=student_averages[:3]\n for average, student in student_averages:\n row=(\"{}\".format(average)) \n writer.writerows([i.strip().split(' ') for i in [row]])\n\n#calculate_three_worst('C://source.csv','D://1.csv')\n\ndef calculate_average_of_averages(input_file_name, output_file_name):\n mydict = OrderedDict()\n rows=[]\n with open(input_file_name, mode='r') as input_file_name11111:\n reader = csv.reader(input_file_name11111)\n mydict = {rows[0]: rows[1:] for rows in reader}\n #print(mydict.values())\n mydict = dict((k, [int(s) for s in v]) for k,v in mydict.items())\n #print(mydict)\n with open(output_file_name, 'w', newline='') as output_file_name11111:\n writer = csv.writer(output_file_name11111,dialect='excel',delimiter =' ')\n student_averages = ((sum(scores) / len(scores), s) for s, scores in mydict.items())\n student_averages=sorted(student_averages, reverse=False)\n mydict2=OrderedDict()\n for i,j in student_averages:\n mydict2={j:i for i,j in student_averages}\n test=list(mydict2.values())\n row=(sum(test) / len(test) )\n writer.writerow([row])\n\n#calculate_average_of_averages('C://source.csv','D://1.csv')", "id": "12350715", "language": "Python", "matching_score": 4.0911359786987305, "max_stars_count": 0, "path": "chapter-5/GPA.py" }, { "content": "import hashlib\nimport csv\nimport binascii\nfrom collections import OrderedDict\n\ndef hash_password_hack(input_file_name, output_file_name):\n mydict = OrderedDict()\n mydict1 = OrderedDict()\n rows = []\n with open(input_file_name, mode='r') as input_file_name1:\n reader = csv.reader(input_file_name1)\n mydict1 = {rows[0]: rows[1] for rows in reader}\n with open(output_file_name, 'w', newline='') as output_file_name1:\n writer = csv.writer(output_file_name1, dialect='excel', delimiter=' ')\n for i in range(0, 10000): # 0-10000 shavad\n #print(hashlib.sha256(str(i).encode()).hexdigest())\n row = (hashlib.sha256(str(i).encode()).hexdigest())\n writer.writerow([row])\n for x in row:\n mydict[row] = i\n with open(output_file_name, 'w', newline='') as output_file_name1:\n writer = csv.writer(output_file_name1, dialect='excel')\n for i in range(len(mydict1)):\n #print(len(mydict1))\n if ((list(mydict1.values())[i])) in (list(mydict.keys())):\n #print(mydict.get(str((list(mydict1.values())[i]))))\n #print(list(mydict1.keys())[i],',',mydict.get(str((list(mydict1.values())[i]))))\n row = (list(mydict1.keys())[i], mydict.get(\n str((list(mydict1.values())[i]))))\n writer.writerow(row)\n\n", "id": "8974652", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-6/Rainbow.py" }, { "content": "name=(str(input()).lower())\nfor i in range(0,len(name)):\n e=(i,name[i])\nname2=name[::-1] \nfor j in range(0,len(name2)):\n g=(j,name2[j])\nif g==e:\n print('palindrome')\nelse:\n print('not palindrome')", "id": "8670257", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-3/Palindrome.py" }, { "content": "for i in range(0,10):\n name=(str(input()).lower())\n print(name.capitalize())\n", "id": "2980666", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "chapter-3/Standardization.py" } ]
1
duramato
[ { "content": "import time\nimport os\nimport logging as logger\nfrom selenium import webdriver\nstarttime=time.time()\nlogger.basicConfig(filename='screenshooter.log',level=logger.DEBUG)\nurl = 'http://applemusic.tumblr.com/beats1'\nwhile True:\n #driver = webdriver.PhantomJS()\n #driver.set_window_size(1920, 1080)\n #driver.get(url)\n #driver.save_screenshot('page.jpg')\n try:\n print('hey')\n except Exception:\n logger.info('No File exsits')\n logger.info('screenie init')\n os.system(\"./webshots --width 1080 --height 1920 https://twitter.com/radio_scrobble\")\n os.system(\"mv twitter.com-radio_scrobble.1080x1920.png music_page.png\")\n #print('Screenie taken @ ' + str(time.time()) )\n logger.info('Screenie taken')\n time.sleep(10.0 - ((time.time() - starttime) % 10.0))\n", "id": "4104020", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "screenshooter.py" }, { "content": "import time\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer, test as _test\nfrom SocketServer import ThreadingMixIn\nimport BaseHTTPServer\nfrom urlparse import urlparse, parse_qs\nfrom selenium import webdriver\nimport numpy as np\nimport PIL\nimport re\nfrom PIL import Image\nfrom PIL import ImageFile\nimport threading\nimport urllib\nimport urllib2\nfrom urllib import urlopen\nfrom urlparse import urlparse, urljoin\nimport json\nimport time\nimport os\nimport csv\nimport image2text\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\ntitle_regex = re.compile(r'(?:(?:https?:)?\\/\\/.*\\/(?:images|uploads)?\\/(?:.*\\/)?)(.*)', re.I)\nHOST_NAME = '0.0.0.0'\nPORT_NUMBER = 82\n\nclass TumblerGetter():\n @staticmethod\n def ReadCSVasDict(csv_file):\n try:\n with open(csv_file) as csvfile:\n reader = csv.DictReader(csvfile)\n final_result = []\n items = []\n for row in reader:\n item = {}\n item = {\n 'title': row['title'],\n 'start': row['start'],\n 'end': row['end'],\n 'now': row['now'],\n 'image': row['image'],\n 'file_name': row['file_name']\n }\n items.append(item)\n final_result += items\n except IOError as (errno, strerror):\n print(\"I/O error({0}): {1}\".format(errno, strerror)) \n return final_result\n @staticmethod \n def WriteDictToCSV(csv_file,csv_columns,dict_data):\n try:\n with open(csv_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for data in dict_data:\n writer.writerow(data)\n except IOError as (errno, strerror):\n print(\"I/O error({0}): {1}\".format(errno, strerror)) \n return \n @staticmethod\n def BootStrap():\n sock = urlopen(\"http://fuse-music.herokuapp.com/api/programs\")\n htmlSource = sock.read()\n sock.close() \n url = htmlSource\n artists = json.loads(url)\n dict = artists.get('programs')\n final_result = []\n items = []\n for artist in dict:\n title = artist.get(\"title\")\n image = artist.get(\"image\")\n if not image:\n image = \"https://applesocial.s3.amazonaws.com/assets/images/branding/on-air-default.jpg\"\n start = int(artist.get(\"start\"))\n time_now = int(time.time())\n start = str(start)[:-3]\n end = int(artist.get(\"end\"))\n end = str(end)[:-3]\n if int(end) < int(time_now):\n #print('Skipping already aired: ' + title)\n continue\n file_name = title_regex.search(image).group(1)\n print('Found \"image\": {0}'.format(image))\n print('Found \"file_name\": {0}'.format(file_name))\n if not os.path.isfile(file_name):\n print(\"Writting {0} to disk\".format(file_name))\n f = open(file_name,'wb')\n image_path = urlparse(image)\n image = \"http://\" + image_path.netloc + image_path.path\n 
print('Found \"image\": {0}'.format(image))\n request = urllib2.Request(image)\n request.add_header('User-agent', 'Mozilla/5.0 (Linux i686)')\n f.write(urllib2.urlopen(request).read())\n #except Exception as ex:\n #print(\"Ups failed retriving image with {0}, retrying...\".format(ex))\n #f.write(urllib.urlopen(image).read())\n f.close()\n item = {}\n item = {\n 'title': title.encode(\"utf8\"),\n 'start': start,\n 'end': end,\n 'now': time_now,\n 'image': image.encode(\"utf8\"),\n 'file_name': file_name.encode(\"utf8\")\n }\n items.append(item)\n final_result += items\n import csv\n my_dict = final_result\n csv_columns = ['file_name', 'title','start','end','now','image']\n csv_file = \"db.csv\"\n\n TumblerGetter.WriteDictToCSV(csv_file,csv_columns,my_dict)\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n pass\n\nclass SlowHandler(BaseHTTPRequestHandler):\n def do_HEAD(s):\n s.send_response(200)\n s.send_header(\"Content-type\", \"text/html\")\n s.end_headers()\n def do_GET(s):\n \"\"\"Respond to a GET request.\"\"\"\n if s.path.startswith('/audio/wat/showimg.jpg'):\n \n csv_file = \"db.csv\"\n string = TumblerGetter.ReadCSVasDict(csv_file)\n \n # Create show name image\n image2text.main(string[0][\"title\"])\n \n f=open(string[0][\"file_name\"], 'rb')\n s.send_response(200)\n s.send_header('Content-type', 'image/png')\n s.end_headers()\n s.wfile.write(f.read())\n f.close()\n \n elif s.path.startswith('/audio/wat/show.jpg'):\n \n f=open(\"show.png\", 'rb')\n s.send_response(200)\n s.send_header('Content-type', 'image/png')\n s.end_headers()\n s.wfile.write(f.read())\n f.close()\n \n elif s.path.startswith('/audio/wat/art.jpg'):\n #server = parse_qs(urlparse(s.path).query)\n try:\n img = Image.open(\"music_page.png\")\n left = 470\n top = 580\n width = 500\n height = 500\n box = (left, top, left+width, top+height)\n img4 = img.crop(box)\n img4.save(\"cover.jpg\")\n f=open(\"cover.jpg\", 'rb')\n s.send_response(200)\n s.send_header('Content-type', 'image/jpg')\n s.end_headers()\n s.wfile.write(f.read())\n f.close()\n except IOError:\n print(\"Ups on cover.jpg\")\n f=open(\"cover.jpg\", 'rb')\n s.send_response(200)\n s.send_header('Content-type', 'image/jpg')\n s.end_headers()\n s.wfile.write(f.read())\n f.close()\n elif s.path.startswith('/audio/wat/nowz.jpg'):\n #server = parse_qs(urlparse(s.path).query)\n try:\n img = Image.open(\"music_page.png\")\n left = 460\n top = 470\n width = 520\n height = 100\n box = (left, top, left+width, top+height)\n img4 = img.crop(box)\n #img4.save(\"now1.jpg\")\n \n \n #img = Image.open(sys.argv[1])\n img = img4.convert(\"RGBA\")\n\n pixdata = img.load()\n\n # Remove the blue color\n\n #for y in xrange(img.size[1]):\n # for x in xrange(img.size[0]):\n # if pixdata[x, y] == (0, 132, 180, 255):\n # pixdata[x, y] = (255, 255, 255, 0)\n \n # Remove the white background\n\n for y in xrange(img.size[1]):\n for x in xrange(img.size[0]):\n if pixdata[x, y] == (255, 255, 255, 255):\n pixdata[x, y] = (255, 255, 255, 255)\n \n img4 = img\n \n \n basewidth = 375\n img = img4 #Image.open('now1.jpg')\n wpercent = (basewidth/float(img.size[0]))\n hsize = int((float(img.size[1])*float(wpercent)))\n img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)\n img.save('now.png')\n \n f=open(\"now.png\", 'rb')\n s.send_response(200)\n s.send_header('Content-type', 'image/png')\n s.end_headers()\n s.wfile.write(f.read())\n f.close()\n except IOError:\n print(\"Ups on now.jpg\")\n f=open(\"now.jpg\", 'rb')\n s.send_response(200)\n s.send_header('Content-type', 
'image/jpg')\n s.end_headers()\n s.wfile.write(f.read())\n f.close()\n else:\n f=open(\"page.jpg\", 'rb')\n s.send_response(404)\n s.send_header('Content-type', 'image/jpg')\n s.end_headers()\n s.wfile.write(f.read())\n f.close()\n\n\ndef test(HandlerClass = SlowHandler,\n ServerClass = ThreadedHTTPServer):\n _test(HandlerClass, ServerClass)\n \n \nif __name__ == '__main__':\n server_class = BaseHTTPServer.HTTPServer\n httpd = server_class((HOST_NAME, PORT_NUMBER), SlowHandler)\n print time.asctime(), \"Server Starts - %s:%s\" % (HOST_NAME, PORT_NUMBER)\n import threading\n def UpdateJSON():\n print(\"Updating JSON to CSV\")\n TumblerGetter.BootStrap()\n threading.Timer(60, UpdateJSON).start()\n print(\"Updated JSON to CSV\")\n UpdateJSON()\n \n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n pass\n httpd.server_close()\n print time.asctime(), \"Server Stops - %s:%s\" % (HOST_NAME, PORT_NUMBER)\n", "id": "5547031", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "server.py" } ]
0
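The server.py recorded above is Python 2 code (BaseHTTPServer, urllib2, tuple-unpacking except clauses, print statements). As an illustrative aside only, a minimal Python 3 sketch of just its /audio/wat/art.jpg route, assuming Pillow is installed and that screenshooter.py has already produced music_page.png, could look like the following; the handler name and the 8082 port are assumed choices, not taken from the original (which binds port 82).

# Hypothetical Python 3 sketch of the /audio/wat/art.jpg handler from server.py above.
# Crop box values (470, 580, 500x500) are copied from the record; everything else is assumed.
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
from PIL import Image

class ArtHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        if self.path.startswith("/audio/wat/art.jpg"):
            # Crop the album-art region out of the page screenshot and serve it as JPEG.
            img = Image.open("music_page.png")
            left, top, width, height = 470, 580, 500, 500
            img.crop((left, top, left + width, top + height)).save("cover.jpg")
            self.send_response(200)
            self.send_header("Content-type", "image/jpeg")
            self.end_headers()
            with open("cover.jpg", "rb") as f:
                self.wfile.write(f.read())
        else:
            self.send_error(404)

if __name__ == "__main__":
    # Port 8082 instead of the original 82 so the sketch runs without root privileges.
    ThreadingHTTPServer(("0.0.0.0", 8082), ArtHandler).serve_forever()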
m-doescode
[ { "content": "from collections import OrderedDict\nimport pyfuncol\n\nd = {\"a\": 1, \"b\": 2, \"c\": 3}\ndi: OrderedDict[str, int] = OrderedDict(d)\n\n\ndef test_contains():\n assert d.contains(\"a\") == True\n assert d.contains(\"z\") == False\n\n\ndef test_size():\n assert d.size() == 3\n\n\ndef test_filter():\n assert d.filter(lambda kv: kv[1] > 1) == {\"b\": 2, \"c\": 3}\n\n # Test that type is preserved\n di: OrderedDict[str, int] = OrderedDict(d)\n assert di.filter(lambda kv: True) == di\n\n\ndef test_filter_not():\n assert d.filter_not(lambda kv: kv[1] > 1) == {\"a\": 1}\n\n # Test that type is preserved\n di: OrderedDict[str, int] = OrderedDict(d)\n assert di.filter_not(lambda kv: False) == di\n\n\ndef test_flat_map():\n assert d.flat_map(lambda kv: {kv[0]: kv[1] ** 2}) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n # Test that type is preserved\n di: OrderedDict[str, int] = OrderedDict(d)\n assert di.flat_map(lambda kv: {kv[0]: kv[1]}) == di\n\n\ndef test_foreach():\n tester = []\n d.foreach(lambda kv: tester.append(kv))\n assert tester == [(\"a\", 1), (\"b\", 2), (\"c\", 3)]\n\n\ndef test_is_empty():\n assert d.is_empty() == False\n assert {}.is_empty() == True\n\n\ndef test_map():\n assert d.map(lambda kv: (kv[0], kv[1] ** 2)) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n # Test that type is preserved\n di: OrderedDict[str, int] = OrderedDict(d)\n assert di.map(lambda kv: kv) == di\n\n\ndef test_to_list():\n assert d.to_list() == [(\"a\", 1), (\"b\", 2), (\"c\", 3)]\n\n\ndef test_count():\n assert d.count(lambda kv: (kv[0] == \"a\" or kv[0] == \"b\") and kv[1] <= 3) == 2\n\n\ndef test_fold_left():\n assert d.fold_left(\"\", lambda acc, kv: acc + kv[0] + str(kv[1])) == \"a1b2c3\"\n\n\ndef test_fold_right():\n assert d.fold_right(\"\", lambda kv, acc: acc + kv[0] + str(kv[1])) == \"c3b2a1\"\n\n\ndef test_forall():\n assert d.forall(lambda kv: kv[1] <= 3) == True\n\n\ndef test_forall_false():\n assert d.forall(lambda kv: kv[1] < 2) == False\n\n\ndef test_find():\n assert d.find(lambda kv: kv[1] == 2) == (\"b\", 2)\n\n\ndef test_find_none():\n assert d.find(lambda kv: kv[1] == 5) == None\n\n\n# Parallel operations\n\n\ndef test_par_filter():\n assert d.par_filter(lambda kv: kv[1] > 1) == {\"b\": 2, \"c\": 3}\n\n # Test that type is preserved\n assert di.par_filter(lambda kv: True) == di\n\n\ndef test_par_filter_not():\n assert d.par_filter_not(lambda kv: kv[1] <= 1) == {\"b\": 2, \"c\": 3}\n\n # Test that type is preserved\n assert di.par_filter_not(lambda kv: False) == di\n\n\ndef test_par_flat_map():\n assert d.par_flat_map(lambda kv: {kv[0]: kv[1] ** 2}) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n # Test that type is preserved\n assert di.par_flat_map(lambda kv: {kv[0]: kv[1]}) == di\n\n\ndef test_par_map():\n assert d.par_map(lambda kv: (kv[0], kv[1] ** 2)) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n # Test that type is preserved\n assert di.par_map(lambda kv: kv) == di\n\n\n# Pure operations\n\n\ndef test_pure_flat_map():\n assert d.pure_flat_map(lambda kv: {kv[0]: kv[1] ** 2}) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n # Test that type is preserved\n assert di.pure_flat_map(lambda kv: {kv[0]: kv[1] ** 2}) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n\ndef test_pure_map():\n assert d.pure_map(lambda kv: (kv[0], kv[1] ** 2)) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n # Test that type is preserved\n assert di.pure_map(lambda kv: (kv[0], kv[1] ** 2)) == {\"a\": 1, \"b\": 4, \"c\": 9}\n\n\ndef test_pure_filter():\n assert d.pure_filter(lambda kv: kv[1] > 1) == {\"b\": 2, \"c\": 3}\n\n # Test that type is preserved\n assert 
di.pure_filter(lambda kv: kv[1] > 1) == {\"b\": 2, \"c\": 3}\n\n\ndef test_pure_filter_not():\n assert d.pure_filter_not(lambda kv: kv[1] > 1) == {\"a\": 1}\n\n # Test that type is preserved\n assert d.pure_filter_not(lambda kv: kv[1] > 1) == {\"a\": 1}\n", "id": "2475267", "language": "Python", "matching_score": 4.648919105529785, "max_stars_count": 0, "path": "pyfuncol/tests/test_dict.py" }, { "content": "import pyfuncol\n\ns = {1, 2, 3}\nst = frozenset(s)\n\n\ndef test_map():\n assert s.map(lambda x: x * 2) == {2, 4, 6}\n assert st.map(lambda x: x * 2) == frozenset({2, 4, 6})\n\n\ndef test_filter():\n assert s.filter(lambda x: x >= 2) == {2, 3}\n assert st.filter(lambda x: x >= 2) == frozenset({2, 3})\n\n\ndef test_filter_not():\n assert s.filter_not(lambda x: x < 2) == {2, 3}\n assert st.filter_not(lambda x: x < 2) == frozenset({2, 3})\n\n\ndef test_flat_map():\n assert s.flat_map(lambda x: {x ** 2}) == {1, 4, 9}\n assert st.flat_map(lambda x: {x ** 2}) == frozenset({1, 4, 9})\n\n\ndef test_contains():\n assert s.contains(2) == True\n assert st.contains(2) == True\n\n\ndef test_group_by():\n assert {\"abc\", \"def\", \"e\"}.group_by(lambda s: len(s)) == {\n 3: {\"abc\", \"def\"},\n 1: {\"e\"},\n }\n assert frozenset({\"abc\", \"def\", \"e\"}).group_by(lambda s: len(s)) == {\n 3: {\"abc\", \"def\"},\n 1: {\"e\"},\n }\n\n\ndef test_is_empty():\n assert s.is_empty() == False\n empty = set()\n assert empty.is_empty() == True\n\n assert st.is_empty() == False\n frozen_empty = frozenset()\n assert frozen_empty.is_empty() == True\n\n\ndef test_size():\n assert s.size() == 3\n assert st.size() == 3\n\n\ndef test_find():\n assert s.find(lambda x: x >= 3) == 3\n assert s.find(lambda x: x < 0) == None\n\n assert st.find(lambda x: x >= 3) == 3\n assert st.find(lambda x: x < 0) == None\n\n\ndef test_foreach():\n tester = set()\n s.foreach(lambda x: tester.add(x))\n assert tester == s\n\n frozen_tester = set()\n st.foreach(lambda x: frozen_tester.add(x))\n assert frozen_tester == s\n\n\ndef test_fold_left_plus():\n assert s.fold_left(0, lambda acc, n: acc + n) == 6\n assert st.fold_left(0, lambda acc, n: acc + n) == 6\n\n\ndef test_fold_left_concat():\n a = s.fold_left(\"\", lambda acc, n: acc + str(n))\n assert (\n a == \"123\" or a == \"321\" or a == \"132\" or a == \"213\" or a == \"231\" or a == \"312\"\n )\n\n frozen_a = st.fold_left(\"\", lambda acc, n: acc + str(n))\n assert (\n frozen_a == \"123\"\n or frozen_a == \"321\"\n or frozen_a == \"132\"\n or frozen_a == \"213\"\n or frozen_a == \"231\"\n or frozen_a == \"312\"\n )\n\n\ndef test_fold_right_plus():\n assert s.fold_right(0, lambda n, acc: acc + n) == 6\n assert st.fold_right(0, lambda n, acc: acc + n) == 6\n\n\ndef test_fold_right_concat():\n a = s.fold_right(\"\", lambda n, acc: acc + str(n))\n assert (\n a == \"321\" or a == \"123\" or a == \"132\" or a == \"213\" or a == \"231\" or a == \"312\"\n )\n\n frozen_a = st.fold_right(\"\", lambda n, acc: acc + str(n))\n assert (\n frozen_a == \"321\"\n or frozen_a == \"123\"\n or frozen_a == \"132\"\n or frozen_a == \"213\"\n or frozen_a == \"231\"\n or frozen_a == \"312\"\n )\n\n\ndef test_forall_gt_zero():\n assert s.forall(lambda n: n > 0) == True\n assert st.forall(lambda n: n > 0) == True\n\n\ndef test_forall_gt_two():\n assert s.forall(lambda n: n > 2) == False\n assert st.forall(lambda n: n > 2) == False\n\n\ndef test_length():\n assert s.length() == 3\n assert st.length() == 3\n\n\ndef test_length_equal_size():\n assert s.size() == s.length()\n assert st.size() == 
st.length()\n\n\n# Parallel operations\n\n\ndef test_par_map():\n assert s.par_map(lambda x: x * 2) == {2, 4, 6}\n assert st.par_map(lambda x: x * 2) == frozenset({2, 4, 6})\n\n\ndef test_par_filter():\n assert s.par_filter(lambda x: x >= 2) == {2, 3}\n assert st.par_filter(lambda x: x >= 2) == frozenset({2, 3})\n\n\ndef test_par_filter_not():\n assert s.par_filter_not(lambda x: x < 2) == {2, 3}\n assert st.par_filter_not(lambda x: x < 2) == frozenset({2, 3})\n\n\ndef test_par_flat_map():\n assert s.par_flat_map(lambda x: [x ** 2]) == {1, 4, 9}\n assert st.par_flat_map(lambda x: {x ** 2}) == frozenset({1, 4, 9})\n\n\n# Pure operations\n\n\ndef test_pure_map():\n assert s.pure_map(lambda x: x * 2) == {2, 4, 6}\n assert st.pure_map(lambda x: x * 2) == {2, 4, 6}\n\n\ndef test_pure_flat_map():\n assert s.pure_flat_map(lambda x: [x ** 2]) == {1, 4, 9}\n assert st.pure_flat_map(lambda x: [x ** 2]) == {1, 4, 9}\n\n\ndef test_pure_filter():\n assert s.pure_filter(lambda x: x >= 2) == {2, 3}\n assert st.pure_filter(lambda x: x >= 2) == {2, 3}\n\n\ndef test_pure_filter_not():\n assert s.pure_filter_not(lambda x: x >= 2) == {1}\n assert st.pure_filter_not(lambda x: x >= 2) == {1}\n", "id": "11037422", "language": "Python", "matching_score": 0.7928369641304016, "max_stars_count": 0, "path": "pyfuncol/tests/test_set.py" }, { "content": "from forbiddenfruit import curse\nfrom collections import defaultdict\nfrom typing import Callable, Dict, Optional, TypeVar, List, cast\nimport functools\nimport dask\n\nA = TypeVar(\"A\")\nB = TypeVar(\"B\")\nK = TypeVar(\"K\")\nU = TypeVar(\"U\")\n\n\ndef map(self: List[A], f: Callable[[A], B]) -> List[B]:\n \"\"\"\n Builds a new list by applying a function to all elements of this list.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new list.\n \"\"\"\n return cast(List[B], type(self)(f(x) for x in self))\n\n\ndef filter(self: List[A], p: Callable[[A], bool]) -> List[A]:\n \"\"\"\n Selects all elements of this list which satisfy a predicate.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered list.\n \"\"\"\n return type(self)(x for x in self if p(x))\n\n\ndef filter_not(self: List[A], p: Callable[[A], bool]) -> List[A]:\n \"\"\"\n Selects all elements of this list which do not satisfy a predicate.\n\n Args:\n p: The predicate to not satisfy.\n\n Returns:\n The filtered list.\n \"\"\"\n return type(self)(x for x in self if not p(x))\n\n\ndef flat_map(self: List[A], f: Callable[[A], List[B]]) -> List[B]:\n \"\"\"\n Builds a new list by applying a function to all elements of this list and using the elements of the resulting collections.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new list.\n \"\"\"\n return cast(List[B], type(self)(y for x in self for y in f(x)))\n\n\ndef flatten(self: List[A]) -> List[B]:\n \"\"\"\n Converts this list of lists into a list formed by the elements of these lists.\n\n Returns:\n The flattened list.\n \"\"\"\n return cast(List[B], type(self)(y for x in self for y in x))\n\n\ndef contains(self: List[A], elem: A) -> bool:\n \"\"\"\n Tests whether this list contains a given value as element.\n\n Args:\n elem: The element to look for.\n\n Returns:\n True if the list contains the element, False otherwise.\n \"\"\"\n return elem in self\n\n\ndef distinct(self: List[A]) -> List[A]:\n \"\"\"\n Selects all the elements of this list ignoring the duplicates.\n\n Returns:\n The list without duplicates.\n \"\"\"\n return type(self)((set(self)))\n\n\ndef foreach(self: List[A], f: 
Callable[[A], U]) -> None:\n \"\"\"\n Apply f to each element for its side effects.\n\n Args:\n f: The function to apply to all elements for its side effects.\n \"\"\"\n for x in self:\n f(x)\n\n\ndef group_by(self: List[A], f: Callable[[A], K]) -> Dict[K, List[A]]:\n \"\"\"\n Partitions this list into a dict of lists according to some discriminator function.\n\n Args:\n f: The grouping function.\n\n Returns:\n A dictionary where elements are grouped according to the grouping function.\n \"\"\"\n d = defaultdict(type(self))\n for x in self:\n k = f(x)\n d[k].append(x)\n return dict(d)\n\n\ndef is_empty(self: List[A]) -> bool:\n \"\"\"\n Tests whether the list is empty.\n\n Returns:\n True if the list is empty, False otherwise.\n \"\"\"\n return len(self) == 0\n\n\ndef size(self: List[A]) -> int:\n \"\"\"\n Computes the size of this list.\n\n Returns:\n The size of the list.\n \"\"\"\n return len(self)\n\n\ndef find(self: List[A], p: Callable[[A], bool]) -> Optional[A]:\n \"\"\"\n Finds the first element of the list satisfying a predicate, if any.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The first element satisfying the predicate, otherwise None.\n \"\"\"\n for x in self:\n if p(x):\n return x\n return None\n\n\ndef index_of(self: List[A], elem: A) -> int:\n \"\"\"\n Finds index of first occurrence of some value in this list. Returns -1 if none exists.\n\n Args:\n elem: The element whose index is to find.\n\n Returns:\n The index of the first occurrence of the element, or -1 if it does not exists.\n \"\"\"\n for i, x in enumerate(self):\n if x == elem:\n return i\n return -1\n\n\ndef fold_left(self: List[A], z: B, op: Callable[[B, A], B]) -> B:\n \"\"\"\n Applies a binary operator to a start value and all elements of this sequence, going left to right.\n\n Args:\n z: The start value.\n op: The binary operation.\n\n Returns:\n The result of inserting op between consecutive elements of this sequence, going left to right with the start value z on the left:\n op(...op(z, x_1), x_2, ..., x_n)\n where x1, ..., xn are the elements of this sequence. Returns z if this sequence is empty.\n \"\"\"\n acc = z\n for x in self:\n acc = op(acc, x)\n return acc\n\n\ndef fold_right(self: List[A], z: B, op: Callable[[A, B], B]) -> B:\n \"\"\"\n Applies a binary operator to all elements of this list and a start value, going right to left.\n\n Args:\n z: The start value.\n op: The binary operation.\n\n Returns:\n The result of inserting op between consecutive elements of this list, going right to left with the start value z on the right:\n op(x_1, op(x_2, ... op(x_n, z)...))\n where x1, ..., xn are the elements of this list. 
Returns z if this list is empty.\n \"\"\"\n\n acc = z\n for x in reversed(self):\n acc = op(x, acc)\n return acc\n\n\ndef forall(self: List[A], p: Callable[[A], bool]) -> bool:\n \"\"\"\n Tests whether a predicate holds for all elements of this list.\n\n Args:\n p: The predicate used to test elements.\n\n Returns:\n True if this list is empty or the given predicate p holds for all elements of this list, otherwise False.\n \"\"\"\n for x in self:\n if not p(x):\n return False\n\n return True\n\n\ndef head(self: List[A]) -> A:\n \"\"\"\n Selects the first element of this iterable collection.\n\n Note: might return different results for different runs, unless the underlying collection type is ordered.\n\n Raises:\n IndexError: If the iterable collection is empty.\n \"\"\"\n if not self:\n raise IndexError()\n return self[0]\n\n\ndef tail(self: List[A]) -> List[A]:\n \"\"\"\n The rest of the collection without its first element.\n\n Raises:\n IndexError: If the iterable collection is empty.\n \"\"\"\n if not self:\n raise IndexError()\n return self[1:]\n\n\ndef take(self: List[A], n: int) -> List[A]:\n \"\"\"\n Selects the first n elements.\n\n Args:\n n: The number of elements to take from this list.\n\n Returns:\n A list consisting only of the first n elements of this list, or else the whole list, if it has less than n elements. If n is negative, returns an empty list.\n \"\"\"\n\n if n < 0:\n return type(self)()\n if len(self) <= n:\n return self\n\n return self[0:n]\n\n\ndef length(self: List[A]) -> int:\n \"\"\"\n Returns the length (number of elements) of the list. `size` is an alias for length.\n\n Returns:\n The length of the list\n \"\"\"\n return len(self)\n\n\n# Parallel operations\n\n\ndef par_map(self: List[A], f: Callable[[A], B]) -> List[B]:\n \"\"\"\n Builds a new list by applying a function in parallel to all elements of this list.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new list.\n \"\"\"\n return cast(\n List[B], type(self)((dask.compute(*(dask.delayed(f)(x) for x in self))))\n )\n\n\ndef par_filter(self: List[A], p: Callable[[A], bool]) -> List[A]:\n \"\"\"\n Selects in parallel all elements of this list which satisfy a predicate.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered list.\n \"\"\"\n preds = dask.compute(*(dask.delayed(p)(x) for x in self))\n return type(self)(x for i, x in enumerate(self) if preds[i])\n\n\ndef par_filter_not(self: List[A], p: Callable[[A], bool]) -> List[A]:\n \"\"\"\n Selects in parallel all elements of this list which do not satisfy a predicate.\n\n Args:\n p: The predicate to not satisfy.\n\n Returns:\n The filtered list.\n \"\"\"\n preds = dask.compute(*(dask.delayed(p)(x) for x in self))\n return type(self)(x for i, x in enumerate(self) if not preds[i])\n\n\ndef par_flat_map(self: List[A], f: Callable[[A], List[B]]) -> List[B]:\n \"\"\"\n Builds a new list by applying a function in parallel to all elements of this list and using the\n elements of the resulting collections.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new list.\n \"\"\"\n applications = dask.compute(*(dask.delayed(f)(x) for x in self))\n return cast(List[B], type(self)(z for y in applications for z in y))\n\n\ndef pure_map(self: List[A], f: Callable[[A], B]) -> List[B]:\n \"\"\"\n Builds a new list by applying a function to all elements of this list using memoization to improve performance.\n\n WARNING: f must be a PURE function i.e., calling f on the same input must always lead to the same 
result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n f: The PURE function to apply to all elements.\n\n Returns:\n The new list.\n \"\"\"\n f_cache = functools.cache(f)\n return cast(List[B], type(self)(f_cache(x) for x in self))\n\n\ndef pure_flat_map(self: List[A], f: Callable[[A], List[B]]) -> List[B]:\n \"\"\"\n Builds a new list by applying a function to all elements of this list and using the elements of the resulting collections using memoization to improve performance.\n\n WARNING: f must be a PURE function i.e., calling f on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new list.\n \"\"\"\n f_cache = functools.cache(f)\n return cast(List[B], type(self)(y for x in self for y in f_cache(x)))\n\n\ndef pure_filter(self: List[A], p: Callable[[A], bool]) -> List[A]:\n \"\"\"\n Selects all elements of this list which satisfy a predicate using memoization to improve performance.\n\n WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered list.\n \"\"\"\n p_cache = functools.cache(p)\n return type(self)(x for x in self if p_cache(x))\n\n\ndef pure_filter_not(self: List[A], p: Callable[[A], bool]) -> List[A]:\n \"\"\"\n Selects all elements of this list which do not satisfy a predicate using memoization to improve performance.\n\n WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n\n Args:\n p: The predicate not to satisfy.\n\n Returns:\n The filtered list.\n \"\"\"\n p_cache = functools.cache(p)\n return type(self)(x for x in self if not p_cache(x))\n\n\ndef extend_list():\n \"\"\"\n Extends the list built-in type with methods.\n \"\"\"\n curse(list, \"map\", map)\n curse(list, \"filter\", filter)\n curse(list, \"filter_not\", filter_not)\n curse(list, \"flat_map\", flat_map)\n curse(list, \"flatten\", flatten)\n curse(list, \"contains\", contains)\n curse(list, \"distinct\", distinct)\n curse(list, \"foreach\", foreach)\n curse(list, \"group_by\", group_by)\n curse(list, \"is_empty\", is_empty)\n curse(list, \"size\", size)\n curse(list, \"find\", find)\n curse(list, \"index_of\", index_of)\n curse(list, \"fold_left\", fold_left)\n curse(list, \"fold_right\", fold_right)\n curse(list, \"forall\", forall)\n curse(list, \"head\", head)\n curse(list, \"tail\", tail)\n curse(list, \"take\", take)\n curse(list, \"length\", length)\n\n # Parallel operations\n curse(list, \"par_map\", par_map)\n curse(list, \"par_filter\", par_filter)\n curse(list, \"par_filter_not\", par_filter_not)\n curse(list, \"par_flat_map\", par_flat_map)\n\n # Pure operations\n curse(list, \"pure_map\", pure_map)\n curse(list, \"pure_flat_map\", pure_flat_map)\n curse(list, \"pure_filter\", pure_filter)\n curse(list, \"pure_filter_not\", pure_filter_not)\n", "id": "4705817", "language": "Python", "matching_score": 5.348161697387695, "max_stars_count": 0, "path": "pyfuncol/list.py" }, { "content": "from forbiddenfruit import curse\nfrom typing import Callable, Dict, Optional, Tuple, TypeVar, List, cast\nimport functools\nimport dask\n\nA = TypeVar(\"A\")\nB = TypeVar(\"B\")\nC = TypeVar(\"C\")\nD = TypeVar(\"D\")\nU = TypeVar(\"U\")\n\n\ndef contains(self: Dict[A, B], key: A) -> bool:\n \"\"\"\n 
Tests whether this dict contains a binding for a key.\n\n Args:\n key: The key to find.\n\n Returns:\n True if the dict contains a binding for the key, False otherwise.\n \"\"\"\n return key in self\n\n\ndef size(self: Dict[A, B]) -> int:\n \"\"\"\n Computes the size of this dict.\n\n Returns:\n The size of the dict.\n \"\"\"\n return len(self)\n\n\ndef filter(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> Dict[A, B]:\n \"\"\"\n Selects all elements of this dict which satisfy a predicate.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered dict.\n \"\"\"\n return type(self)({k: v for k, v in self.items() if p((k, v))})\n\n\ndef filter_not(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> Dict[A, B]:\n \"\"\"\n Selects all elements of this dict which do not satisfy a predicate.\n\n Args:\n p: The predicate to not satisfy.\n\n Returns:\n The filtered dict.\n \"\"\"\n return type(self)({k: v for k, v in self.items() if not p((k, v))})\n\n\ndef flat_map(self: Dict[A, B], f: Callable[[Tuple[A, B]], Dict[C, D]]) -> Dict[C, D]:\n \"\"\"\n Builds a new dict by applying a function to all elements of this dict and using the elements of the resulting collections.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new dict.\n \"\"\"\n res = cast(Dict[C, D], type(self)())\n for k, v in self.items():\n d = f((k, v))\n res.update(d)\n return res\n\n\ndef foreach(self: Dict[A, B], f: Callable[[Tuple[A, B]], U]) -> None:\n \"\"\"\n Apply f to each element for its side effects.\n\n Args:\n f: The function to apply to all elements for its side effects.\n \"\"\"\n for k, v in self.items():\n f((k, v))\n\n\ndef is_empty(self: Dict[A, B]) -> bool:\n \"\"\"\n Tests whether the dict is empty.\n\n Returns:\n True if the dict is empty, False otherwise.\n \"\"\"\n return len(self) == 0\n\n\ndef map(self: Dict[A, B], f: Callable[[Tuple[A, B]], Tuple[C, D]]) -> Dict[C, D]:\n \"\"\"\n Builds a new dict by applying a function to all elements of this dict.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new dict.\n \"\"\"\n return cast(Dict[C, D], type(self)(f(x) for x in self.items()))\n\n\ndef to_list(self: Dict[A, B]) -> List[Tuple[A, B]]:\n \"\"\"\n Converts this dict to a list of (key, value) pairs.\n\n Returns:\n A list of pairs corresponding to the entries of the dict\n \"\"\"\n return [(k, v) for k, v in self.items()]\n\n\ndef count(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> int:\n \"\"\"\n Counts the number of elements in the collection which satisfy a predicate.\n\n Note: will not terminate for infinite-sized collections.\n\n Args:\n p: The predicate used to test elements.\n\n Returns:\n The number of elements satisfying the predicate p.\n \"\"\"\n c = 0\n for t in self.items():\n if p(t):\n c += 1\n\n return c\n\n\ndef fold_left(self: Dict[A, B], z: B, op: Callable[[B, Tuple[A, B]], B]) -> B:\n \"\"\"\n Applies a binary operator to a start value and all elements of this collection, going left to right.\n\n Note: will not terminate for infinite-sized collections.\n\n Note: might return different results for different runs, unless the underlying collection type is ordered or the operator is associative and commutative.\n\n Args:\n z: The start value.\n op: The binary operator.\n\n Returns:\n The result of inserting op between consecutive elements of this collection, going left to right with the start value z on the left:\n\n op(...op(z, x_1), x_2, ..., x_n)\n where x1, ..., xn are the elements of this collection. 
Returns z if this collection is empty.\n \"\"\"\n acc = z\n for t in self.items():\n acc = op(acc, t)\n\n return acc\n\n\ndef fold_right(self: Dict[A, B], z: B, op: Callable[[Tuple[A, B], B], B]) -> B:\n \"\"\"\n Applies a binary operator to a start value and all elements of this collection, going right to left.\n\n Note: will not terminate for infinite-sized collections.\n\n Note: might return different results for different runs, unless the underlying collection type is ordered or the operator is associative and commutative.\n\n Args:\n z: The start value.\n op: The binary operator.\n\n Returns:\n The result of inserting op between consecutive elements of this collection, going right to left with the start value z on the right:\n\n op(x_1, op(x_2, ... op(x_n, z)...))\n where x1, ..., xn are the elements of this collection. Returns z if this collection is empty.\n \"\"\"\n acc = z\n for t in reversed(self.items()):\n acc = op(t, acc)\n\n return acc\n\n\ndef forall(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> bool:\n \"\"\"\n Tests whether a predicate holds for all elements of this collection.\n\n Note: may not terminate for infinite-sized collections.\n\n Args:\n p: The predicate used to test elements.\n\n Returns:\n True if this collection is empty or the given predicate p holds for all elements of this collection, otherwise False.\n \"\"\"\n for t in self.items():\n if not p(t):\n return False\n return True\n\n\ndef find(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> Optional[Tuple[A, B]]:\n \"\"\"\n Finds the first element of the collection satisfying a predicate, if any.\n\n Note: may not terminate for infinite-sized collections.\n\n Note: might return different results for different runs, unless the underlying collection type is ordered.\n\n Args:\n p: The predicate used to test elements.\n\n Returns:\n An option value containing the first element in the collection that satisfies p, or None if none exists.\n \"\"\"\n for t in self.items():\n if p(t):\n return t\n\n return None\n\n\n# Parallel operations\n\n\ndef par_filter(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> Dict[A, B]:\n \"\"\"\n Selects in parallel all elements of this dict which satisfy a predicate.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered dict.\n \"\"\"\n preds = dask.compute(*(dask.delayed(p)(x) for x in self.items()))\n return type(self)({k: v for i, (k, v) in enumerate(self.items()) if preds[i]})\n\n\ndef par_filter_not(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> Dict[A, B]:\n \"\"\"\n Selects in parallel all elements of this dict which do not satisfy a predicate.\n\n Args:\n p: The predicate to not satisfy.\n\n Returns:\n The filtered dict.\n \"\"\"\n preds = dask.compute(*(dask.delayed(p)(x) for x in self.items()))\n return type(self)({k: v for i, (k, v) in enumerate(self.items()) if not preds[i]})\n\n\ndef par_flat_map(\n self: Dict[A, B], f: Callable[[Tuple[A, B]], Dict[C, D]]\n) -> Dict[C, D]:\n \"\"\"\n Builds a new dict by applying a function in parallel to all elements of this dict and using the elements of the resulting collections.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new dict.\n \"\"\"\n applications = dask.compute(*(dask.delayed(f)(x) for x in self.items()))\n return cast(\n Dict[C, D], type(self)({k: v for y in applications for k, v in y.items()})\n )\n\n\ndef par_map(self: Dict[A, B], f: Callable[[Tuple[A, B]], Tuple[C, D]]) -> Dict[C, D]:\n \"\"\"\n Builds a new dict by applying a function in parallel 
to all elements of this dict.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new dict.\n \"\"\"\n return cast(\n Dict[C, D],\n type(self)((dask.compute(*(dask.delayed(f)(x) for x in self.items())))),\n )\n\n\n# Pure operations\n\n\ndef pure_map(self: Dict[A, B], f: Callable[[Tuple[A, B]], Tuple[C, D]]) -> Dict[C, D]:\n \"\"\"\n Builds a new dict by applying a function to all elements of this dict using memoization to improve performance.\n\n WARNING: f must be a PURE function i.e., calling f on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new dict.\n \"\"\"\n f_cache = functools.cache(f)\n return cast(Dict[C, D], type(self)(f_cache(x) for x in self.items()))\n\n\ndef pure_flat_map(\n self: Dict[A, B], f: Callable[[Tuple[A, B]], Dict[C, D]]\n) -> Dict[C, D]:\n \"\"\"\n Builds a new dict by applying a function to all elements of this dict and using the elements of the resulting collections using memoization to improve performance.\n\n WARNING: f must be a PURE function i.e., calling f on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new dict.\n \"\"\"\n res = cast(Dict[C, D], type(self)())\n f_cache = functools.cache(f)\n for k, v in self.items():\n d = f_cache((k, v))\n res.update(d)\n return res\n\n\ndef pure_filter(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> Dict[A, B]:\n \"\"\"\n Selects all elements of this dict which satisfy a predicate using memoization to improve performance.\n\n WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered dict.\n \"\"\"\n p_cache = functools.cache(p)\n return type(self)({k: v for k, v in self.items() if p_cache((k, v))})\n\n\ndef pure_filter_not(self: Dict[A, B], p: Callable[[Tuple[A, B]], bool]) -> Dict[A, B]:\n \"\"\"\n Selects all elements of this dict which do not satisfy a predicate using memoization to improve performance.\n\n WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n\n Args:\n p: The predicate not to satisfy.\n\n Returns:\n The filtered dict.\n \"\"\"\n p_cache = functools.cache(p)\n return type(self)({k: v for k, v in self.items() if not p_cache((k, v))})\n\n\ndef extend_dict():\n \"\"\"\n Extends the dict built-in type with methods.\n \"\"\"\n curse(dict, \"contains\", contains)\n curse(dict, \"size\", size)\n curse(dict, \"filter\", filter)\n curse(dict, \"filter_not\", filter_not)\n curse(dict, \"flat_map\", flat_map)\n curse(dict, \"foreach\", foreach)\n curse(dict, \"is_empty\", is_empty)\n curse(dict, \"map\", map)\n curse(dict, \"to_list\", to_list)\n curse(dict, \"count\", count)\n curse(dict, \"fold_left\", fold_left)\n curse(dict, \"fold_right\", fold_right)\n curse(dict, \"forall\", forall)\n curse(dict, \"find\", find)\n\n # Parallel operations\n curse(dict, \"par_map\", par_map)\n curse(dict, \"par_filter\", par_filter)\n curse(dict, \"par_filter_not\", par_filter_not)\n curse(dict, \"par_flat_map\", par_flat_map)\n\n # Pure operations\n curse(dict, \"pure_map\", pure_map)\n curse(dict, \"pure_flat_map\", pure_flat_map)\n curse(dict, 
\"pure_filter\", pure_filter)\n curse(dict, \"pure_filter_not\", pure_filter_not)\n", "id": "12014546", "language": "Python", "matching_score": 4.747989654541016, "max_stars_count": 0, "path": "pyfuncol/dict.py" }, { "content": "from forbiddenfruit import curse\nfrom collections import defaultdict\nfrom typing import Callable, Dict, Optional, TypeVar, Set, cast\nimport functools\nimport dask\n\nA = TypeVar(\"A\")\nB = TypeVar(\"B\")\nK = TypeVar(\"K\")\nU = TypeVar(\"U\")\n\n\ndef map(self: Set[A], f: Callable[[A], B]) -> Set[B]:\n \"\"\"\n Builds a new set by applying a function to all elements of this set.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new set.\n \"\"\"\n return cast(Set[B], type(self)(f(x) for x in self))\n\n\ndef filter(self: Set[A], p: Callable[[A], bool]) -> Set[A]:\n \"\"\"\n Selects all elements of this set which satisfy a predicate.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered set.\n \"\"\"\n return type(self)(x for x in self if p(x))\n\n\ndef filter_not(self: Set[A], p: Callable[[A], bool]) -> Set[A]:\n \"\"\"\n Selects all elements of this set which do not satisfy a predicate.\n\n Args:\n p: The predicate to not satisfy.\n\n Returns:\n The filtered set.\n \"\"\"\n return type(self)(x for x in self if not p(x))\n\n\ndef flat_map(self: Set[A], f: Callable[[A], Set[B]]) -> Set[B]:\n \"\"\"\n Builds a new set by applying a function to all elements of this set and using the elements of the resulting collections.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new set.\n \"\"\"\n return cast(Set[B], type(self)(y for x in self for y in f(x)))\n\n\ndef contains(self: Set[A], elem: A) -> bool:\n \"\"\"\n Tests whether this set contains a given value as element.\n\n Args:\n elem: The element to look for.\n\n Returns:\n True if the set contains the element, False otherwise.\n \"\"\"\n return elem in self\n\n\ndef foreach(self: Set[A], f: Callable[[A], U]) -> None:\n \"\"\"\n Apply f to each element of the set for its side effects.\n\n Args:\n f: The function to apply to all elements for its side effects.\n \"\"\"\n for x in self:\n f(x)\n\n\ndef group_by(self: Set[A], f: Callable[[A], K]) -> Dict[K, Set[A]]:\n \"\"\"\n Partitions this set into a dict of sets according to some discriminator function.\n\n Args:\n f: The grouping function.\n\n Returns:\n A dictionary where elements are grouped according to the grouping function.\n \"\"\"\n # frozenset does not have `add`\n d = defaultdict(set if isinstance(self, frozenset) else type(self))\n for x in self:\n k = f(x)\n d[k].add(x)\n return d\n\n\ndef is_empty(self: Set[A]) -> bool:\n \"\"\"\n Tests whether the set is empty.\n\n Returns:\n True if the set is empty, False otherwise.\n \"\"\"\n return len(self) == 0\n\n\ndef size(self: Set[A]) -> int:\n \"\"\"\n Computes the size of this set.\n\n Returns:\n The size of the set.\n \"\"\"\n return len(self)\n\n\ndef find(self: Set[A], p: Callable[[A], bool]) -> Optional[A]:\n \"\"\"\n Finds the first element of the set satisfying a predicate, if any.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The first element satisfying the predicate, otherwise None.\n \"\"\"\n for x in self:\n if p(x):\n return x\n return None\n\n\ndef fold_left(self: Set[A], z: B, op: Callable[[B, A], B]) -> B:\n \"\"\"\n Applies a binary operator to a start value and all elements of this set, going left to right.\n\n Note: might return different results for different runs, unless the underlying collection type is ordered or 
the operator is associative and commutative.\n\n Args:\n z: The start value.\n op: The binary operation.\n\n Returns:\n The result of inserting op between consecutive elements of this set, going left to right with the start value z on the left:\n op(...op(z, x_1), x_2, ..., x_n)\n where x1, ..., xn are the elements of this set. Returns z if this set is empty.\n \"\"\"\n acc = z\n for x in self:\n acc = op(acc, x)\n return acc\n\n\ndef fold_right(self: Set[A], z: B, op: Callable[[A, B], B]) -> B:\n \"\"\"\n Applies a binary operator to all elements of this set and a start value, going right to left.\n\n Note: might return different results for different runs, unless the underlying collection type is ordered or the operator is associative and commutative.\n\n Args:\n z: The start value.\n op: The binary operation.\n\n Returns:\n The result of inserting op between consecutive elements of this set, going right to left with the start value z on the right:\n op(x_1, op(x_2, ... op(x_n, z)...))\n where x1, ..., xn are the elements of this set. Returns z if this set is empty.\n \"\"\"\n\n acc = z\n for x in self:\n acc = op(x, acc)\n return acc\n\n\ndef forall(self: Set[A], p: Callable[[A], bool]) -> bool:\n \"\"\"\n Tests whether a predicate holds for all elements of this set.\n\n Args:\n p: The predicate used to test elements.\n\n Returns:\n True if this set is empty or the given predicate p holds for all elements of this set, otherwise False.\n \"\"\"\n for x in self:\n if not p(x):\n return False\n\n return True\n\n\ndef length(self: Set[A]) -> int:\n \"\"\"\n Returns the length (number of elements) of the set. `size` is an alias for length.\n\n Returns:\n The length of the set\n \"\"\"\n return len(self)\n\n\n# Parallel operations\n\n\ndef par_map(self: Set[A], f: Callable[[A], B]) -> Set[B]:\n \"\"\"\n Builds a new set by applying in parallel a function to all elements of this set.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new set.\n \"\"\"\n return cast(Set[B], type(self)((dask.compute(*(dask.delayed(f)(x) for x in self)))))\n\n\ndef par_filter(self: Set[A], p: Callable[[A], bool]) -> Set[A]:\n \"\"\"\n Selects in parallel all elements of this set which satisfy a predicate.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered set.\n \"\"\"\n preds = dask.compute(*(dask.delayed(p)(x) for x in self))\n return type(self)(x for i, x in enumerate(self) if preds[i])\n\n\ndef par_filter_not(self: Set[A], p: Callable[[A], bool]) -> Set[A]:\n \"\"\"\n Selects in parallel all elements of this set which do not satisfy a predicate.\n\n Args:\n p: The predicate to not satisfy.\n\n Returns:\n The filtered set.\n \"\"\"\n preds = dask.compute(*(dask.delayed(p)(x) for x in self))\n return type(self)(x for i, x in enumerate(self) if not preds[i])\n\n\ndef par_flat_map(self: Set[A], f: Callable[[A], Set[B]]) -> Set[B]:\n \"\"\"\n Builds a new set by applying in parallel a function to all elements of this set and using the elements of the resulting collections.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new set.\n \"\"\"\n applications = dask.compute(*(dask.delayed(f)(x) for x in self))\n return cast(Set[B], type(self)(x for y in applications for x in y))\n\n\n# Pure operations\n\n\ndef pure_map(self: Set[A], f: Callable[[A], B]) -> Set[B]:\n \"\"\"\n Builds a new set by applying a function to all elements of this set using memoization to improve performance.\n\n WARNING: f must be a PURE function i.e., calling f on the same input 
must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n f: The PURE function to apply to all elements.\n\n Returns:\n The new set.\n \"\"\"\n f_cache = functools.cache(f)\n return cast(Set[B], type(self)(f_cache(x) for x in self))\n\n\ndef pure_flat_map(self: Set[A], f: Callable[[A], Set[B]]) -> Set[B]:\n \"\"\"\n Builds a new set by applying a function to all elements of this set and using the elements of the resulting collections using memoization to improve performance.\n\n WARNING: f must be a PURE function i.e., calling f on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n f: The function to apply to all elements.\n\n Returns:\n The new set.\n \"\"\"\n f_cache = functools.cache(f)\n return cast(Set[B], type(self)(y for x in self for y in f_cache(x)))\n\n\ndef pure_filter(self: Set[A], p: Callable[[A], bool]) -> Set[A]:\n \"\"\"\n Selects all elements of this set which satisfy a predicate using memoization to improve performance.\n\n WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n Args:\n p: The predicate to satisfy.\n\n Returns:\n The filtered set.\n \"\"\"\n p_cache = functools.cache(p)\n return type(self)(x for x in self if p_cache(x))\n\n\ndef pure_filter_not(self: Set[A], p: Callable[[A], bool]) -> Set[A]:\n \"\"\"\n Selects all elements of this set which do not satisfy a predicate using memoization to improve performance.\n\n WARNING: p must be a PURE function i.e., calling p on the same input must always lead to the same result!\n\n Type A must be hashable using `hash()` function.\n\n\n Args:\n p: The predicate not to satisfy.\n\n Returns:\n The filtered set.\n \"\"\"\n p_cache = functools.cache(p)\n return type(self)(x for x in self if not p_cache(x))\n\n\ndef extend_set():\n \"\"\"\n Extends the set and frozenset built-in type with methods.\n \"\"\"\n curse(set, \"map\", map)\n curse(set, \"filter\", filter)\n curse(set, \"filter_not\", filter_not)\n curse(set, \"flat_map\", flat_map)\n curse(set, \"contains\", contains)\n curse(set, \"foreach\", foreach)\n curse(set, \"group_by\", group_by)\n curse(set, \"is_empty\", is_empty)\n curse(set, \"size\", size)\n curse(set, \"find\", find)\n curse(set, \"fold_left\", fold_left)\n curse(set, \"fold_right\", fold_right)\n curse(set, \"forall\", forall)\n curse(set, \"length\", length)\n\n curse(frozenset, \"map\", map)\n curse(frozenset, \"filter\", filter)\n curse(frozenset, \"filter_not\", filter_not)\n curse(frozenset, \"flat_map\", flat_map)\n curse(frozenset, \"contains\", contains)\n curse(frozenset, \"foreach\", foreach)\n curse(frozenset, \"group_by\", group_by)\n curse(frozenset, \"is_empty\", is_empty)\n curse(frozenset, \"size\", size)\n curse(frozenset, \"find\", find)\n curse(frozenset, \"fold_left\", fold_left)\n curse(frozenset, \"fold_right\", fold_right)\n curse(frozenset, \"forall\", forall)\n curse(frozenset, \"length\", length)\n\n # Parallel operations\n curse(set, \"par_map\", par_map)\n curse(set, \"par_filter\", par_filter)\n curse(set, \"par_filter_not\", par_filter_not)\n curse(set, \"par_flat_map\", par_flat_map)\n\n curse(frozenset, \"par_map\", par_map)\n curse(frozenset, \"par_filter\", par_filter)\n curse(frozenset, \"par_filter_not\", par_filter_not)\n curse(frozenset, \"par_flat_map\", par_flat_map)\n\n # Pure operations\n curse(set, \"pure_map\", pure_map)\n curse(set, 
\"pure_flat_map\", pure_flat_map)\n curse(set, \"pure_filter\", pure_filter)\n curse(set, \"pure_filter_not\", pure_filter_not)\n\n curse(frozenset, \"pure_map\", pure_map)\n curse(frozenset, \"pure_flat_map\", pure_flat_map)\n curse(frozenset, \"pure_filter\", pure_filter)\n curse(frozenset, \"pure_filter_not\", pure_filter_not)\n", "id": "3786576", "language": "Python", "matching_score": 1.7789862155914307, "max_stars_count": 0, "path": "pyfuncol/set.py" }, { "content": "from .dict import extend_dict\nfrom .list import extend_list\nfrom .set import extend_set\n\nextend_list()\nextend_dict()\nextend_set()\n", "id": "2693097", "language": "Python", "matching_score": 0.12512587010860443, "max_stars_count": 0, "path": "pyfuncol/__init__.py" } ]
3.213953
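The pyfuncol entries above use forbiddenfruit.curse to attach Scala-style collection methods to the built-in list, dict, set and frozenset types; the package's __init__.py applies extend_list, extend_dict and extend_set on import. A minimal usage sketch, assuming pyfuncol and its forbiddenfruit and dask dependencies are installed, mirrors what the tests in the record exercise:

# Minimal usage sketch for the pyfuncol code above; importing the package is
# enough, since the import itself curses the built-in collection types.
import pyfuncol  # needed only for its side effects

# list extensions
print([1, 2, 3].map(lambda x: x * 2))                                # [2, 4, 6]
print(["abc", "de", "f"].group_by(len))                              # {3: ['abc'], 2: ['de'], 1: ['f']}

# dict extensions (callbacks receive (key, value) tuples)
print({"a": 1, "b": 2}.filter(lambda kv: kv[1] > 1))                 # {'b': 2}
print({"a": 1, "b": 2}.fold_left("", lambda acc, kv: acc + kv[0]))   # 'ab'

# set extensions (the same methods are cursed onto frozenset)
print({1, 2, 3}.filter_not(lambda x: x < 2))                         # {2, 3}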
mdstepha
[ { "content": "#!/usr/local/bin/python3 \n\n# This script generate parts of code for the file src/functions/setSharedVar.m\n\nwith open('generated.txt', 'w') as file:\n file.write(\"\\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%% generated-start %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\n\")\n for model in ['Arm', 'Freq']:\n model_lower = model.lower(); \n\n for a in range(0, 2):\n for b in range(0, 2):\n for c in range(0, 2):\n for d in range(0, 2):\n for e in range(0, 2):\n for f in range(0, 2):\n s = f\"\"\"\n elseif name == \"simvma_{model_lower}ModelDef_{a}{b}{c}{d}{e}{f}\"\n set{model}ModelDef_{a}{b}{c}{d}{e}{f}(filepath, value);\n \"\"\"\n file.write(s)\n file.write(\"\\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%% generated-end %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\n\") \n\n\n", "id": "1760420", "language": "Python", "matching_score": 4.438104152679443, "max_stars_count": 0, "path": "devt/generate1_setSharedVar.py" }, { "content": "#!/usr/local/bin/python3 \n\n# This script generate parts of code for the file src/functions/setSharedVarsForPredModelsTrainedOnDefRepos.m \n\nwith open('generated.txt', 'w') as file:\n file.write(\"\\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%% generated-start %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\n\")\n for model in ['Arm', 'Freq']:\n model_lower = model.lower(); \n model_abb = 'am' if model == 'Arm' else 'fm'\n\n for a in range(0, 2):\n for b in range(0, 2):\n for c in range(0, 2):\n for d in range(0, 2):\n for e in range(0, 2):\n for f in range(0, 2):\n\n s = f\"\"\"\n setSharedVar('simvma_{model_lower}ModelDef_{a}{b}{c}{d}{e}{f}', {model_abb}_{a}{b}{c}{d}{e}{f})\"\"\"\n file.write(s)\n file.write(\"\\n\\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%% generated-end %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\n\") \n\n\n", "id": "11288490", "language": "Python", "matching_score": 0.989440381526947, "max_stars_count": 0, "path": "devt/generate3_setSharedVarsForPredModelsTrainedOnDefaultRepos.py" }, { "content": "#!/usr/local/bin/python3 \n\n# This script creates file 'A1_SourceCode.tex' in current directory (for thesis writing) \n\nimport os \n\nclasses = os.listdir('../src/classes') \nclasses = [x for x in classes if x.endswith('.m')]\nclasses.sort()\n\nfunns = os.listdir('../src/functions') + os.listdir('../src/functions/devt')\nfunns = [x for x in funns if x.endswith('.m')]\nfunns = funns + ['getSimvmaPath.m', 'initialize.m', 'sl_customization.m']\nfunns.sort() \n\ncontent = \"\"\"\\chapter{Source Code}\n\\label{chapter:appendix-source-code}\n\nThis appendix presents the MATLAB implementation of various classes and functions used in the project.\n\"\"\"\n\ncontent += \"\\n\\n\\section{Class Definitions}\\n\\label{section:class-definitions}\\n\"\n\n\nfor i, x in enumerate(classes):\n # print(x)\n # if i!=0:\n # content += f\"\\n\\\\newpage\"\n x = x[:-2] # removing trailing .m \n x_latex = x.replace('_', '\\_')\n content += f\"\\n\\lstinputlisting[caption={{{x_latex}.m class definition}}, captionpos=t,label={{lst:code-{x}}}]{{Codes/classes/{x}.m}}\"\n\n\ncontent += \"\\n\\n\\\\newpage\\n\\section{Function Definitions}\\n\\label{section:function-definitions}\\n\"\n\nfor i, x in enumerate(funns):\n # print(x)\n # if i!=0:\n # content += f\"\\n\\\\newpage\"\n x = x[:-2] # removing trailing .m \n x_latex = x.replace('_', '\\_')\n content += f\"\\n\\lstinputlisting[caption={{{x_latex}.m function definition}}, captionpos=t,label={{lst:code-{x}}}]{{Codes/functions/{x}.m}}\"\n\n\nwith open('A1_SourceCode.tex', 'w') as file: \n file.write(content)", "id": "3845774", "language": "Python", "matching_score": 0.800177812576294, "max_stars_count": 0, 
"path": "devt/generate_A1_SourceCode.py" }, { "content": "#!/usr/bin/python3 \n\nimport shutil \nimport os \n\n\ndef install_unix():\n \"\"\"Install slx2mdl in unix-like OS (macos, linux)\"\"\"\n path = os.path.expanduser('~/.SLX2MDL') # installation directory\n # remove ~/.slx2mdl/ (if exists previously), \n if os.path.exists(path):\n shutil.rmtree(path)\n\n # copy all contents of slx-mdl-tranformation/ to ~/.slx2mdl/\n shutil.copytree('../SLX2MDL', path)\n\n filepath = os.path.expanduser('~/.SLX2MDL/slx2mdl.py')\n symlink_path = '/usr/local/bin/slx2mdl'\n\n # remove symbolic link, if exists previously\n if os.path.exists(symlink_path):\n os.remove(symlink_path)\n\n os.system(f\"ln -s {filepath} {symlink_path}\")\n\n print(\"SLX2MDL installed successfully.\")\n print(\"You can now use it from the command line using the command slx2mdl.\") \n\n\nif __name__ == '__main__': \n install_unix() \n", "id": "4673738", "language": "Python", "matching_score": 1.158102035522461, "max_stars_count": 2, "path": "INSTALL.py" }, { "content": "#!/usr/bin/python3 \n\n# ASSUMPTIONS: \n# - The following files are present in current directory: \n# - slx2mdl - github.csv \n# - slx2mdl - matlab-central.csv \n# - slx2mdl - other.csv \n# - slx2mdl - sourceforge.csv \n\n\nimport os \nimport json \n\nREADABLE = True \n\ndata = {\n 'github': {\n 'filepath': 'slx2mdl - github.csv',\n 'g': 0,\n 'bin': 0,\n 'x': 0, \n 'dis': 0,\n 'total': 0,\n },\n 'matlab-central': {\n 'filepath': 'slx2mdl - matlab-central.csv',\n 'g': 0,\n 'bin': 0,\n 'x': 0, \n 'dis': 0,\n 'total': 0,\n },\n 'other': {\n 'filepath': 'slx2mdl - other.csv',\n 'g': 0,\n 'bin': 0,\n 'x': 0, \n 'dis': 0,\n 'total': 0,\n },\n 'sourceforge': {\n 'filepath': 'slx2mdl - sourceforge.csv',\n 'g': 0,\n 'bin': 0,\n 'x': 0, \n 'dis': 0,\n 'total': 0,\n }\n}\n\nmissing = 0\nsummary = {\n 'g': 0,\n 'bin': 0,\n 'x': 0,\n 'dis': 0,\n # 'missing': 0,\n 'total': 0,\n}\n\nfor dataset, innermap in data.items():\n filepath = innermap['filepath']\n with open(filepath, 'r') as file:\n for i in range(5): # first 5 lines are headers\n file.readline() \n for line in file.readlines():\n line = line.strip() \n if line:\n data[dataset]['total'] += 1\n summary['total'] += 1\n tokens = line.split(',')\n sn, model, status = tokens[:3]\n # print(model)\n status = status.strip() \n status = status.replace('*', '')\n assert status in ['g', 'bin', 'x', 'dis','']\n if status:\n innermap[status] += 1\n summary[status] += 1\n else: \n missing += 1\n # summary['missing'] += 1 \n print(f'missing status for {dataset}/{model}')\n\n\n # rename dict keys (for readable printing)\n if READABLE:\n innermap['good transformations'] = innermap.pop('g')\n innermap['good except for binary files'] = innermap.pop('bin')\n innermap['failed transformations'] = innermap.pop('x')\n innermap['discarded slx files'] = innermap.pop('dis')\n \n# rename dict keys (for readable printing)\nif READABLE:\n summary['good transformations'] = summary.pop('g')\n summary['good except for binary files'] = summary.pop('bin')\n summary['failed transformations'] = summary.pop('x')\n summary['discarded slx files'] = summary.pop('dis')\n\n\nprint('\\n\\nreport')\nprint(json.dumps(data, indent=4))\nprint(\"\\n\\nsummary\")\nprint(json.dumps(summary, indent=4))\n\n\n\n \n\n\n", "id": "9962969", "language": "Python", "matching_score": 1.0517854690551758, "max_stars_count": 2, "path": "evaluation/results/print_evaluation_report.py" }, { "content": "#!/usr/bin/python3\n\nimport glob\nfrom commons import *\nfrom tags_model import 
*\nfrom transform_stateflow import stateflow_xml2mdl\nimport time\n\n\nclass Transformer:\n # controls whether to preetify the output mdl\n # This can be overridden from Transformer.initialize()\n _preetify = False\n\n filepath_slx = None # input\n filepath_mdl = None # output\n filepath_original_mdl = None\n filepath_original_mdl_preetified = None\n dirpath_working = None\n dirpath_batch_prod_output = None # see docstring of Transformer.initialize()\n dirpath_slx_extracted = None\n filepath_merged_commented = None\n filepath_merged_uncommented = None\n\n filepath_stateflow = None\n filepath_stateflow_preprocessed = None\n\n filepath_output_model_only_unpreetified = None\n filepath_output_model_only_preetified = None\n filepath_output_stateflow_only = None\n\n filepath_merged_commented_no_multiline_str_content = None\n filepath_merged_uncommented_no_multiline_str_content = None\n filepath_mdl_unpreetified = None\n filepath_mdl_preetified = None\n\n filepath_original_mdl_at_working_directory = None\n filepath_original_mdl_at_current_directory = None\n filepath_output_mdl_at_current_directory = None\n\n filepath_error_log = None\n\n @classmethod\n def _extract_slx_archive(cls):\n \"\"\"Extract slx archive (located in cls.filepath_slx).\n The extracted files will be in cls.dirpath_slx_extracted\"\"\"\n with zipfile.ZipFile(cls.filepath_slx, 'r') as zip_ref:\n zip_ref.extractall(cls.dirpath_slx_extracted)\n\n @classmethod\n def _merge_xmls(cls, output_filepath):\n \"\"\"Merge xml files in the slx archive into one big xml file\n Some xml files (eg. stateflow.xml) won't be merged.\n \"\"\"\n\n # files are in the order in which the corresponding information appears in the final mdl file\n # while this is not a requirement, it will keep the information look more 'organized' to a human reader.\n files = [\n 'simulink/blockdiagram.xml',\n 'simulink/windowsInfo.xml',\n # 'slx-files/metadata/coreProperties.xml',\n\n # plugins\n 'simulink/plugins/AnimationPlugin', # first found in sldemo_suspn ;\n 'simulink/plugins/DiagnosticSuppressor.xml',\n 'simulink/plugins/LogicAnalyzerPlugin.xml',\n 'simulink/plugins/NotesPlugin.xml',\n 'simulink/plugins/SLCCPlugin.xml',\n 'simulink/plugins/WebScopes_FoundationPlugin.xml',\n\n 'simulink/configSet0.xml',\n 'simulink/bddefaults.xml',\n # 'simulink/configSetInfo.xml',\n 'simulink/graphicalInterface.xml',\n ]\n\n blockdiagram_file_contains_stateflow = False \n\n with open(output_filepath, 'w') as wfile:\n for f in files:\n # it is not guarranted that every slx files contains all of these xml files\n # so we proceed only if the file exists.\n p = os.path.join(cls.dirpath_working, 'slx-files', f)\n if os.path.exists(p):\n with open(p) as rfile:\n wfile.write('\\n\\n')\n comment = f'<!--{f[9:]}-->' # filename inside comment\n\n rfile.readline() # skip first line\n for line in rfile:\n\n if p.endswith('blockdiagram.xml'): # file-specific processing\n # skip <ModelInformation...> and its closing tag (of blockdiagram.xml)\n if 'ModelInformation' in line:\n continue\n # don't write the last line yet.\n if line.strip() == '</Model>':\n roottag = 'Model'\n continue\n if line.strip() == '</Library>':\n roottag = 'Library'\n continue\n if line.strip() == '</Subsystem>': # first found in simulink/ex_modeling_mechanical_system\n roottag = 'Subsystem'\n continue\n\n if line.strip() == '<Stateflow>': \n blockdiagram_file_contains_stateflow = True \n\n line = line[:-1] + comment + '\\n'\n # IMPORTANT: NEVER ADD ANY INDENTATION I.E. 
NEVER ADD ANY EXTRA SPACES TO THE\n # LEFT OF THE LINES WHEN WRITING THEM TO THE FILE. OTHERWISE, WE WILL END UP\n # WITH INVALID STRINGS (FOR MULTILINE STRING CONTENTS). THIS BECOMES FATAL\n # SOMETIMES -- FOR EXAMPLE: THIS CAN CAUSE INVLID PATH REFERENCES TO LIBRARY\n # ELEMENTS AND HENCE CORRUPT THE MODEL.\n wfile.write(line)\n\n # finally write the closing tag for <Model> or <Library> or <Subsystem>\n wfile.write(f'</{roottag}>')\n\n # First found in matlab-central/Tube_Alpha_sf.slx, \n # some slx files have <Stateflow> tag within <Model> tag inside simulink/blockdiagram.xml file, \n # rather than in simulink/stateflow.xml file. In such cases, \n # we extract the <Stateflow>...</Stateflow content from inside simulink/blockdiagram.xml, \n # and create the file simulink/stateflow.xml ourselves with the extracted content.\n if blockdiagram_file_contains_stateflow:\n temp_filepath = os.path.join(cls.dirpath_working, 'tempfile')\n Utils.copy_file(output_filepath, temp_filepath)\n stateflow_filepath = os.path.join(cls.dirpath_working, 'slx-files', 'simulink', 'stateflow.xml')\n\n with open(temp_filepath) as rfile:\n # with open(output_filepath, 'w') as merged_file:\n with open(output_filepath, 'w') as merged_file:\n with open(stateflow_filepath, 'w') as sf_file: \n sf_file.write('<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n')\n for line in rfile:\n if line.strip().startswith('<Stateflow>'):\n sf_starttagline = line # stateflow's start line \n # write <Stateflow>...</Stateflow> in simulink/stateflow.file \n while not line.strip().startswith('</Stateflow>'):\n line = line.replace('<!--blockdiagram.xml-->', '') # remove comment \n sf_file.write(line)\n line = rfile.readline()\n # this makes sure the indentation of closing </Stateflow> tag is same as that of <Stateflow>\n sf_endtagline = sf_starttagline.replace('<!--blockdiagram.xml-->', '').replace('<Stateflow>', '</Stateflow>')\n sf_file.write(sf_endtagline) \n else:\n merged_file.write(line) \n Utils.remove_file(temp_filepath)\n\n\n @classmethod\n def initialize(cls, args, filepath_slx, dirpath_working, dirpath_batch_prod_output, count, filepath_mdl=None, preetify=False):\n \"\"\"Initialize the transformer.\n\n Parameters: \n args(dict) : arguments provided by user \n filepath_slx(str) : absolute/relative filepath of input slx file. This is available in args only in 'single' mode, \n so we need to pass it. \n dirpath_working(str): directory where intermediate (and output) files will be produced. \n In production mode, this directory will be removed after transformation completes.\n dirpath_batch_prod_output(str) : If 'mode' is 'batch', and 'devt-mode' is 'no', then this directory will contain\n all output mdl files immediately within itself (i.e. without nesting). If 'devt-mode' is 'yes',\n this directory won't be created at all.\n count(str) : count of the slx file. If mode = 'single', set it to 1. If mode = 'batch', set it to the slx file\n count (beginning from 1.)\n filepath_mdl(str) : absolute/relative filepath of output mdl file \n preetify(bool) : If true, output mdl file is preetified. 
\n \"\"\"\n cls._args = args\n cls._preetify = preetify\n\n # this is needed because Utils is SINGLETON\n Utils.clear_ids()\n UtilsStfl.clear_ids()\n\n # define all necessary file and directory paths\n # we will always use absolute paths\n cls.filepath_slx = os.path.abspath(filepath_slx)\n cls.dirpath_working = os.path.abspath(dirpath_working)\n cls.dirpath_batch_prod_output = os.path.abspath(dirpath_batch_prod_output) if dirpath_batch_prod_output else None\n\n if filepath_mdl is None:\n # by default, the output mdl filename will be same as the input slx file with a different extension i.e mdl instead of slx.\n # And the output mdl file will be placed in user's current dirrectory (rather than input slx file's directory.)\n dirpath, filename_without_ext, ext_with_dot = Utils.split_filepath(cls.filepath_slx)\n filepath_mdl = f\"{filename_without_ext}.mdl\" # relative wrt current directory\n\n cls.filepath_mdl = os.path.abspath(filepath_mdl)\n\n # the path is set irrespective of whether the file exists or not\n cls.filepath_original_mdl = os.path.splitext(cls.filepath_slx)[0] + '_org.mdl'\n cls.filepath_original_mdl_preetified = os.path.join(cls.dirpath_working, 'original_preetified.mdl')\n\n cls.dirpath_slx_extracted = os.path.abspath(os.path.join(cls.dirpath_working, 'slx-files'))\n cls.filepath_merged_commented = os.path.abspath(os.path.join(cls.dirpath_working, 'merged_commented.xml'))\n cls.filepath_merged_uncommented = os.path.abspath(os.path.join(cls.dirpath_working, 'merged_uncommented.xml'))\n cls.filepath_merged_commented_no_multiline_str_content = os.path.abspath(os.path.join(cls.dirpath_working, 'merged_commented_no_multiline_str_content.xml'))\n cls.filepath_merged_uncommented_no_multiline_str_content = os.path.abspath(os.path.join(cls.dirpath_working, 'merged_uncommented_no_multiline_str_content.xml'))\n\n cls.filepath_output_model_only_unpreetified = os.path.abspath(os.path.join(cls.dirpath_working, 'output_model_only_unpreetified.mdl'))\n cls.filepath_output_model_only_preetified = os.path.abspath(os.path.join(cls.dirpath_working, 'output_model_only_preetified.mdl'))\n\n cls.filepath_output_stateflow_only = os.path.abspath(os.path.join(cls.dirpath_working, 'output_stateflow_only.mdl'))\n\n cls.filepath_mdl_unpreetified = os.path.abspath(os.path.join(cls.dirpath_working, 'output.mdl'))\n cls.filepath_mdl_preetified = os.path.abspath(os.path.join(cls.dirpath_working, 'output_preetified.mdl'))\n\n cls.filepath_stateflow = os.path.abspath(os.path.join(cls.dirpath_working, 'slx-files', 'simulink', 'stateflow.xml'))\n cls.filepath_stateflow_preprocessed = os.path.abspath(os.path.join(cls.dirpath_working, 'stateflow-preprocessed.xml'))\n\n cls.filepath_output_mdl_at_current_directory = os.path.abspath('output.mdl')\n\n # the path is set irrespective of whether the original mdl file exists or not\n cls.filepath_original_mdl_at_working_directory = os.path.abspath(os.path.join(cls.dirpath_working, 'original.mdl'))\n cls.filepath_original_mdl_at_current_directory = os.path.abspath('original.mdl')\n\n cls.filepath_error_log = os.path.abspath('error.log')\n\n # if mode is 'single', the performance report will be located in the working directory\n # if mode is 'batch', the performance report will be located in the 'parent' working directory (not the individual slx file's working directory)\n if args['mode'] == 'single':\n cls.filepath_performance_report = os.path.join(cls.dirpath_working, 'performance-report.csv')\n else: # mode = batch\n cls.filepath_performance_report = 
os.path.join(os.path.sep.join(cls.dirpath_working.split(os.path.sep)[:-1]), 'performance-report.csv')\n \n \n\n mode = f\"{cls._args['mode']}, {'development' if cls._args['devt_mode'] else 'production'}\"\n\n print(f\"\\nmode : {mode}\")\n print(f\"filepath_slx : {cls.filepath_slx}\")\n print(f\"filepath_mdl : {cls.filepath_mdl}\")\n if cls._args['devt_mode']:\n print(f\"filepath_original_mdl, if any : {cls.filepath_original_mdl}\")\n print(f\"dirpath_working : {cls.dirpath_working}\")\n if dirpath_batch_prod_output:\n print(f\"dirpath_batch_prod_output : {cls.dirpath_batch_prod_output}\")\n print(f\"filepath_error_log : {cls.filepath_error_log}\")\n print(f\"filepath_performance_report : {cls.filepath_performance_report}\\n\")\n \n\n # remove previous files and create all necessary files/folders\n # IMPORTANT: dirpath_batch_prod_output must not be removed or created from here,\n # because doing so would erase output mdl files of pervious models in the same batch.\n # Removing previous dirpath_batch_prod_output (if any), and recreating one\n # is handled by main() method of this module.\n\n Utils.remove_file(cls.filepath_original_mdl_at_current_directory)\n Utils.remove_file(cls.filepath_output_mdl_at_current_directory)\n Utils.remove_file(cls.filepath_mdl)\n Utils.remove_file('original.slx') # this file exists if previous transformation was run in 'devt' mode\n Utils.remove_dirpath(cls.dirpath_working)\n Utils.create_dirpath(cls.dirpath_working)\n Utils.create_file(cls.filepath_mdl)\n if args['report_performance'] and count == 1:\n Utils.create_file(cls.filepath_performance_report)\n with open(cls.filepath_performance_report, 'w') as file:\n file.write(f\"slx_filepath,n_lines_output_mdl,time_taken(s)\\n\") \n \n\n # copy original mdl file to working directory and current directory\n # do this in this method rather than in transform() so that even if transformation fails,\n # we can still inspect the original mdl file at the working directory and current directory\n if os.path.exists(cls.filepath_original_mdl) and cls._args['devt_mode']:\n Utils.copy_file(cls.filepath_original_mdl, cls.filepath_original_mdl_at_working_directory)\n Utils.copy_file(cls.filepath_original_mdl, cls.filepath_original_mdl_at_current_directory)\n if cls._preetify:\n Utils.preetify_mdl_file(cls.filepath_original_mdl, cls.filepath_original_mdl_preetified)\n\n @classmethod\n def _preprocess_stateflow(cls, input_filepath, output_filepath):\n with open(cls.filepath_stateflow) as rfile:\n rfile.readline() # skip first line\n with open(output_filepath, 'w') as wfile: # intermediate output in output_filepath\n for line in rfile:\n wfile.write(line)\n Utils.str_contents_multiline_to_singleline(input_filepath=output_filepath, output_filepath=output_filepath) # write in same file\n\n @classmethod\n def _remove_intermediate_files_and_dirs(cls):\n \"\"\"Remove (all) intermediate files produced during the transformation.\n Which files/dirs to remove is decided based on args.\n\n Parameters: \n args(dict): arguments passed by user\n \"\"\"\n\n # we don't want to remove any intermediate file in 'development' mode.\n if not cls._args['devt_mode']:\n Utils.remove_file(cls.filepath_original_mdl_at_current_directory)\n # the file 'output.mdl' is produced in the current directory as an intermediate file during transformation \n # If the user specified --mdl-filepath as 'output.mdl', don't delete this file, otherwise delete it. 
\n if cls.filepath_output_mdl_at_current_directory != os.path.abspath('output.mdl'):\n Utils.remove_file(cls.filepath_output_mdl_at_current_directory)\n Utils.remove_dirpath(cls.dirpath_working)\n\n @classmethod\n def transform(cls):\n \"\"\"Transform the slx file at cls.filepath_slx to mdl file\n and save it in cls.filepath_mdl.\"\"\"\n\n cls._extract_slx_archive()\n cls._merge_xmls(output_filepath=cls.filepath_merged_commented)\n\n Utils.uncomment_xml_file(input_filepath=cls.filepath_merged_commented, output_filepath=cls.filepath_merged_uncommented)\n Utils.str_contents_multiline_to_singleline(input_filepath=cls.filepath_merged_commented, output_filepath=cls.filepath_merged_commented_no_multiline_str_content)\n Utils.str_contents_multiline_to_singleline(input_filepath=cls.filepath_merged_uncommented, output_filepath=cls.filepath_merged_uncommented_no_multiline_str_content)\n\n # transform Model\n with open(cls.filepath_merged_uncommented_no_multiline_str_content) as file:\n xml = file.read()\n xml = xml.strip()\n modelOrLibraryOrSubsystem = ModelOrLibraryOrSubsystem(xml, parent_xml=None)\n mdl = modelOrLibraryOrSubsystem.strmdl\n with open(cls.filepath_output_model_only_unpreetified, 'w') as file:\n file.write(mdl)\n\n Utils.copy_file(cls.filepath_output_model_only_unpreetified, cls.filepath_mdl_unpreetified)\n\n if cls._preetify:\n Utils.preetify_mdl_file(cls.filepath_output_model_only_unpreetified, cls.filepath_output_model_only_preetified)\n Utils.copy_file(cls.filepath_output_model_only_preetified, cls.filepath_mdl_preetified)\n\n # handle stateflow\n if os.path.exists(cls.filepath_stateflow):\n # transform Statelfow\n cls._preprocess_stateflow(input_filepath=cls.filepath_stateflow, output_filepath=cls.filepath_stateflow_preprocessed)\n with open(cls.filepath_stateflow_preprocessed) as file:\n stfl_xml = file.read()\n stfl_mdl = stateflow_xml2mdl(stfl_xml) # stateflow transformation\n with open(cls.filepath_output_stateflow_only, 'w') as file:\n file.write(stfl_mdl)\n\n # Append stateflow content to both unpreetified as well as preetified versions of mdl file\n with open(cls.filepath_mdl_unpreetified, 'a') as file:\n file.write(stfl_mdl)\n if cls._preetify:\n with open(cls.filepath_mdl_preetified, 'a') as file:\n file.write(stfl_mdl)\n\n # copy file\n # preetifying output may introduce error in the mdl file,\n # so the unpreetified version is copied as the output to filepath_mdl\n Utils.copy_file(cls.filepath_mdl_unpreetified, cls.filepath_mdl, ignore_same_file_error=True)\n\n if cls._args['devt_mode']:\n Utils.copy_file(cls.filepath_mdl, 'output.mdl', ignore_same_file_error=True)\n Utils.copy_file(cls.filepath_slx, 'original.slx', ignore_same_file_error=True)\n Utils.copy_file(cls.filepath_slx, os.path.join(cls.dirpath_working, 'original.slx'))\n\n # notify user if preetifying failed.\n if cls._preetify:\n with open(cls.filepath_output_model_only_preetified) as file:\n content = file.read()\n # In case preetifying with TXL fails, nothing is written to output_model_only_preetified.mdl\n if not content:\n print('\\nATTENTIION: Preetifying the output mdl file using TXL grammar failed!!!')\n print(' Please, discard the preetified version in working directory.\\n')\n\n\ndef main():\n def try_transform(args):\n time_start = time.time()\n try:\n Transformer.transform()\n\n print(f\"slx to mdl conversion was successful!\\n\")\n\n if args['remove_slx']:\n print(f'deleting file: {Transformer.filepath_slx}')\n Utils.remove_file(Transformer.filepath_slx)\n\n if args['report_performance']:\n 
time_taken = time.time() - time_start\n with open(Transformer.filepath_mdl) as file:\n nlines = len(file.readlines())\n print(f\"Output mdl file size : {nlines} lines\")\n print(f\"Time taken : {time_taken:.4} seconds\")\n \n with open(Transformer.filepath_performance_report, 'a') as file:\n file.write(f\"{Transformer.filepath_slx},{nlines},{time_taken}\\n\")\n\n\n\n except Exception as e:\n err_msg = \"\\n*** ERROR: slx to mdl conversion encountered a problem. See details below:\\n\\n\"\n err_msg_print = err_msg + str(e)\n print(err_msg_print)\n\n err_msg += f\"\\n\\nfilepath_slx : {Transformer.filepath_slx}\"\n err_msg += f\"\\nfilepath_mdl : {Transformer.filepath_mdl}\"\n err_msg += f\"\\nfilepath_original_mdl, if any : {Transformer.filepath_original_mdl}\"\n err_msg += f\"\\ndirpath_working : {Transformer.dirpath_working}\\n\"\n\n err_msg += str(e)\n\n Utils.log(log_msg=err_msg, log_filepath=Transformer.filepath_error_log, write_mode='append')\n\n if args['exit_on_failure']:\n sys.exit(1)\n finally:\n Transformer._remove_intermediate_files_and_dirs()\n\n args = Utils.parse_and_verify_args()\n\n if args['mode'] == 'single': # transform only one slx file\n Transformer.initialize(\n args=args,\n filepath_slx=args['slx_filepath'],\n dirpath_working='working-dir',\n dirpath_batch_prod_output=None,\n filepath_mdl=args['mdl_filepath'], \n count=1\n )\n try_transform(args)\n # Transformer.transform()\n\n else: # mode = 'batch' : transform all slx files in the folder\n dirpath_slx = args['slx_dirpath']\n filepaths_slx = os.path.join(dirpath_slx, '*.slx')\n filepaths_slx = glob.glob(filepaths_slx)\n\n if not filepaths_slx:\n print(f\"No slx file was found in given slx-dirpath.\\n\")\n\n _, slx_foldername = os.path.split(args['slx_dirpath'])\n dirpath_batch_prod_output = f'outputs-{slx_foldername}'\n # we want to make sure dirpath_batch_prod_output is created brand new\n Utils.remove_dirpath(dirpath_batch_prod_output)\n if not args['devt_mode']:\n Utils.create_dirpath(dirpath_batch_prod_output)\n\n count = 0\n for filepath in filepaths_slx:\n count += 1\n print(f\"\\ncount: {count}/{len(filepaths_slx)}\", end=' ')\n if count < 0:\n print(f\"skipping : {filepath}\")\n\n else:\n print(f\"================== {filepath} ==================\\n\")\n\n dirpath, filename_without_ext, ext_with_dot = Utils.split_filepath(filepath)\n # unlike dirpath_batch_prod_output, dirpath_working is different for each Simulink model\n dirpath_working = f'working-dir-{slx_foldername}/{filename_without_ext}'\n\n if args['devt_mode']:\n filepath_mdl = os.path.join(dirpath_working, \"output.mdl\")\n else:\n filepath_mdl = os.path.join(dirpath_batch_prod_output, f\"{filename_without_ext}.mdl\")\n\n Transformer.initialize(\n args=args,\n filepath_slx=filepath,\n dirpath_working=dirpath_working,\n dirpath_batch_prod_output=dirpath_batch_prod_output,\n filepath_mdl=filepath_mdl,\n count=count,\n )\n try_transform(args)\n\n if not args['devt_mode']:\n Utils.remove_dirpath(f'working-dir-{slx_foldername}')\n Utils.remove_dirpath('working-dir') # this dir exists if previous transformation was run in 'single' mode\n\n if args['mode'] == 'batch':\n Utils.remove_file('original.slx')\n Utils.remove_file('original.mdl')\n Utils.remove_file('output.mdl')\n\n\ndef test():\n Utils.log(log_msg='hello there', log_filepath='error.log', write_mode='append')\n # from datetime import datetime\n # x = datetime.now()\n # print(x)\n\n\nif __name__ == '__main__':\n main()\n # test()\n", "id": "3687650", "language": "Python", "matching_score": 
4.9090576171875, "max_stars_count": 2, "path": "slx2mdl.py" }, { "content": "#!/usr/bin/python3\n\nimport sys\nimport os\nimport shutil\nimport zipfile\nimport argparse\nimport datetime \n# from xml.dom import minidom\n\n\nclass Utils:\n\n # key: XmlElement\n # value: ObjectID in mdl\n _object_id_dict = {}\n\n @classmethod\n def clear_ids(cls):\n \"\"\"Clear all entries in cls._object_id_dict.\n Call this method from Transformer.initialize() when operating in 'batch' mode\n so that all previous ids, if any, are cleared. \"\"\"\n cls._object_id_dict = {}\n\n @classmethod\n def object_idmdl_by_xml_element(cls, xml_element):\n \"\"\"Return object id \n Args: \n xml_element(an object of a class that inherits XmlElement)\n \"\"\"\n # Not all objects that inherit XmlElement qualify to have an objectID.\n # Only those classes which may appear as 'Object' in the mdl format\n # are qualified to have an object ID.\n\n assert xml_element.tag in [\n 'Object',\n 'Mask',\n 'MaskDefaults',\n 'MaskParameter',\n 'DialogControl',\n ]\n\n key = id(xml_element)\n try:\n return cls._object_id_dict[key]\n except KeyError:\n ids = cls._object_id_dict.values()\n ids = [int(x) for x in ids]\n max_id = max(ids) if ids else 0 # because ids is initially empty\n id_ = str(max_id + 1)\n cls._object_id_dict[key] = id_\n return id_\n\n @classmethod\n def parse_and_verify_args(cls):\n \"\"\"Return command line args in a dict. \n 'mode' will be set to 'single' if the user does not provide this argument\n Raise error if provided arguments are invalid. \n \"\"\"\n # parse args\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode') # 'single'/'batch'\n parser.add_argument('--slx-filepath') # absolute/relative filepath of slx file (relevant when mode = 'single')\n parser.add_argument('--slx-dirpath') # absolute/relative filepath of slx directory (relevant when mode = 'batch') \n parser.add_argument('--mdl-filepath')\n \n # if mode is 'batch' and conversion fails for any slx file, \n # this argument controls whether to continue with the conversion of other slx files\n # useful during development phase \n parser.add_argument('--exit-on-failure') # 'yes'/'no' (default: 'no')\n \n # if report-performance is set to 'yes', \n # number of lines in output mdl file, and \n # time taken for conversion will be output, as well \n parser.add_argument('--report-performance') # 'yes'/'no' (default: 'no')\n\n # if devt-mode is set to 'yes', \n # there will be some useful files (for debugging) in current and working directory.\n # otherwise these files either not be created at all, or deleted after the transformation completes or throws exception. \n parser.add_argument('--devt-mode') # 'yes'/'no' (default: 'no')\n\n # specifies whether to delete the source slx file after the transformation completes successfully\n # relevant only when --mode == 'batch' and --devt-mode == 'yes'\n # this is useful during development time when we need to test this tool on a large set of slx files (batch mode), \n # by fixing the transformation logic (mostly adding new components as they are discovered) incrementally as new components \n # are discovered -- we don't want to run the tool again in the slx files that were transformed successfully.\n parser.add_argument('--remove-slx') # 'yes'/'no' (default: 'no')\n\n # if dash (-) is present in the input argument,\n # it is automatically converted to underscore.\n # eg. 
slx-filepath --> slx_filepath\n args = parser.parse_args()\n args = vars(args) # argparse.Namespace --> dict\n\n # now, verify args\n\n # mode\n if args['mode'] is None:\n args['mode'] = 'single'\n # converting single file vs. converting all files in a folder\n assert args['mode'] in ['single', 'batch']\n\n args['exit_on_failure'] = True if args['exit_on_failure'] == 'yes' else False \n args['report_performance'] = True if args['report_performance'] == 'yes' else False \n args['devt_mode'] = True if args['devt_mode'] == 'yes' else False \n\n print()\n if args['mode'] == 'single':\n\n if not args['slx_filepath']:\n raise Exception(f\"Missing argument 'slx-filepath' for mode 'single'\")\n if not os.path.exists(args['slx_filepath']):\n raise Exception(f\"Invalid slx-filepath: '{args['slx_filepath']}'\")\n\n if not args['mdl_filepath']:\n print(f\"Argument 'mdl-filepath' was not provided. Default path (see below) will be used.\")\n\n else:\n if not args['slx_dirpath']:\n raise Exception(f\"Missing argument 'slx-dirpath' for mode 'batch'\")\n if not os.path.exists(args['slx_dirpath']):\n raise Exception(f\"Invalid slx-dirpath: '{args['slx_dirpath']}'\")\n\n args['remove_slx'] = True if args['remove_slx'] == 'yes' else False \n \n return args\n\n @classmethod\n def create_file(cls, filepath):\n \"\"\"Create file if it does not exist already. \n Parent directory is created if it does not exist already.\n filepath may be relative or absolute.\"\"\"\n if not os.path.exists(filepath):\n # convert to absolute path so that dirpath is guarranted\n # to be non-empty\n filepath = os.path.abspath(filepath)\n dirpath = os.path.dirname(filepath)\n # need to make sure directory exists before creating file\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n open(filepath, 'w').close() # create file\n\n @classmethod\n def create_dirpath(cls, dirpath):\n \"\"\"Create directory if it does not exist already. \n Parent directory is created if it does not exist already.\n dirpath may be relative or absolute.\"\"\"\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n\n @classmethod\n def remove_file(cls, filepath):\n \"\"\"Remove a file if it exists. \n filepath may be relative or absolute\"\"\"\n if os.path.exists(filepath):\n os.remove(filepath)\n\n @classmethod\n def remove_dirpath(cls, dirpath):\n \"\"\"Remove directory if it exists. 
\n dirpath may be relative or absolute\"\"\"\n if os.path.exists(dirpath):\n shutil.rmtree(dirpath)\n\n @classmethod\n def copy_file(cls, srcpath, dstpath, ignore_same_file_error=False):\n try:\n shutil.copyfile(srcpath, dstpath)\n except shutil.SameFileError:\n if not ignore_same_file_error:\n raise\n\n @classmethod\n def uncomment_xml(cls, xml):\n \"\"\"Remove all xml comments from the input xml str\"\"\"\n indices = [] # tuple of (start_index, end_index)\n length = len(xml)\n i = -1\n while i < length:\n i += 1\n if xml[i:i+4] == '<!--':\n index_comment_start = i\n j = i + 4\n while True:\n j += 1\n if xml[j:j+3] == '-->':\n index_comment_end = j+3 # exclusive\n indices.append(\n (index_comment_start, index_comment_end))\n i = j + 3\n break\n\n res = xml\n for s, e in indices:\n comment = xml[s:e]\n res = res.replace(comment, '')\n return res\n\n @classmethod\n def uncomment_xml_file(cls, input_filepath, output_filepath):\n \"\"\"input_filepath and output_filepath can be same\"\"\"\n with open(input_filepath) as rfile:\n str_ = rfile.read()\n str_ = Utils.uncomment_xml(str_)\n with open(output_filepath, 'w') as wfile:\n wfile.write(str_)\n\n @classmethod\n def preetify_mdl_file(cls, input_filepath, output_filepath):\n \"\"\"input_filepath and output filepath can be same\"\"\"\n os.system(f\"txl {input_filepath} preetify-mdl.txl > {output_filepath}\")\n\n @classmethod\n def transform_self_closing_tag(cls, xml):\n \"\"\"Transform self-closing tag, if any, to non-self-closing tag\"\"\"\n\n def transform_first_self_closing_tag(xml):\n \"\"\"Transform only the first self_closing tag. All occurrences of the\n first self-closing tag are transformed\"\"\"\n\n len_xml = len(xml)\n found = False\n i = 0\n while i < len_xml - 1:\n i += 1\n if xml[i:i+2] == '/>':\n found = True\n j = i\n while True:\n j -= 1\n if xml[j] == '<':\n k = j\n while True:\n k += 1\n if xml[k] in [' ', '/']:\n tag_name = xml[j+1:k]\n break\n break\n break\n\n if found:\n self_closing_tag = xml[j:i+2]\n non_self_closing_tag = f\"{self_closing_tag[0:-2]}></{tag_name}>\"\n\n xml = xml.replace(self_closing_tag, non_self_closing_tag)\n return xml\n\n xml = xml.strip()\n i = 0\n while True:\n i += 1\n xml_transformed = transform_first_self_closing_tag(xml)\n if xml_transformed == xml:\n return xml\n xml = xml_transformed\n\n # @classmethod\n # def preetify_xml(cls, xml, no_self_closing_tags=False):\n # dom = minidom.parseString(xml)\n # xml = dom.toprettyxml()\n # with open('___tempfile___', 'w') as file:\n # file.write(xml)\n # lines = []\n # with open('___tempfile___', 'r') as file:\n # for line in file:\n # line = line.rstrip()\n # if line:\n # lines.append(line)\n # os.remove('___tempfile___')\n # # dom.topreetyxml adds title line, which we want to avoid\n # xml = '\\n'.join(lines[1:])\n # # revert undesired replacements introduced by the python library\n # replacements = {\n # # \"'\" : '&apos;',\n # }\n\n # for k, v in replacements.items():\n # xml = xml.replace(k, v)\n\n # if no_self_closing_tags:\n # xml = Utils.transform_self_closing_tag(xml)\n\n # return xml\n\n @classmethod\n def preetify_xml(cls, xml, idtn_space=2):\n \"\"\"Return a preetified version of the xml string.\n Self-closing tags, if any, will be transformed to non-self-closing tags.\n\n THIS METHOD IS WRITTEN TO REMOVE THE DEPENDENCY ON PYTHON'S LIBRARY\n\n Args:\n xml(str): input xml string\n idtn_space(int, optional): number of spaces for indentation\n\n \"\"\"\n\n def break_xml_into_tags_and_content(xml):\n \"\"\"Return (<start_tag>, content, 
</end_tag>).\n Leading/trailing spaces in the content, if any, are NOT striped.\n\n Assumptions:\n - xml has no self-closing tag\n \"\"\"\n xml = xml.strip()\n\n index_stag_start = 0\n i = 1\n while True:\n i += 1\n if xml[i] == '>':\n index_stag_end = i + 1 # exclusive\n break\n elif xml[i] == ' ':\n j = i\n while True:\n j += 1\n if xml[j] == '>':\n index_stag_end = j + 1 # exclusive\n break\n break\n\n index_content_start = index_stag_end # inclusive\n\n index_etag_end = len(xml) # exclusive\n i = index_etag_end\n while True:\n i -= 1\n if xml[i:i+2] == '</':\n index_etag_start = i # inclusive\n break\n\n index_content_end = index_etag_start # exclusive\n\n stag = xml[index_stag_start: index_stag_end]\n content = xml[index_content_start: index_content_end]\n etag = xml[index_etag_start: index_etag_end]\n\n return stag, content, etag\n\n def content_blocks(content):\n \"\"\"Return a list of xml blocks which appear in the content\n Assumptions:\n - content contains at least 1 xml element i.e. <>...<>\n - content does not contain any self-closing xml block\n \"\"\"\n content = content.strip()\n if not(content.startswith('<') and content.endswith('>')):\n return []\n\n length = len(content)\n\n # list of tuple (start_index, end_index)\n # both start_index and end_index are inclusive\n start_end_indices = []\n i = -1\n while i < length-1:\n i += 1\n if content[i] == '<':\n start_index = i\n end_index = Utils.end_index(content, start_index)\n start_end_indices.append((start_index, end_index))\n i = end_index\n\n xmls = [content[s:e+1] for (s, e) in start_end_indices]\n return xmls\n\n def is_content_plain_str(content):\n cs = content.strip()\n return not (cs.startswith('<') and cs.endswith('>'))\n\n def indent_block(block, idtn_space=2, idtn_level=1):\n \"\"\"Shift whole block by given indentation level to the right\"\"\"\n ind = ' ' * idtn_space * idtn_level\n lines = block.split('\\n')\n lines = [f'{ind}{x}' for x in lines]\n block = '\\n'.join(lines)\n return block\n\n def preetify_xml_helper(xml, idtn_space):\n stag, content, etag = break_xml_into_tags_and_content(xml)\n\n # base case\n if is_content_plain_str(content):\n return xml.strip()\n\n else: # content contains one or more xml blocks\n cbs = content_blocks(content)\n preetified_xml = stag\n for cb in cbs:\n pcb = preetify_xml_helper(cb, idtn_space)\n pcb = indent_block(pcb, idtn_space, 1)\n preetified_xml += f'\\n{pcb}'\n preetified_xml += f'\\n{etag}'\n return preetified_xml\n\n xml = Utils.transform_self_closing_tag(xml)\n return preetify_xml_helper(xml, idtn_space=idtn_space)\n\n @classmethod\n def tag(cls, xml):\n \"\"\"Return the tag's name of given xml string.\n Return None if no match found.\"\"\"\n xml = Utils.transform_self_closing_tag(xml)\n xml = xml.strip()\n\n global ALL_TAGS\n for tag in ALL_TAGS:\n if xml.endswith(f\"</{tag}>\"):\n return tag\n\n @classmethod\n def end_index(cls, xml, start_index):\n \"\"\"Return the last index of the tag\n\n DOES NOT SUPPORT SELF-CLOSING TAGS\n\n Parameters:\n xml (string) : xml string\n start_index (int) : index of first character i.e. '<' of starting tag\n\n Returns:\n (int) : index (NOT exclusive) of last character i.e. 
'>' of ending tag\n \"\"\"\n\n # find tag's name\n i = start_index\n while True:\n i += 1\n if xml[i] in [' ', '>']:\n tag_name = xml[start_index + 1: i]\n break\n\n start_pattern = f'<{tag_name}'\n end_pattern = f'</{tag_name}>'\n\n len_start_pattern = len(start_pattern)\n len_end_pattern = len(end_pattern)\n len_xml = len(xml)\n\n stack = ['s']\n i = start_index\n while i < len_xml - len_end_pattern:\n i += 1\n\n # the second condition makes sure that if an inner tag begins with the same\n # string pattern as an outer tag, the inner tag is not falsely matched to\n # the start_pattern\n if xml[i: i + len_start_pattern] == start_pattern and xml[i + len_start_pattern] in [' ', '>']:\n stack.append('s')\n\n if xml[i: i + len_end_pattern] == end_pattern:\n stack.append('e')\n\n if len(stack) > 1 and stack[-1] == 'e' and stack[-2] == 's':\n stack = stack[0: -2] # remove last 2 entries\n\n if len(stack) == 0:\n return i + len_end_pattern - 1\n\n @classmethod\n def disect_xml_str(cls, xml):\n \"\"\"Return (tagname(str), list_of_XmlAttrs, content(str))\n Args: \n xml(str)\n \"\"\"\n xml = xml.strip()\n assert xml.startswith('<') and xml.endswith('>')\n\n i = 0\n index_tag_start = 1\n while True:\n i += 1\n if xml[i] in [' ', '>']:\n index_tag_end = i # exclusive\n if xml[i] == '>':\n attrless = True\n else:\n attrless = False\n break\n\n xml_attrs = []\n if not attrless:\n nquote = 0\n i -= 1\n while True:\n i += 1\n if xml[i] == '>': # end of attrs\n break\n\n if xml[i] == ' ':\n j = i\n while True:\n j += 1\n if xml[j] != ' ':\n index_attrname_start = j\n break\n\n if xml[i] == '=':\n index_attrname_end = i # exclusive\n\n j = i\n while True:\n j += 1\n if xml[j] == '\"': # starting quote of an attribute's value\n index_attrval_start = j + 1 # not including \"\n k = j\n while True:\n k += 1\n # k will eventually have corresponding ending quote's index\n if xml[k] == '\"':\n index_attrval_end = k\n\n attrname = xml[index_attrname_start: index_attrname_end]\n attrval = xml[index_attrval_start: index_attrval_end]\n\n # first found in corpus/matlab-central/HEV_Electrical_Lib.slx\n # (see line: '<Block BlockType=\"Gain\" Name=\"1\\ib3\" SID=\"2:35\">' in file 'merged_uncommented_no_multiline_str_content'), \n # some tag attribute values (not just the tag content) contain characters that need replacement\n attrval = Utils.str_content_replacements(attrval)\n xml_attr = XmlAttr(attrname, attrval)\n xml_attrs.append(xml_attr)\n\n i = k\n break\n break\n\n index_content_start = i + 1\n i = len(xml)\n while True:\n i -= 1\n if xml[i] == '<':\n index_content_end = i # exclusive\n break\n\n tag = xml[index_tag_start: index_tag_end]\n content = xml[index_content_start: index_content_end]\n\n return tag, xml_attrs, content\n\n @classmethod\n def content_elements(cls, content):\n \"\"\"Return a list of xml blocks (strs) which appear in the content (str)\n Assumptions:\n - content contains at least 1 xml element i.e. 
<>...<>\n - content does not contain any self-closing xml elements\n\n \"\"\"\n content = content.strip()\n if not(content.startswith('<') and content.endswith('>')):\n return []\n\n length = len(content)\n\n # list of tuple (start_index, end_index)\n # both start_index and end_index are inclusive\n start_end_indices = []\n i = -1\n\n while i < length-1:\n i += 1\n if content[i] == '<':\n start_index = i\n end_index = Utils.end_index(content, start_index)\n start_end_indices.append((start_index, end_index))\n i = end_index\n\n xmls = [content[s:e+1] for (s, e) in start_end_indices]\n return xmls\n\n @classmethod\n def str_contents_multiline_to_singleline(cls, input_filepath, output_filepath):\n \"\"\"COMBINE MULTI-LINE STRINGS INTO SINGLE-LINE STRINGS CONCATENATED BY '&#xA;' IF THESE STRINGS APPEAR AS THE CONTENT OF THE LISTED TAGS\n\n input_filepath and output_filepath can be same.\n\n ASSUMPTIONS: \n - input_file contains no xml comments \n\n We are using '&#xA' rather than '\\n', to concatenate lines because\n 1. '\\n' changed to '\\\\n' by another method i.e.\n str_content_replacements() which is called after this method. This was observed to introduce errors in the model\n (dma/ex_bus_to_vector).\n So, we cannot use '\\n' to concatenate lines.\n 2. slx format also uses '&#xA;' to represent linebreaks (this is\n standard xml encoding).\n \"\"\"\n tags = [\n 'P',\n 'Description',\n 'Help',\n 'Display', # first found in powerwindow05\n 'Initialization', # first found in powerwindow05\n 'Callback', # first found in applications/aero_dap3dof\n 'Option', # first foundin github/AC_Quadcopter_Simulation\n ]\n\n # for any tag, start patterns can be of 2 types.\n # eg for 'P', they may be either '<P>' or '<P '\n # This is needed to avoid matching other starting tags starting with\n # these tags, eg: <Port> matchies <P> otherwise\n start_patterns = [f'<{x}>' for x in tags] + [f'<{x} ' for x in tags]\n sp_ep = {}\n for sp in start_patterns:\n sp_ep[sp] = f'</{sp[1:-1]}>'\n\n with open(input_filepath) as file:\n lines = file.readlines()\n\n writelines = []\n nlines = len(lines)\n i = -1\n while i < nlines - 1:\n i += 1\n line = lines[i]\n ls = line.strip()\n for sp, ep in sp_ep.items():\n # cannot just check if ls.endswith(ep) because the line might\n # end in a comment too while still having a multi line string content\n\n if ls.startswith(sp) and not ep in line and not '/>' in line:\n while True:\n # remove the nextline char at the end of the line\n # IMP: cannot use \"line = line.rstrip()\" because that\n # would strip away any trailing whitespace character\n # as well along with the nextline character. This would\n # then introduce errors -- such as invalid library block --\n # in the output mdl file.\n line = line[:-1]\n i += 1\n nextline = lines[i]\n line += '&#xA;' + nextline\n nrs = nextline.rstrip()\n if ep in line or '/>' in line:\n break\n break\n writelines.append(line)\n\n with open(output_filepath, 'w') as file:\n for line in writelines:\n file.write(line)\n\n @classmethod\n def replacements4mdl(cls, mdl):\n \"\"\"Make necessary requirements for mdl format.\n This transformation should be applied at the very end of the pipeline\n i.e. just before returning the generated mdl string. \n Otherwise, these replacements will introduce unintended changes in the\n xml string (eg. 
'&lt' changes to '<' as a result xml parsing is affected\n producing errorneous results or raising exception.\n \"\"\"\n\n # TODO: update this list incrementally\n replacements = {\n '&lt;': '<',\n '&gt;': '>',\n '&apos;': \"'\",\n '&#xA;': '\\\\n',\n '&amp;': '&',\n # IMPORTANT: the replacing character is not a 'space' character.\n # It is some special character that looks like a whitespace;\n # first found in AccelerationUnits (powerwindowlibphys)\n # when its raw value is printed as print(repr('\u001a')), this outputs \\x1a \n # (the whitespace-like character is inside the quotes) \n '�': '\u001a',\n '&quot;': '\\\\\"',\n '&#x9;': '\t', # tab character (first found in corpus/github/ctrl_student)\n\n # previously, in the method Utils.str_content_replacements we were making each \n # replacements individually (eg: \\n --> \\\\n, \\t --> \\\\t), which are now commented \n # out as we are using a generic replacement (\\ --> \\\\) in that method. This generic\n # replacement is still under test, so we have not deleted the previous individual \n # replacements entirely (we've just commented them out)\n # With that generic replacement in method Utils.str_content_replacements, the \n # following replacemnents in this method i.e. (eg. \\alpha --> \\\\alpha) introduce\n # 'extra' unintended backslash character. Therefore this section has been commented out.\n # we have not deleted it yet because the generic replacement code in Utils.str_content_replacements()\n # (i.e. \\ --> \\\\) is still experimental -- in case it fails we will have to resort to these individual \n # transformations \n\n # alpha and delta were discovered in 'selxAircraftExample'\n # '\\\\alpha': '\\\\\\\\alpha', # \\alpha --> \\\\alpha \n # '\\\\Alpha': '\\\\\\\\Alpha', # \\alpha --> \\\\alpha \n # '\\\\beta': '\\\\\\\\beta', \n # '\\\\Beta': '\\\\\\\\Beta', \n # '\\\\gamma': '\\\\\\\\gamma', \n # '\\\\Gamma': '\\\\\\\\Gamma', \n # # '\\\\delta': '\\\\\\\\delta', \n # '\\\\Delta': '\\\\\\\\Delta', \n # '\\\\epsilon': '\\\\\\\\epsilon', \n # '\\\\Epsilon': '\\\\\\\\Epsilon', \n # '\\\\zeta': '\\\\\\\\zeta', \n # '\\\\Zeta': '\\\\\\\\Zeta', \n # '\\\\eta': '\\\\\\\\eta', \n # '\\\\Eta': '\\\\\\\\Eta', \n # '\\\\theta': '\\\\\\\\theta', \n # '\\\\Theta': '\\\\\\\\Theta', \n # '\\\\iota': '\\\\\\\\iota', \n # '\\\\Iota': '\\\\\\\\Iota', \n # '\\\\kappa': '\\\\\\\\kappa', \n # '\\\\Kappa': '\\\\\\\\Kappa', \n # '\\\\lambda': '\\\\\\\\lambda', \n # '\\\\Lambda': '\\\\\\\\Lambda', \n # '\\\\mu': '\\\\\\\\mu', \n # '\\\\Mu': '\\\\\\\\Mu', \n # # 'nu' is commented out because it brought unintended changes: eg, \\nusing --> \\\\nusing \n # # so, currently our transformation fails if model contains 'nu' character (rare case)\n # # TODO: solve it \n # # '\\\\nu': '\\\\\\\\nu', \n # '\\\\Nu': '\\\\\\\\Nu', \n # '\\\\xi': '\\\\\\\\xi', \n # '\\\\Xi': '\\\\\\\\Xi', \n # '\\\\omikron': '\\\\\\\\omikron', \n # '\\\\Omikron': '\\\\\\\\Omikron', \n # '\\\\pi': '\\\\\\\\pi', \n # '\\\\Pi': '\\\\\\\\Pi', \n # '\\\\rho': '\\\\\\\\rho', \n # '\\\\Rho': '\\\\\\\\Rho', \n # '\\\\sigma': '\\\\\\\\sigma', \n # '\\\\Sigma': '\\\\\\\\Sigma', \n # '\\\\tau': '\\\\\\\\tau', \n # '\\\\Tau': '\\\\\\\\Tau', \n # '\\\\upsilon': '\\\\\\\\upsilon', \n # '\\\\Upsilon': '\\\\\\\\Upsilon', \n # '\\\\phi': '\\\\\\\\phi', \n # '\\\\Phi': '\\\\\\\\Phi', \n # '\\\\chi': '\\\\\\\\chi', \n # '\\\\Chi': '\\\\\\\\Chi', \n # '\\\\psi': '\\\\\\\\psi', \n # '\\\\Psi': '\\\\\\\\Psi', \n # '\\\\omega': '\\\\\\\\omega', \n # '\\\\Omega': '\\\\\\\\Omega', \n }\n\n for k, v in 
replacements.items():\n mdl = mdl.replace(k, v)\n return mdl\n\n @classmethod\n def str_content_replacements(cls, content):\n \"\"\"Make necessary requirements for the content of type 'str'.\n Args: \n content(str); \n \"\"\"\n # IMPORTANT replacement that affect xml parsing such as '&gt;' --> '>'\n # are not made by this method\n\n\n replacements = {\n '\\\\' : '\\\\\\\\', # \\ --> \\\\ \n\n # todo: if the above replacement (\\ --> \\\\) introduces unintended consequences, \n # get rid of the above entry and restore the following (commented-out) ones\n\n # '\\\\n': '\\\\\\\\n', # \\n --> \\\\n # first found in automotive/sldemo_wheelspeed_absbrake\n # '\\\\t': '\\\\\\\\t', # \\t --> \\\\t # first found in automotive/sldemo_wheelspeed_absbrake\n # '\\\\x': '\\\\\\\\x', # \\x --> \\\\x # first found in corpus/matlab-central/Chassis_Alpha.slx\n # '\\\\i': '\\\\\\\\i', # \\i --> \\\\i # first found in corpus/matlab-central/HEV_Electrical_Lib.slx \n }\n\n for k, v in replacements.items():\n content = content.replace(k, v)\n return content\n\n @classmethod\n def remove_multiple_linegaps(cls, str_):\n \"\"\"Returned string will not have 2 or more consecutive empty lines\"\"\"\n if not '\\n\\n\\n' in str_: # base condition\n return str_\n str_ = str_.replace('\\n\\n\\n', '\\n\\n')\n return Utils.remove_multiple_linegaps(str_)\n\n @classmethod\n def remove_linegaps(cls, str_):\n \"\"\"Returned string will not have any empty line\"\"\"\n if not '\\n\\n' in str_: # base condition\n return str_\n str_ = str_.replace('\\n\\n', '\\n')\n return Utils.remove_linegaps(str_)\n\n @classmethod\n def remove_multiple_linegaps_between_consecutive_closing_braces(cls, str_):\n \"\"\"Returned string will not have an empty line between any 2 consecutive \n closing braces i.e. }\"\"\"\n if not '}\\n\\n}' in str_: # base condition\n return str_\n str_ = str_.replace('}\\n\\n}', '}\\n}')\n return Utils.remove_multiple_linegaps_between_consecutive_closing_braces(str_)\n\n @classmethod\n def extract_first_tag(cls, xml, tag):\n \"\"\"Return the first tag (<>...</>) as a string.\n If there is an inner tag inside the first tag, the outer tag (containing\n the inner tag) will be returned.\n Return None if no such tag is found\n\n Assumption:\n xml MUST NOT CONTAIN ANY SELF-CLOSING TAG\n\n Parameters:\n xml (string) : xml string\n tag (string) : tag to be found. eg. 
'state'\n\n Returns:\n (string): entire tag (<>...</>)\n \"\"\"\n\n stag = f\"<{tag}\"\n etag = f\"</{tag}>\"\n\n len_xml = len(xml)\n len_stag = len(stag)\n len_etag = len(etag)\n\n start_index = None\n end_index = None\n found = False\n\n # both insertion and removal from highest index\n # s : start\n # e : end\n tag_stack = []\n\n for i in range(0, len_xml - len_etag + 1):\n substr = xml[i: i + len_stag]\n if substr == stag:\n found = True\n tag_stack.append('s')\n\n if start_index == None:\n start_index = i\n\n substr = xml[i: i + len_etag]\n if substr == etag:\n tag_stack.append('e')\n end_index = i + len_etag\n\n if found:\n if len(tag_stack) > 1 and tag_stack[-1] == 'e' and tag_stack[-2] == 's':\n tag_stack = tag_stack[0: -2] # remove last 2 entires\n if len(tag_stack) == 0:\n return xml[start_index: end_index]\n\n @classmethod\n def extract_outer_tags(cls, xml, tag, ignore_inside=None):\n \"\"\"Return a list of matching tags.\n - If there is an inner tag inside the first tag, the outer tag (containing\n the inner tag) will be added to the list, the inner tag will NOT be added\n separately.\n\n - Tags (in the returned list) will appear in the same order as they appear\n in the xml file\n\n Assumption:\n - xml MUST NOT CONTAIN ANY SELF-CLOSING TAG\n\n Args:\n xml (str): xml string\n tag (str): tag to be found. eg. 'state'\n ignore_inside ([str]): list of tags. The tag element inside these tags\n will be ignored.\n\n Returns:\n [string]: list of entire tag (<>...</>). The list is empty if no match is found.\n \"\"\"\n\n if ignore_inside is None:\n ignore_inside = []\n\n stag = f\"<{tag}\"\n etag = f\"</{tag}>\"\n\n len_xml = len(xml)\n len_stag = len(stag)\n len_etag = len(etag)\n\n tags = []\n start_index = None\n end_index = None\n found = False\n inside_ignored_tag = False\n\n # both insertion and removal from highest index\n # s : start\n # e : end\n tag_stack = []\n\n # for i in range(0, len_xml - len_etag + 1):\n i = -1\n while i < len_xml - len_etag + 1:\n i += 1\n\n for ignored_tag in ignore_inside:\n start_pattern = f'<{ignored_tag}'\n len_start_pattern = len(start_pattern)\n if xml[i: i + len_start_pattern] == start_pattern:\n # skip the entire tag\n i = Utils.end_index(xml, i)\n\n if xml[i: i + len_stag] == stag:\n found = True\n\n # update start_index only if tag_stack is empty\n # i.e. 
all previously entered tags, if any, are exited.\n if len(tag_stack) == 0:\n start_index = i\n tag_stack.append('s')\n\n if xml[i: i + len_etag] == etag:\n tag_stack.append('e')\n end_index = i + len_etag\n\n if found:\n if len(tag_stack) > 1 and tag_stack[-1] == 'e' and tag_stack[-2] == 's':\n tag_stack = tag_stack[0: -2] # remove last 2 entries\n if len(tag_stack) == 0:\n tags.append(xml[start_index: end_index])\n found = False\n\n return tags\n\n @classmethod\n def split_filepath(cls, filepath):\n \"\"\"Return (dirpath, filename_without_extension, extension_with_dot)\n dirpath will be empty if filepath is relative and corresponds to a file\n in current directory\"\"\"\n dirpath, filename_with_ext = os.path.split(filepath) # a/b/c/d.txt --> a/b/c, d.txt\n filename_without_ext, ext_with_dot = os.path.splitext(filename_with_ext) # d.txt --> d, .txt\n return dirpath, filename_without_ext, ext_with_dot\n\n\n @classmethod \n def log(cls, log_msg, log_filepath, write_mode):\n \"\"\"Write log message to the file\n Args: \n log_msg(str): log message \n log_filepath(str): absolute path of the log file (will be created if does not exist already)\n write_mode(str): 'write'/'append'\n \"\"\"\n assert write_mode in ['write', 'append']\n write_mode = 'w' if write_mode == 'write' else 'a'\n\n log_msg = f\"\\n\\n\\n-------------------------------------\\n\\nDate/time: {datetime.datetime.now()}\\n\\n\" + log_msg\n\n with open(log_filepath, write_mode) as file: \n file.write(log_msg)\n\n\nclass XmlAttr:\n def __init__(self, name, value):\n self.name = name\n self.value = value\n\n def __str__(self):\n return f'{self.name}={self.value}'\n\n\nclass XmlElement:\n \"\"\"An XmlElement is either <tag>...</tag> or just any string, eg: 'abc' \n In reality, a string without tag is not considered an XmlElement, but this model \n treats such strings too as an XmlElement\n\n Attributes: \n type(str): 'xml'/'str'\n strval(str):\n tag(str/None):\n xml_attrs([XmlAttr]/None):\n inner_xmls([XmlElement]/None): \n content(str): \n parent_xml(XmlElement)\n \"\"\"\n\n def __init__(self, strval, parent_xml):\n \"\"\"\n Args: \n parent_xml(XmlElement): parent XmlElement, if any, else None \n \"\"\"\n self.parent_xml = parent_xml\n\n strval = Utils.transform_self_closing_tag(strval)\n strval_stripped = strval.strip()\n if strval_stripped.startswith('<') and strval_stripped.endswith('>'):\n self.type = 'xml'\n self.strval = strval_stripped\n tag, xml_attrs, content = Utils.disect_xml_str(strval)\n self.tag = tag\n self.attrs = xml_attrs\n content_striped = content.strip()\n\n if content_striped.startswith('<') and content_striped.endswith('>'):\n self.content = content_striped\n self.content_type = 'xml'\n content_elements = Utils.content_elements(content) # [str]\n self.inner_xmls = [XmlElement(x, self)\n for x in content_elements]\n else:\n # if content is 'str' type:\n # - don't strip leading/trailing spaces\n # - make some replacements\n # - replacements like \\n --> \\\\n are made at this time\n # - replacements like &gt; --> > are NOT made at this time.\n self.content = Utils.str_content_replacements(content)\n self.content_type = 'str'\n self.inner_xmls = [XmlElement(content, self)]\n else:\n self.type = 'str'\n # don't strip leading/trailing spaces for 'str' type element\n self.strval = strval\n self.attrs = None\n self.tag = None\n self.inner_xmls = []\n self.content_type = None\n self.content = None\n\n def attr_value_by_name(self, attrname):\n \"\"\"Return the value (str) of attribute. 
\n Return None if no such attribute exists\"\"\"\n for attr in self.attrs:\n if attr.name == attrname:\n return attr.value\n\n @property\n def inner_xmls_of_type_xml(self):\n return [x for x in self.inner_xmls if x.type == 'xml']\n\n @property\n def inner_xmls_of_type_str(self):\n return [x for x in self.inner_xmls if x.type == 'str']\n\n def __str__(self):\n return self.strval\n\n\nclass UtilsStfl:\n \"\"\"Utility class for Stateflow\"\"\"\n\n _id_dict = {}\n\n @classmethod\n def clear_ids(cls):\n \"\"\"Clear all entries in cls._id_dict.\n Call this method from Transformer.initialize() when operating in 'batch' mode\n so that all previous ids, if any, are cleared. \"\"\"\n cls._id_dict = {}\n\n @classmethod\n def _get_idmdl(cls, idslx, ssid, idmdl_chart, create_if_needed=True):\n # ssid of elements from different <chart>s collide.\n # so, we need to include idmdl_chart in the key\n key = (idslx, ssid, idmdl_chart)\n\n try:\n return cls._id_dict[key]\n except KeyError:\n if create_if_needed:\n ids = cls._id_dict.values()\n ids = [int(x) for x in ids]\n max_id = max(ids) if ids else 0 # because ids is initially empty\n id = str(max_id + 1)\n cls._id_dict[key] = id\n return id\n return None\n\n @classmethod\n def idmdl_by_idslx(cls, idslx, create_if_needed=True):\n \"\"\"Return idmdl for this element.\n The id will be generated, if this method\n is called with these arguments for the first time.\n\n Call this method to get idmdl for ['chart', 'instance', 'machine', 'target']\n\n Args: \n idslx(str): id in slx version \n created_if_needed(bool): If this is set to true, new id will be generated in \n case idmdl for given idslx doesnot exist already. Otherwise, None will be \n returned in such case. \n \"\"\"\n assert idslx\n return cls._get_idmdl(idslx=idslx, ssid=None, idmdl_chart=None, create_if_needed=create_if_needed)\n\n @classmethod\n def idmdl_by_ssid(cls, ssid, idmdl_chart, create_if_needed=True):\n \"\"\"Return idmdl for this element.\n The id will be generated, if this method\n is called with these arguments for the first time.\n\n Call this method to get idmdl for ['data', 'event', 'junction', 'state', 'transition']\n\n Args: \n ssid(str): ssid of the element \n idmdl_chart(str): idmdl of parent chart. This is needed for some \n elements, because SSIDs can collide between the children\n (for exampe, <state>s of two different <chart>s \n created_if_needed(bool): If this is set to true, new id will be generated in \n case idmdl for given idslx doesnot exist already. Otherwise, None will be \n returned in such case. 
\n \"\"\"\n assert ssid\n assert idmdl_chart\n return cls._get_idmdl(idslx=None, ssid=ssid, idmdl_chart=idmdl_chart, create_if_needed=create_if_needed)\n\n\nif __name__ == '__main__':\n pass\n", "id": "12167470", "language": "Python", "matching_score": 2.9485576152801514, "max_stars_count": 2, "path": "commons.py" }, { "content": "#!/usr/bin/python3\n\nfrom commons import Utils, XmlElement\n\n\nclass Annotation(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Annotation') and strval.endswith('</Annotation>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Annotation' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Annotation(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Annotation {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass AnnotationDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<AnnotationDefaults') and strval.endswith('</AnnotationDefaults>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'AnnotationDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return AnnotationDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'AnnotationDefaults {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Array(XmlElement):\n # OBSERVATION:\n # 1. <Array> contains only one type of children tag\n # 2. <Array> does not contain <P> tag (follows from observation 1)\n # 3. 
'Dimension' of an array (in mdl) is its number of children\n\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Array') and strval.endswith('</Array>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.dimension = len(self.inner_xmls_of_type_xml)\n self.ps = []\n self.objects = []\n self.cells = []\n self.mATStructs = []\n self.arrays = [] # found in matlab-central/RC_Demo_C2000_Control_Unit\n\n for x in self.inner_xmls:\n\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Cell':\n self.cells.append(Cell.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'MATStruct':\n self.mATStructs.append(MATStruct.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Array':\n self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n\n \n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Array' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Array(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n\n str_ = 'Array {\\n'\n\n for x in self.attrs:\n if x.name in ['Dimension']:\n continue\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n str_ += f'Dimension {self.dimension}\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.cells:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.mATStructs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n \n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Block(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Block') and strval.endswith('</Block>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.ports = []\n self.masks = []\n self.systems = []\n self.instanceDatas = []\n self.lists = []\n self.functionPorts = []\n self.objects = [] \n self.linkDatas = [] # found in corpus/matlab-central/Dual_Clutch_Trans.slx \n self.instanceDatas = [] # found in corpus/matlab-central/HEV_Battery_Lib.slx\n self.arrays = [] # found in corpus/github/daq2_sim.slx\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Port':\n self.ports.append(Port.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Mask':\n self.masks.append(Mask.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'System':\n self.systems.append(System.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'InstanceData':\n self.instanceDatas.append(InstanceData.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'List':\n self.lists.append(List.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'FunctionPort':\n self.functionPorts.append(FunctionPort.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'LinkData':\n self.linkDatas.append(LinkData.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'InstanceData':\n self.instanceDatas.append(InstanceData.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Array':\n 
self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Block' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Block(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Block {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.ports:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.masks:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.systems:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.instanceDatas:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.lists:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.functionPorts:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects: \n str_ += f'{x.strmdl}\\n'\n\n for x in self.linkDatas: \n str_ += f'{x.strmdl}\\n'\n\n for x in self.instanceDatas: \n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays: \n str_ += f'{x.strmdl}\\n'\n\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass BlockDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<BlockDefaults') and strval.endswith('</BlockDefaults>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'BlockDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return BlockDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'BlockDefaults {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass BlockDiagramDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<BlockDiagramDefaults') and strval.endswith('</BlockDiagramDefaults>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.systemDefaults = []\n self.blockDefaults = []\n self.annotationDefaults = []\n self.lineDefaults = []\n self.maskDefaults = []\n self.blockParameterDefaults = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'SystemDefaults':\n self.systemDefaults.append(SystemDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'BlockDefaults':\n self.blockDefaults.append(BlockDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'AnnotationDefaults':\n self.annotationDefaults.append(AnnotationDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'LineDefaults':\n self.lineDefaults.append(LineDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'MaskDefaults':\n self.maskDefaults.append(MaskDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'BlockParameterDefaults':\n self.blockParameterDefaults.append(BlockParameterDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'BlockDiagramDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n 
@classmethod\n def from_XmlElement(cls, xml_element):\n return BlockDiagramDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = ''\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.systemDefaults:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.blockDefaults:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.annotationDefaults:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.lineDefaults:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.maskDefaults:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.blockParameterDefaults:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass BlockParameterDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<BlockParameterDefaults') and strval.endswith('</BlockParameterDefaults>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.blocks = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Block':\n self.blocks.append(Block.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'BlockParameterDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return BlockParameterDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'BlockParameterDefaults {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.blocks:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Branch(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Branch') and strval.endswith('</Branch>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.branches = [] # <Branch> can contain <Branch>\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Branch':\n self.branches.append(Branch.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Branch' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Branch(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Branch {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.branches:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Callback(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Callback') and strval.endswith('</Callback>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Callback(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Callback \"{self.content}\"'\n\n\nclass Capabilities(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Capabilities') and strval.endswith('</Capabilities>')\n super().__init__(strval, 
parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Capabilities(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Capabilities \"{self.content}\"'\n\n\nclass Cell(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Cell') and strval.endswith('</Cell>')\n super().__init__(strval, parent_xml)\n self.class_attr = None\n for x in self.attrs:\n if x.name == 'Class':\n self.class_attr = x\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Cell(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n quoted = f'Cell \"{self.content}\"'\n unquoted = f'Cell {self.content}'\n boxed = f'Cell [{self.content}]'\n\n # as seen from corpus/matlab-central/fir_filter_example.slx, \n # if attribute 'Class' = 'double', then content is boxed\n # if attribute 'Class' = 'char', then content is quoted\n \n if self.class_attr and self.class_attr.value in ['double']:\n if self.content.startswith('[') and self.content.endswith(']'):\n return unquoted\n return boxed\n return quoted # default \n \n\n\nclass ConfigSet(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ConfigSet') and strval.endswith('</ConfigSet>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.objects = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ConfigSet' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ConfigSet(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Array {\\n'\n str_ += 'Type \"Handle\"\\n'\n str_ += f'Dimension {len(self.inner_xmls_of_type_xml)}\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass ConfigurationSet(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ConfigurationSet') and strval.endswith('</ConfigurationSet>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.objects = []\n self.arrays = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Array':\n self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ConfigurationSet' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ConfigurationSet(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n str_ += 
'\\n\\n'\n return str_\n\n\nclass ConcurrentExecutionSettings(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ConcurrentExecutionSettings') and strval.endswith('</ConcurrentExecutionSettings>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.objects = []\n self.arrays = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Array':\n self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ConcurrentExecutionSettings' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ConcurrentExecutionSettings(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass ConfigManagerSettings(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ConfigManagerSettings') and strval.endswith('</ConfigManagerSettings>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ConfigManagerSettings' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ConfigManagerSettings(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass Connector(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Connector') and strval.endswith('</Connector>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Connector' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Connector(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Connector {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass ControlOptions(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ControlOptions') and strval.endswith('</ControlOptions>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n 
self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ControlOptions' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ControlOptions(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n # special : no surrounding braces, just contents\n str_ = '\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass CustomProperty(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<CustomProperty') and strval.endswith('</CustomProperty>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.enumStrPairss = [] # first found in corpus/github-downloaded/CSEI_u.slx\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'EnumStrPairs':\n self.enumStrPairss.append(EnumStrPairs.from_XmlElement(x))\n innerxml_used[x] = True\n\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'CustomProperty' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return CustomProperty(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'CustomProperty {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.enumStrPairss:\n str_ += f'{x.strmdl}\\n'\n\n \n\n str_ += '}\\n\\n'\n return str_\n\nclass Description(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Description') and strval.endswith('</Description>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Description(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Description \"{self.content}\"'\n\n\nclass DialogControl(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<DialogControl') and strval.endswith('</DialogControl>')\n super().__init__(strval, parent_xml)\n self.object_idmdl = Utils.object_idmdl_by_xml_element(self)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.controlOptions = []\n self.prompts = []\n self.dialogControls = [] # there can be nested <DialogControl> see: applications/sldemo_autotrans\n self.callbacks = []\n self.tooltips = []\n self.filePaths = [] # first found in corpus/matlab-central/Contact_Forces_Lib.slx\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ControlOptions':\n self.controlOptions.append(ControlOptions.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Prompt':\n self.prompts.append(Prompt.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'DialogControl':\n self.dialogControls.append(DialogControl.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Callback':\n self.callbacks.append(Callback.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Tooltip':\n self.tooltips.append(Tooltip.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'FilePath':\n 
self.filePaths.append(FilePath.from_XmlElement(x))\n innerxml_used[x] = True\n\n \n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'DialogControl' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return DialogControl(xml_element.strval, xml_element.parent_xml)\n\n def strmdl(self, is_array_element):\n \"\"\"\n Args: \n is_array_element (bool): True if the returned str is to be wrapped inside Array{}, else False \n \"\"\"\n\n # special\n str_ = 'Object {\\n'\n\n # OBSERVATION: If multiple <DialogControl> are contained in a parent tag (eg. <Mask>),\n # they are wrapped in Array{}\n #\n # <DialogControl> become Object {} in mdl and they contain $ObjectID, $PropName, and\n # $ClassName.\n #\n # When they are wrapped in Array{}, in original mdl files (generated by Simulink)\n # - $ PropName is moved out (becomes a MANDATORY attribute of Array and renamed to\n # Propname i.e. no leading $)\n # - $ClassName is NOT removed. (THIS IS DIFFERENT IN <MaskParameter>)\n # - $ObjectID remains the same.\n #\n # Although keeping $PropName inside these wrapped Object{}s\n # does not harm, we have chosen to remove it just like in the mdl file produced by Simulink.\n\n str_ += f'$ObjectID {self.object_idmdl}\\n' # TODO: figure out what ObjectID is\n str_ += f'$ClassName \"{self.array_type_or_object_className()}\"\\n'\n if not is_array_element:\n str_ += f'$PropName \"DialogControls\"\\n'\n\n for x in self.attrs:\n # 'Type' info goes to $ClassName\n if x.name not in ['Type']:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.controlOptions:\n str_ += f'{x.strmdl}\\n'\n\n # OBSERVATION: Some <Prompt> in <DialogControl> do not appear in the mdl file\n # for example, when <DialogControl> has Type=\"CheckBox\", the <Prompt> contained in the\n # <DialogControl> does not appear in the mdl file. (see applications/aero_dap3dof)\n # However, at this time, we are not sure when exactly not to include <Prompt>'s transformation\n # in the mdl format. 
So, we are always including it.\n # TODO: If this results in problem(s), investigate further and when <Prompt>'s transformation\n # should appear and when it should not and make required changes.\n\n for x in self.prompts:\n str_ += f'{x.strmdl}\\n'\n\n if self.dialogControls and len(self.dialogControls) > 1:\n str_ += 'Array {\\n'\n str_ += f'Type \"Simulink.dialog.Control\"\\n'\n # PropName attribute is mandatory.\n # Notice that there is no leading $\n str_ += 'PropName \"DialogControls\"\\n'\n str_ += f'Dimension {len(self.dialogControls)}\\n'\n for x in self.dialogControls:\n str_ += f'{x.strmdl(is_array_element=True)}\\n'\n str_ += '}\\n'\n else:\n for x in self.dialogControls:\n str_ += f'{x.strmdl(is_array_element=False)}\\n'\n\n for x in self.callbacks:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.tooltips:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.filePaths:\n str_ += f'{x.strmdl}\\n'\n\n \n\n str_ += '}\\n\\n'\n return str_\n\n def array_type_or_object_className(self):\n \"\"\"Return what value is needed for \n - Array/Type (if this DialogControl is to be wrapped in array, or\n - Object/$ClassName (if this DialogControl is not be wrapped in array)\n \"\"\"\n # OBSERVATION: $ClassName, whether it appears inside Object{} or just inside Array{} i.e.\n # outside Object{} is derived from the value of 'Type' attr\n # OBSERVATION: $ClassName xxx may be of the form 'Simulink.dialog.parameter.xxx' or 'Simulink.dialog.xxx'\n # see applications/sldemo_autotrans, applications/aero_dap3dof\n type = self.attr_value_by_name('Type')\n\n if type in [\n 'Button',\n 'Group',\n 'Text',\n 'TabContainer', # first found in corpus/github-downloaded/adi_ad961_models.slx \n 'Tab', # first found in corpus/github-downloaded/adi_ad961_models.slx \n 'CollapsiblePanel', # first found in corpus/github-downloaded/adi_ad961_models.slx \n 'Control', # first found in corpus/github-downloaded/adi_ad961_models.slx\n 'Panel', # first found in corpus/github/Lib_Turbo_CompressorVG_TMATS.slx \n 'Image', # first found in corpus/github/matlab/Contact_Forces_Lib \n \n ]:\n return f'Simulink.dialog.{type}'\n elif type in [\n 'CheckBox',\n 'Edit',\n 'Slider',\n 'Spinbox',\n 'Popup', # first found in corpus/github-downloaded/adi_ad961_models.slx \n 'RadioButton', # first found in corpus/matlab-central/ACTimeOvercurrentRelayBlock\n ]:\n return f'Simulink.dialog.parameter.{type}'\n else:\n raise Exception(f\"Unknown 'Type' attribute '{type}' in <DialogControl>\")\n\n\nclass DiagnosticSuppressor(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<DiagnosticSuppressor') and strval.endswith('</DiagnosticSuppressor>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'DiagnosticSuppressor' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return DiagnosticSuppressor(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\nclass DialogParameters(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert 
strval.startswith('<DialogParameters') and strval.endswith('</DialogParameters>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'DialogParameters' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return DialogParameters(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'DialogParameters {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\nclass Display(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Display') and strval.endswith('</Display>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Display' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Display(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += f'Display \"{self.content}\"' # special\n\n str_ += '\\n\\n'\n return str_\n\n\nclass EditorSettings(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<EditorSettings') and strval.endswith('</EditorSettings>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'EditorSettings' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return EditorSettings(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass EngineSettings(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<EngineSettings') and strval.endswith('</EngineSettings>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'EngineSettings' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return EngineSettings(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special \n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n 
return str_\n\n\nclass EnumStrPairs(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<EnumStrPairs') and strval.endswith('</EnumStrPairs>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'EnumStrPairs' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return EnumStrPairs(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'EnumStrPairs {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\nclass ExternalFileReference(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ExternalFileReference') and strval.endswith('</ExternalFileReference>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ExternalFileReference' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ExternalFileReference(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'ExternalFileReference {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass ExternalMode(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ExternalMode') and strval.endswith('</ExternalMode>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ExternalMode' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ExternalMode(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass Field(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Field') and strval.endswith('</Field>')\n super().__init__(strval, parent_xml)\n self.name_attr = None\n self.class_attr = None\n for x in self.attrs:\n if x.name == 'Name':\n self.name_attr = x\n if x.name == 'Class':\n self.class_attr = x\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Field(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n quoted = f'{self.name_attr.value} \"{self.content}\"' # default\n unquoted = f'{self.name_attr.value} {self.content}' # special\n boxed = f'{self.name_attr.value} [{self.content}]' # special\n\n if self.class_attr and self.class_attr.value in 
['double']:\n if self.content.startswith('[') and self.content.endswith(']'):\n return unquoted\n return boxed\n return quoted\n\n\nclass FilePath(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<FilePath') and strval.endswith('</FilePath>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return FilePath(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'FilePath \"{self.content}\"'\n\n\n\n\nclass FunctionConnector(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<FunctionConnector') and strval.endswith('</FunctionConnector>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'FunctionConnector' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return FunctionConnector(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'FunctionConnector {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass FunctionPort(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<FunctionPort') and strval.endswith('</FunctionPort>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'FunctionPort' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return FunctionPort(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'FunctionPort {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass GraphicalInterface(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<GraphicalInterface') and strval.endswith('</GraphicalInterface>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.externalFileReferences = []\n self.modelReferences = []\n self.testPointedSignals = []\n self.inports = []\n self.outports = []\n self.requireFunctions = []\n self.subsystemReferences = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ExternalFileReference':\n self.externalFileReferences.append(ExternalFileReference.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ModelReference':\n self.modelReferences.append(ModelReference.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'TestPointedSignal':\n self.testPointedSignals.append(TestPointedSignal.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Inport':\n self.inports.append(Inport.from_XmlElement(x))\n innerxml_used[x] = True\n\n if 
x.tag == 'Outport':\n self.outports.append(Outport.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'RequireFunction':\n self.requireFunctions.append(RequireFunction.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'SubsystemReference':\n self.subsystemReferences.append(SubsystemReference.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'GraphicalInterface' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return GraphicalInterface(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'GraphicalInterface {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.externalFileReferences:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.modelReferences:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.testPointedSignals:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.inports:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.outports:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.requireFunctions:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.subsystemReferences:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Help(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Help') and strval.endswith('</Help>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Help(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Help \"{self.content}\"'\n\n\nclass Initialization(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Initialization') and strval.endswith('</Initialization>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Initialization(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Initialization \"{self.content}\"'\n\n\nclass Inport(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Inport') and strval.endswith('</Inport>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Inport' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Inport(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Inport {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass InstanceData(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<InstanceData') and strval.endswith('</InstanceData>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.objects = [] # found in matlab-central/HEV_Battery_Lib.slx\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n 
self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n \n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'InstanceData' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return InstanceData(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n \n\n str_ += '\\n\\n'\n return str_\n\nclass LinkData(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<LinkData') and strval.endswith('</LinkData>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.dialogParameterss = [] # found in matlab-central/Dual_Clutch_Trans.slx \n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'DialogParameters':\n self.dialogParameterss.append(DialogParameters.from_XmlElement(x))\n innerxml_used[x] = True\n\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'LinkData' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return LinkData(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'LinkData {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.dialogParameterss:\n str_ += f'{x.strmdl}\\n'\n\n\n\n str_ += '}\\n\\n'\n return str_\n\nclass Line(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Line') and strval.endswith('</Line>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.branches = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Branch':\n self.branches.append(Branch.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Line' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Line(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Line {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.branches:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass LineDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<LineDefaults') and strval.endswith('</LineDefaults>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'LineDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return LineDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 
'LineDefaults {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass List(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<List') and strval.endswith('</List>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'List' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return List(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'List {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass LogicAnalyzerPlugin(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<LogicAnalyzerPlugin') and strval.endswith('</LogicAnalyzerPlugin>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'LogicAnalyzerPlugin' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return LogicAnalyzerPlugin(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass Mask(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Mask') and strval.endswith('</Mask>')\n super().__init__(strval, parent_xml)\n self.object_idmdl = Utils.object_idmdl_by_xml_element(self)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.displays = []\n self.types = []\n self.maskParameters = []\n self.dialogControls = []\n self.descriptions = []\n self.initializations = []\n self.helps = []\n self.capabilities = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Display':\n self.displays.append(Display.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Type':\n self.types.append(Type.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'MaskParameter':\n self.maskParameters.append(MaskParameter.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'DialogControl':\n self.dialogControls.append(DialogControl.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Description':\n self.descriptions.append(Description.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Initialization':\n self.initializations.append(Initialization.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Capabilities':\n self.capabilities.append(Capabilities.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Help':\n self.helps.append(Help.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ImageFile':\n # 
the corresponding information does not appear in the mdl file\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Mask' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Mask(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Object {\\n' # special\n\n str_ += f'$PropName \"MaskObject\"\\n'\n str_ += f'$ObjectID {self.object_idmdl}\\n' # TODO: figure out what ObjectID is\n str_ += f'$ClassName \"Simulink.Mask\"\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.displays:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.types:\n str_ += f'{x.strmdl}\\n'\n\n if self.maskParameters and len(self.maskParameters) > 1:\n str_ += 'Array {\\n'\n str_ += 'Type \"Simulink.MaskParameter\"\\n'\n # PropName attribute is mandatory.\n # Notice that there is no leading $\n str_ += 'PropName \"Parameters\"\\n'\n str_ += f'Dimension {len(self.maskParameters)}\\n'\n for x in self.maskParameters:\n str_ += f'{x.strmdl(is_array_element=True)}\\n'\n str_ += '}\\n'\n else:\n for x in self.maskParameters:\n str_ += f'{x.strmdl(is_array_element=False)}\\n'\n\n if self.dialogControls and len(self.dialogControls) > 1:\n str_ += 'Array {\\n'\n str_ += f'Type \"{self.dialogControls[0].array_type_or_object_className()}\"\\n'\n # PropName attribute is mandatory.\n # Notice that there is no leading $\n str_ += 'PropName \"DialogControls\"\\n'\n str_ += f'Dimension {len(self.dialogControls)}\\n'\n for x in self.dialogControls:\n str_ += f'{x.strmdl(is_array_element=True)}\\n'\n str_ += '}\\n'\n else:\n for x in self.dialogControls:\n str_ += f'{x.strmdl(is_array_element=False)}\\n'\n\n for x in self.descriptions:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.initializations:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.helps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.capabilities:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass MaskDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<MaskDefaults') and strval.endswith('</MaskDefaults>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.displays = []\n self.maskParameters = []\n self.dialogControls = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Display':\n self.displays.append(Display.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'MaskParameter':\n self.maskParameters.append(MaskParameter.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'DialogControl':\n self.dialogControls.append(DialogControl.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'MaskDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return MaskDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'MaskDefaults {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.displays:\n str_ += f'{x.strmdl}\\n'\n\n # although <MaskDefaults> contains <DialogControl>, the information\n # about the contained <DialogControl> and 
its children (<ControlOptions>) is\n # not present in the mdl file. So, it is not included in the mdl string\n\n # for x in self.dialogControls:\n # str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n\n # special: appears outside the parent\n for x in self.maskParameters:\n str_ += f'{x.strmdl(is_array_element=False)}\\n'\n\n return str_\n\n\nclass MaskParameter(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<MaskParameter') and strval.endswith('</MaskParameter>')\n super().__init__(strval, parent_xml)\n self.object_idmdl = Utils.object_idmdl_by_xml_element(self)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.prompts = []\n self.values = []\n self.typeOptions = []\n self.callbacks = []\n self.ranges = []\n self.tabNames = [] # first found in corpus/github/Lib_Cntrl_FirstOrderActuator_TMATS.slx\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Prompt':\n self.prompts.append(Prompt.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Value':\n self.values.append(Value.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'TypeOptions':\n self.typeOptions.append(TypeOptions.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Callback':\n self.callbacks.append(Callback.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Range':\n self.ranges.append(Range.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'TabName':\n self.tabNames.append(TabName.from_XmlElement(x))\n innerxml_used[x] = True\n\n \n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'MaskParameter' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return MaskParameter(xml_element.strval, xml_element.parent_xml)\n\n def strmdl(self, is_array_element):\n \"\"\"\n Args: \n is_array_element (bool): True if the returned str is to be wrapped inside Array{}, else False \n \"\"\"\n # special\n\n if self.parent_xml.tag == 'MaskDefaults': # see automotive/sldemo_autotrans\n element_name = 'MaskParameterDefaults'\n elif self.parent_xml.tag == 'Mask': # see automotive/sldemo_autotrans\n element_name = 'Object'\n else:\n raise Exception(f\"Element name for <MaskParameter> not decided\")\n\n str_ = f'{element_name} {{\\n'\n\n # OBSERVATION: If multiple <MaskParameter> are contained in a parent tag (eg. <Mask>),\n # they are wrapped in Array{}\n #\n # <MaskParameter> become Object {} in mdl and they contain $ObjectID, $PropName, and\n # $ClassName.\n #\n # When they are wrapped in Array{}, in original mdl files (generated by Simulink)\n # - $ PropName is moved out (becomes a MANDATORY attribute of Array and renamed to\n # Propname i.e. no leading $)\n # - $ClassName is removed (THIS IS DIFFERENT IN <DialogControl>)\n # - $ObjectID remains the same.\n #\n # Although keeping $PropName, and $ClassName inside these wrapped Object{}s\n # does not harm, we have chosen to remove them just like in the mdl file produced by Simulink.\n\n if element_name == 'Object':\n str_ += f'$ObjectID {self.object_idmdl}\\n' # TODO: figure out what ObjectID is\n if not is_array_element:\n str_ += f'$PropName \"Parameters\"\\n'\n str_ += f'$ClassName \"Simulink.MaskParameter\"\\n'\n\n # TODO: mdl contains 'Prompt'. What is it? 
(see dma/ex_modeling_simple_system)\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.prompts:\n str_ += f'{x.strmdl}\\n'\n\n # special \n for x in self.values:\n str_ += f'{x.strmdl}\\n'\n\n # special: inferred from corpus/matlab-central/Link_A.slx \n # Even if 'Value' does not appear in attributes or inner tags of <MaskParameter>,\n # the corresponding mdl format still has 'Value' (set to \"\"). \n if not self.values: # if empty\n for x in self.attrs:\n if x.name == 'Value':\n break \n else: # none of the attributes has name 'Value' \n str_ += f'Value \"\"\\n'\n\n for x in self.typeOptions:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.callbacks:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.ranges:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.tabNames:\n str_ += f'{x.strmdl}\\n'\n\n \n \n\n str_ += '}\\n\\n'\n return str_\n\nclass MaskParameterDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<MaskParameterDefaults') and strval.endswith('</MaskParameterDefaults>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'MaskParameterDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return MaskParameterDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'MaskParameterDefaults {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\nclass MATStruct(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<MATStruct') and strval.endswith('</MATStruct>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.fields = []\n self.arrays = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Field':\n self.fields.append(Field.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Array':\n self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'MATStruct' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return MATStruct(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'MATStruct {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.fields:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass ModelOrLibraryOrSubsystem(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert (strval.startswith('<Model') and strval.endswith('</Model>')) or (strval.startswith('<Library') and strval.endswith('</Library>')) or (strval.startswith('<Subsystem') and strval.endswith('</Subsystem>'))\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 
'xml'}\n\n self.ps = []\n self.configManagerSettings = []\n self.editorSettings = []\n self.simulationSettings = []\n self.externalModes = []\n self.modelReferenceSettings = []\n self.concurrentExecutionSettings = []\n self.systems = []\n self.diagnosticSuppressors = []\n self.logicAnalyzerPlugins = []\n self.notesPlugins = []\n self.sLCCPlugins = []\n self.webScopes_FoundationPlugins = []\n self.arrays = []\n self.graphicalInterfaces = []\n self.userParameters = []\n self.modelWorkspaces = []\n self.objects = []\n self.windowsInfos = []\n self.configSets = []\n self.blockDiagramDefaults = []\n self.verifications = [] # found in matlab-central/Baro_Library.slx\n self.configurationSets = [] # found in matlab-central/Baro_Library.slx\n self.systemDefaultss = [] # found in matlab-central/Baro_Library.slx\n self.blockDefaultss = [] # found in matlab-central/Baro_Library.slx\n self.annotationDefaultss = [] # found in matlab-central/Baro_Library.slx\n self.lineDefaultss = [] # found in matlab-central/Baro_Library.slx\n self.maskDefaultss = [] # found in matlab-central/Baro_Library.slx\n self.maskParameterDefaultss = [] # found in matlab-central/Baro_Library.slx\n self.blockParameterDefaultss = [] # found in matlab-central/Baro_Library.slx\n self.engineSettingss = [] # found in matlab-central/Assembly_Quadrotor.slx \n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ConfigManagerSettings':\n self.configManagerSettings.append(ConfigManagerSettings.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'EditorSettings':\n self.editorSettings.append(EditorSettings.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'SimulationSettings':\n self.simulationSettings.append(SimulationSettings.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ExternalMode':\n self.externalModes.append(ExternalMode.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ModelReferenceSettings':\n self.modelReferenceSettings.append(ModelReferenceSettings.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ConcurrentExecutionSettings':\n self.concurrentExecutionSettings.append(ConcurrentExecutionSettings.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'System':\n self.systems.append(System.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'DiagnosticSuppressor':\n self.diagnosticSuppressors.append(DiagnosticSuppressor.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'LogicAnalyzerPlugin':\n self.logicAnalyzerPlugins.append(LogicAnalyzerPlugin.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'NotesPlugin':\n self.notesPlugins.append(NotesPlugin.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'SLCCPlugin':\n self.sLCCPlugins.append(SLCCPluginPlugin.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'WebScopes_FoundationPlugin':\n self.webScopes_FoundationPlugins.append(WebScopes_FoundationPlugin.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Array':\n self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'GraphicalInterface':\n self.graphicalInterfaces.append(GraphicalInterface.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'UserParameters':\n self.userParameters.append(UserParameters.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ModelWorkspace':\n self.modelWorkspaces.append(ModelWorkspace.from_XmlElement(x))\n innerxml_used[x] = True\n\n if 
x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'WindowsInfo':\n self.windowsInfos.append(WindowsInfo.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ConfigSet':\n self.configSets.append(ConfigSet.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'BlockDiagramDefaults':\n self.blockDiagramDefaults.append(BlockDiagramDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Verification':\n self.verifications.append(Verification.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'ConfigurationSet':\n self.configurationSets.append(ConfigurationSet.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'SystemDefaults':\n self.systemDefaultss.append(SystemDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'BlockDefaults':\n self.blockDefaultss.append(BlockDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'AnnotationDefaults':\n self.annotationDefaultss.append(AnnotationDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'LineDefaults':\n self.lineDefaultss.append(LineDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'MaskDefaults':\n self.maskDefaultss.append(MaskDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'MaskParameterDefaults':\n self.maskParameterDefaultss.append(MaskParameterDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'BlockParameterDefaults':\n self.blockParameterDefaultss.append(BlockParameterDefaults.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'EngineSettings':\n self.engineSettingss.append(EngineSettings.from_XmlElement(x))\n innerxml_used[x] = True\n\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ModelOrLibraryOrSubsystem' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ModelOrLibraryOrSubsystem(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n\n str_ = f'{self.tag} {{\\n' # can be Model or Library\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.configManagerSettings:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.editorSettings:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.simulationSettings:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.externalModes:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.modelReferenceSettings:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.concurrentExecutionSettings:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.systems:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.diagnosticSuppressors:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.logicAnalyzerPlugins:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.notesPlugins:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.sLCCPlugins:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.webScopes_FoundationPlugins:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.graphicalInterfaces:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.userParameters:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.modelWorkspaces:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.windowsInfos:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.configSets:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.blockDiagramDefaults:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.verifications:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.configurationSets:\n str_ += 
f'{x.strmdl}\\n'\n\n for x in self.systemDefaultss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.blockDefaultss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.annotationDefaultss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.lineDefaultss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.maskDefaultss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.maskParameterDefaultss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.blockParameterDefaultss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.engineSettingss:\n str_ += f'{x.strmdl}\\n'\n\n\n str_ += '}\\n\\n'\n\n str_ = Utils.remove_multiple_linegaps(str_)\n str_ = Utils.replacements4mdl(str_)\n str_ = Utils.remove_multiple_linegaps_between_consecutive_closing_braces(str_)\n return str_\n\n\nclass ModelReference(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ModelReference') and strval.endswith('</ModelReference>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ModelReference' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ModelReference(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'ModelReference {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass ModelReferenceSettings(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ModelReferenceSettings') and strval.endswith('</ModelReferenceSettings>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.objects = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ModelReferenceSettings' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ModelReferenceSettings(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass ModelWorkspace(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<ModelWorkspace') and strval.endswith('</ModelWorkspace>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ModelWorkspace' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return ModelWorkspace(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n 
for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass NotesPlugin(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<NotesPlugin') and strval.endswith('</NotesPlugin>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'NotesPlugin' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return NotesPlugin(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass Object(XmlElement):\n # TODO: what is $ObjectID in mdl? (this is generated)\n # there isObjectID in xml but that does not match mdl's $ObjectID\n # figure out how to generate it\n\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Object') and strval.endswith('</Object>')\n super().__init__(strval, parent_xml)\n self.object_idmdl = Utils.object_idmdl_by_xml_element(self)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.arrays = []\n self.objects = [] # <Object> can contain children <Object>\n self.customPropertys = [] # first found in corpus/github-downloaded/CSEI_u.slx\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Array':\n self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n \n if x.tag == 'CustomProperty':\n self.customPropertys.append(CustomProperty.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Object' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Object(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n # special\n\n # these <Object> tags are found in configSet0.xml\n if self.attr_value_by_name('ClassName') in [\n 'Simulink.ConfigSet',\n 'Simulink.SolverCC',\n 'Simulink.DataIOCC',\n 'Simulink.OptimizationCC',\n 'Simulink.DebuggingCC',\n 'Simulink.HardwareCC',\n 'Simulink.ModelReferenceCC',\n 'Simulink.SFSimCC',\n 'Simulink.RTWCC',\n 'SlCovCC.ConfigComp',\n 'hdlcoderui.hdlcc'\n ]:\n element_name = self.attr_value_by_name('ClassName')\n\n else:\n element_name = 'Object' # default\n\n str_ = f'{element_name} {{\\n'\n\n for x in self.attrs:\n name = x.name\n value = x.value\n\n if x.name in ['ClassName', 'ObjectID', 'PropName']:\n name = '$' + name\n if x.name in ['BackupClass', 'ClassName', 'PropName', 'Version']:\n value = f'\"{x.value}\"'\n if x.name == 'ObjectID':\n value = self.object_idmdl\n\n str_ += f'{name} {value}\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.customPropertys:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass 
Option(XmlElement):\n # first found in design-model-behavior/prioritydemo\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Option') and strval.endswith('</Option>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Option(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Cell \"{self.content}\"'\n\n\nclass Outport(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Outport') and strval.endswith('</Outport>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Outport' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Outport(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Outport {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass P(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<P') and strval.endswith('</P>')\n super().__init__(strval, parent_xml)\n self.name_attr = None\n self.class_attr = None\n for x in self.attrs:\n if x.name == 'Name':\n self.name_attr = x\n if x.name == 'Class':\n self.class_attr = x\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return P(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n quoted = f'{self.name_attr.value} \"{self.content}\"' # default\n unquoted = f'{self.name_attr.value} {self.content}'\n boxed = f'{self.name_attr.value} [{self.content}]'\n unquoted_indented = f' {self.name_attr.value} {self.content}'\n\n # order rules by priority.\n\n if '&quot;' in self.content: # content contains double quotes i.e. 
\"\n return quoted\n\n # OBSERVATION: if these are not indented, model comparison shows differences -- don't know why\n # TODO: If mdl-preetification is implemented, this can be removed as preetifying mdl will \n # introduce indentation by itself \n if self.name_attr and self.name_attr.value in [\n 'rep_seq_t', # see applications/sldemo_hydroid\n 'rep_seq_y', # see applications/sldemo_hydroid\n ]:\n return unquoted_indented\n\n if self.name_attr and self.name_attr.value in [\n 'Components', # mandatory\n 'Location', # mandatory\n 'Position',\n ]:\n return unquoted\n\n # contents starting and ending with [ and ] respectively are MOSTLY unquoted,\n # However, if some p tags with content starting and ending in [ and ] respectively need to\n # be quoted, put them in the list inside this rule.\n if self.content.startswith('[') and self.content.endswith(']'):\n # special\n if self.name_attr and self.name_attr.value in [\n\n ]:\n return quoted\n # default\n return unquoted\n\n if self.content in ['on', 'off']:\n return unquoted\n\n if self.class_attr:\n if self.class_attr.value == 'double':\n if self.content.startswith('[') and self.content.endswith(']'):\n return unquoted\n return boxed\n if self.class_attr.value in ['logical', 'int32', 'uint32']:\n return boxed\n\n return quoted\n\n\nclass Port(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Port') and strval.endswith('</Port>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.arrays = [] \n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n \n if x.tag == 'Array':\n self.arrays.append(Array.from_XmlElement(x))\n innerxml_used[x] = True\n \n \n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Port' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Port(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Port {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Prompt(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Prompt') and strval.endswith('</Prompt>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Prompt(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Prompt \"{self.content}\"'\n\n\nclass Range(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Range') and strval.endswith('</Range>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Range(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Range {self.content}'\n\n\nclass RequireFunction(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<RequireFunction') and strval.endswith('</RequireFunction>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n 
innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'RequireFunction' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return RequireFunction(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'RequireFunction {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass SimulationSettings(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<SimulationSettings') and strval.endswith('</SimulationSettings>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.objects = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for x in self.inner_xmls:\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'SimulationSettings' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return SimulationSettings(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass SLCCPluginPlugin(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<SLCCPlugin') and strval.endswith('</SLCCPlugin>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'SLCCPlugin' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return SLCCPluginPlugin(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass SubsystemReference(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<SubsystemReference') and strval.endswith('</SubsystemReference>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'SubsystemReference' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return SubsystemReference(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'SubsystemReference {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass System(XmlElement):\n def __init__(self, 
strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<System') and strval.endswith('</System>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.blocks = []\n self.lines = []\n self.annotations = []\n self.lists = []\n self.functionConnectors = []\n self.connectors = [] \n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Block':\n self.blocks.append(Block.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Line':\n self.lines.append(Line.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Annotation':\n self.annotations.append(Annotation.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'List':\n self.lists.append(List.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'FunctionConnector':\n self.functionConnectors.append(FunctionConnector.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Connector':\n self.connectors.append(Connector.from_XmlElement(x))\n innerxml_used[x] = True\n\n \n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'System' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return System(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'System {\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.blocks:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.lines:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.annotations:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.lists:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.functionConnectors:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.connectors:\n str_ += f'{x.strmdl}\\n'\n\n \n\n str_ += '}\\n\\n'\n return str_\n\n\nclass SystemDefaults(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<SystemDefaults') and strval.endswith('</SystemDefaults>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'SystemDefaults' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return SystemDefaults(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'System {\\n' # SystemDefaults appears as System only\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\n\nclass TabName(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<TabName') and strval.endswith('</TabName>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return TabName(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'TabName \"{self.content}\"'\n\n\n\n\nclass TestPointedSignal(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<TestPointedSignal') and strval.endswith('</TestPointedSignal>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if 
x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'TestPointedSignal' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return TestPointedSignal(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'TestPointedSignal {\\n'\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Tooltip(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Tooltip') and strval.endswith('</Tooltip>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Tooltip(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Tooltip \"{self.content}\"'\n\n\nclass Type(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Type') and strval.endswith('</Type>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Type(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Type \"{self.content}\"'\n\n\nclass UserParameters(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<UserParameters') and strval.endswith('</UserParameters>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'UserParameters' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return UserParameters(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass TypeOptions(XmlElement):\n # first found in design-model-behavior/prioritydemo\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<TypeOptions') and strval.endswith('</TypeOptions>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.options = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Option':\n self.options.append(Option.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'TypeOptions' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return TypeOptions(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Array {\\n' # special\n str_ += 'Type \"Cell\"\\n'\n str_ += f'Dimension {len(self.inner_xmls_of_type_xml)}\\n'\n str_ += 'PropName \"TypeOptions\"\\n' # required\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in 
self.options:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Value(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Value') and strval.endswith('</Value>')\n super().__init__(strval, parent_xml)\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Value(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n return f'Value \"{self.content}\"'\n\n\n\n\nclass Verification(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Verification') and strval.endswith('</Verification>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Verification' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return Verification(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n # special: this tag was found in matlab-central/Baro_Library\n # but the content was not found in corresponding mdl format\n return ''\n # str_ = 'Verification {\\n'\n\n # for x in self.attrs:\n # str_ += f'{x.name} \"{x.value}\"\\n'\n\n # for x in self.ps:\n # str_ += f'{x.strmdl}\\n'\n\n # str_ += '}\\n\\n'\n # return str_\n\n\nclass WebScopes_FoundationPlugin(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<WebScopes_FoundationPlugin') and strval.endswith('</WebScopes_FoundationPlugin>')\n super().__init__(strval, parent_xml)\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'WebScopes_FoundationPlugin' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return WebScopes_FoundationPlugin(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = '' # special\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n return str_\n\n\nclass WindowsInfo(XmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<WindowsInfo') and strval.endswith('</WindowsInfo>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.objects = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_XmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Object':\n self.objects.append(Object.from_XmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'WindowsInfo' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_XmlElement(cls, xml_element):\n return WindowsInfo(xml_element.strval, xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = ''\n\n for x in self.attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.objects:\n str_ += f'{x.strmdl}\\n'\n\n str_ += 
'\\n\\n'\n return str_\n\n\nif __name__ == '__main__':\n pass\n", "id": "1015367", "language": "Python", "matching_score": 3.199739694595337, "max_stars_count": 2, "path": "tags_model.py" }, { "content": "#!/usr/bin/python3\n\nfrom commons import Utils, UtilsStfl, XmlElement \n\n\nclass StflXmlElement(XmlElement):\n # these mdl attributes are based on id, and thus the value from\n # xml cannot be used directly while forming mdl string.\n # In XML tag they may appear either as attributes or enclosed <P> tags\n _id_based_mdl_attrs = [\n 'chart',\n 'firstSubWire', # found in simulink_general/sldemo_boiler\n 'id',\n 'machine',\n 'outputData', # found in state, chart (corpus/matlab-central/stateflow_example.slx)\n 'outputState',\n 'quantum', # found in automotive/sldemo_fuelsys(junction)\n 'SSID',\n 'subLink', # found in simulink_general/sldemo_boiler\n 'subviewer',\n 'viewObj',\n ]\n\n # key: some tag\n # value: list of parent tag of the key tag whose id serves\n # as the first element in treeNode/linkNode. If there are\n # multiple such values (for example, 'chart' and 'state',\n # the one which is more closer (in the xml tree hierarchy)\n # to this element will have priority over others (see code\n # section in StflXmlElement.__init__() where this dict is used.)\n node1parent_map = {\n 'data': ['chart', 'state'],\n 'event': ['chart', 'state'],\n 'junction': ['chart', 'state'],\n 'target': ['machine'], # TODO: needs further confirmation\n 'transition': ['chart', 'state'],\n 'state': ['chart', 'state']\n }\n\n def __init__(self, strval, parent_xml):\n super().__init__(strval, parent_xml)\n\n # note that not all StflXmlElements will have each of these attributes set to not-None value\n # They are declared here in the parent class to reduce model complexity (during development)\n\n # While this may look illogical from OOP design point of view,\n # this is justified since we don't have a complete knowledge of what attributes a class\n # should have. The project is fairly explorative.\n\n # if an attribute is discovered only in few classes, we will mention that in a comment\n # next to its declaration (for example, see idmdl_subviewer, idmdl_outputState)\n\n # Only those attributes which can be set here (in this parent class's initializer)\n # will be set by this method. 
Those attributes which are derived from information\n # contained within an included <P> tag will be set by the respective class's\n # initializer after its self.ps is set.\n\n self.idmdl = None\n self.idslx = None\n self.ssid = None\n\n # these are objects, not just ids\n self.chart = None\n self.machine = None\n self.superstate = None\n self.firstData = None\n self.firstEvent = None\n self.firstJunction = None\n self.firstTransition = None\n\n # these are ids, not objects\n self.idmdl_subviewer = None # found in junction, state, transition\n self.idmdl_outputState = None # found in data\n self.idmdl_outputData = None # found in state\n self.idmdl_firstSubWire = None # found in transition (simulink_general/sldemo_boiler)\n\n self.idmdl_treeNode1 = None\n self.idmdl_treeNode2 = None\n self.idmdl_treeNode3 = None\n self.idmdl_treeNode4 = None\n self.idmdl_linkNode1 = None\n self.idmdl_linkNode2 = None\n self.idmdl_linkNode3 = None\n\n # the information is contined in self.ps\n # so, this will be set\n self.idmdl_quantum1 = None\n self.idmdl_quantum2 = None\n self.idmdl_quantum3 = None\n self.idmdl_quantum4 = None\n\n # first found in simulink_general/sldemo_boiler (transition)\n self.idmdl_subLink1 = None\n self.idmdl_subLink2 = None\n self.idmdl_subLink3 = None\n\n # set self.chart\n xml = self.parent_xml\n while xml:\n if xml and xml.tag == 'chart':\n self.chart = xml\n break\n xml = xml.parent_xml\n\n # set self.machine\n xml = self.parent_xml\n while xml:\n if xml and xml.tag == 'machine':\n self.machine = xml\n break\n xml = xml.parent_xml\n\n # set self.superstate\n xml = self.parent_xml\n while xml:\n if xml and xml.tag == 'state':\n self.superstate = xml\n break\n xml = xml.parent_xml\n\n # set self.idmdl_treeNode1\n if self.tag in ['state']:\n xml = self.parent_xml\n while xml:\n if xml and xml.tag in self.node1parent_map[self.tag]:\n self.idmdl_treeNode1 = xml.idmdl\n break\n xml = xml.parent_xml\n\n # set self.idmdl_linkNode1\n if self.tag in ['data', 'event', 'junction', 'transition', 'target', ]:\n xml = self.parent_xml\n while xml:\n if xml and xml.tag in self.node1parent_map[self.tag]:\n self.idmdl_linkNode1 = xml.idmdl\n break\n xml = xml.parent_xml\n\n self.idslx = self.attr_value_by_name('id')\n self.ssid = self.attr_value_by_name('SSID')\n\n # for <src>, <dst>, idmdl is set in their own __init__()\n # because SSID for these tags is located in a included <P> tag\n # and self.ps are set only in corresponding init methods.\n if self.tag in ['data', 'event', 'junction', 'state', 'transition']:\n self.idmdl = UtilsStfl.idmdl_by_ssid(ssid=self.ssid, idmdl_chart=self.chart.idmdl)\n\n if self.tag in ['chart', 'instance', 'machine', 'target']:\n self.idmdl = UtilsStfl.idmdl_by_idslx(idslx=self.idslx)\n\n @property\n def treeNode(self):\n return f\"[{self.idmdl_treeNode1} {self.idmdl_treeNode2} {self.idmdl_treeNode3} {self.idmdl_treeNode4}]\"\n\n @property\n def linkNode(self):\n return f\"[{self.idmdl_linkNode1} {self.idmdl_linkNode2} {self.idmdl_linkNode3}]\"\n\n @property\n def quantum(self):\n return f\"[{self.idmdl_quantum1} {self.idmdl_quantum2} {self.idmdl_quantum3} {self.idmdl_quantum4}]\"\n\n @property\n def subLink(self):\n return f\"[{self.idmdl_subLink1} {self.idmdl_subLink2} {self.idmdl_subLink3}]\"\n\n\n####################################################################################\n\nclass ActiveStateOutput(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<activeStateOutput') and 
strval.endswith('</activeStateOutput>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'ActiveStateOutput' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return ActiveStateOutput(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'activeStateOutput {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Array(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<array') and strval.endswith('</array>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Array' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Array(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'array {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Chart(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<chart') and strval.endswith('</chart>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.subviewSs = []\n self.emls = []\n self.activeStateOutputs = [] # first found in corpus/matlab-central/stateflow_example.slx \n self.plantModelingInfos = [] # first found in corpus/other/fwr.slx \n self.children = None\n \n\n for x in self.inner_xmls:\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n if p.name_attr.value == 'viewObj':\n # slx contains viewObj in <P> whose value is equal to its own idslx (observation so far) which needs to be mapped to idmdl\n self.idmdl_viewObj = UtilsStfl.idmdl_by_idslx(idslx=p.content)\n\n if p.name_attr.value == 'outputData':\n self.idmdl_outputData = UtilsStfl.idmdl_by_ssid(p.content, self.idmdl)\n\n if x.tag == 'subviewS':\n self.subviewSs.append(SubviewS.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'eml':\n self.emls.append(Eml.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'activeStateOutput':\n self.activeStateOutputs.append(ActiveStateOutput.from_StflXmlElement(x))\n innerxml_used[x] = True\n \n if x.tag == 'plantModelingInfo':\n self.plantModelingInfos.append(PlantModelingInfo.from_StflXmlElement(x))\n innerxml_used[x] = True\n \n if x.tag == 'Children':\n self.children = Children.from_StflXmlElement(x)\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise 
Exception(f\"Inner XML of 'Chart' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Chart(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'chart {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'treeNode {self.treeNode}\\n'\n str_ += f'machine {self.machine.idmdl}\\n'\n str_ += f'viewObj {self.idmdl_viewObj}\\n'\n\n if self.firstData:\n str_ += f'firstData {self.firstData.idmdl}\\n'\n if self.firstEvent:\n str_ += f'firstEvent {self.firstEvent.idmdl}\\n'\n if self.firstJunction:\n str_ += f'firstJunction {self.firstJunction.idmdl}\\n'\n if self.firstTransition:\n str_ += f'firstTransition {self.firstTransition.idmdl}\\n'\n if self.idmdl_outputData:\n str_ += f'outputData {self.idmdl_outputData}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.subviewSs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.emls:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.activeStateOutputs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.plantModelingInfos:\n str_ += f'{x.strmdl}\\n' \n\n str_ += '}\\n\\n'\n\n # mdl from children goes outside {}\n if self.children:\n str_ += f'{self.children.strmdl}\\n'\n\n return str_\n\n\nclass Children(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Children') and strval.endswith('</Children>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.charts = []\n self.states = []\n self.transitions = []\n self.junctions = []\n self.datas = []\n self.events = []\n self.targets = []\n\n for x in self.inner_xmls:\n pass\n\n if x.tag == 'chart':\n self.charts.append(Chart.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'state':\n self.states.append(State.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'transition':\n self.transitions.append(Transition.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'junction':\n self.junctions.append(Junction.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'data':\n self.datas.append(Data.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'event':\n self.events.append(Event.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'target':\n self.targets.append(Target.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Children' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Children(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = ''\n\n for x in self.charts:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.states:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.transitions:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.junctions:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.datas:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.events:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.targets:\n str_ += f'{x.strmdl}\\n'\n\n return str_\n\n\nclass Data(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<data') and strval.endswith('</data>')\n 
super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.propss = []\n self.messages = [] # first found in design-model-behavior/slexDynamicSchedulingExample\n self.loggingInfos = [] # first found in corpus/github-downloaded/ATWS.slx\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n # TODO: We are assuming that the chart of this data is the same as the chart of the output state. What if this does not hold true?\n if p.name_attr.value == 'outputState':\n self.idmdl_outputState = UtilsStfl.idmdl_by_ssid(p.content, self.chart.idmdl)\n\n if x.tag == 'props':\n self.propss.append(Props.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'message':\n self.messages.append(Message.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'loggingInfo':\n self.loggingInfos.append(LoggingInfo.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n \n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Data' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Data(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'data {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'ssIdNumber {self.ssid}\\n'\n str_ += f'linkNode {self.linkNode}\\n'\n str_ += f'machine {self.machine.idmdl}\\n'\n if self.idmdl_outputState:\n str_ += f'outputState {self.idmdl_outputState}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.propss:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.messages:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.loggingInfos:\n str_ += f'{x.strmdl}\\n'\n\n \n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Debug(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<debug') and strval.endswith('</debug>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Debug' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Debug(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'debug {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Dst(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<dst') and strval.endswith('</dst>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n if p.name_attr.value == 'SSID':\n self.idmdl = UtilsStfl.idmdl_by_ssid(p.content, self.chart.idmdl)\n for ix, u in 
innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Dst' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Dst(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'dst {\\n'\n if self.idmdl:\n str_ += f'id {self.idmdl}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Eml(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<eml') and strval.endswith('</eml>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Eml' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Eml(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'eml {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Event(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<event') and strval.endswith('</event>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Event' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Event(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'event {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'ssIdNumber {self.ssid}\\n'\n str_ += f'linkNode {self.linkNode}\\n'\n str_ += f'machine {self.machine.idmdl}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Fixpt(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<fixpt') and strval.endswith('</fixpt>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Fixpt' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Fixpt(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'fixpt {\\n'\n\n for x in self.attrs:\n if not x.name in 
self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Instance(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<instance') and strval.endswith('</instance>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n if p.name_attr.value == 'machine':\n self.idmdl_machine = UtilsStfl.idmdl_by_idslx(p.content)\n if p.name_attr.value == 'chart':\n self.idmdl_chart = UtilsStfl.idmdl_by_idslx(p.content)\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Instance' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Instance(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'instance {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'machine {self.idmdl_machine}\\n'\n str_ += f'chart {self.idmdl_chart}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Junction(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<junction') and strval.endswith('</junction>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.has_quantum = False\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n # OBSERVATION: subviewer can be either state or chart\n if p.name_attr.value == 'subviewer':\n idmdl_subviewer = UtilsStfl.idmdl_by_idslx(idslx=p.content, create_if_needed=False)\n if not idmdl_subviewer:\n idmdl_subviewer = UtilsStfl.idmdl_by_ssid(ssid=p.content, idmdl_chart=self.chart.idmdl, create_if_needed=False)\n if idmdl_subviewer:\n self.idmdl_subviewer = idmdl_subviewer\n else:\n raise Exception(f\"Failed to set 'subviewer' of junction.\")\n\n if p.name_attr.value == 'quantum':\n self.has_quantum = True\n tokens = p.content.split()\n self.idmdl_quantum1 = tokens[0][1:]\n self.idmdl_quantum2 = tokens[1]\n self.idmdl_quantum3 = tokens[2]\n self.idmdl_quantum4 = tokens[3][:-1]\n\n # OBSERVATION: the ids in quantum attribute are transition ids\n # cannot just write \"if slef.idmdl_quantum1\", because bool('0') is True (unlike bool(0))\n if self.idmdl_quantum1 != '0':\n self.idmdl_quantum1 = UtilsStfl.idmdl_by_ssid(self.idmdl_quantum1, self.chart.idmdl)\n if self.idmdl_quantum2 != '0':\n self.idmdl_quantum2 = UtilsStfl.idmdl_by_ssid(self.idmdl_quantum2, self.chart.idmdl)\n if self.idmdl_quantum3 != '0':\n self.idmdl_quantum3 = UtilsStfl.idmdl_by_ssid(self.idmdl_quantum3, self.chart.idmdl)\n if self.idmdl_quantum4 != '0':\n self.idmdl_quantum4 = UtilsStfl.idmdl_by_ssid(self.idmdl_quantum4, self.chart.idmdl)\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Junction' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return 
Junction(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'junction {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'ssIdNumber {self.ssid}\\n'\n str_ += f'linkNode {self.linkNode}\\n'\n str_ += f'chart {self.chart.idmdl}\\n'\n str_ += f'subviewer {self.idmdl_subviewer}\\n'\n if self.has_quantum:\n str_ += f'quantum {self.quantum}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass LoggingInfo(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<loggingInfo') and strval.endswith('</loggingInfo>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'LoggingInfo' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return LoggingInfo(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'loggingInfo {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Machine(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<machine') and strval.endswith('</machine>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.children = []\n self.debugs = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'debug':\n self.debugs.append(Debug.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Children':\n self.children = Children.from_StflXmlElement(x)\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Machine' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Machine(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'machine {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'name \"dummy_name\"\\n'\n\n if self.firstTarget:\n str_ += f'firstTarget {self.firstTarget.idmdl}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.debugs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n\n if self.children:\n str_ += f'{self.children.strmdl}\\n'\n\n return str_\n\n\nclass Message(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<message') and strval.endswith('</message>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n 
self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Message' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Message(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'message {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass NoteBox(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<noteBox') and strval.endswith('</noteBox>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'NoteBox' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return NoteBox(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'noteBox {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass P(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<P') and strval.endswith('</P>')\n super().__init__(strval, parent_xml)\n self.name_attr = None\n self.class_attr = None\n for x in self.attrs:\n if x.name == 'Name':\n self.name_attr = x\n if x.name == 'Class':\n self.class_attr = x\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return P(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n quoted = f'{self.name_attr.value} \"{self.content}\"' # default\n unquoted = f'{self.name_attr.value} {self.content}'\n boxed = f'{self.name_attr.value} [{self.content}]'\n\n quoted_list = [\n 'blockName',\n 'codeFlags',\n 'dataType',\n 'description',\n 'document',\n 'editorLayout', # first found in automotive/sldemo_fuelsys/(eml)\n 'fimathString', # first found in automotive/sldemo_fuelsys/(eml)\n 'firstIndex', # first found in powerwindow03/data\n 'labelString',\n 'logName', # first found in simulink_general/sldemo_boiler\n 'name',\n 'sampleTime',\n 'script', # see automotive/sldemo_fuelsys/(eml)\n 'size', # first foundin powerwindow03/data\n ]\n\n if self.name_attr.value in quoted_list:\n return quoted\n return unquoted\n\n\n# this tag was first found in Corpus/other/fwr.slx\n# This slx file has 'empty' <plantModelingInfo>. \n# so, at this point, we don't know what tags are contained inside\n# '<plantModelingInfo'>, and how they are transformed into mdl format.\n# Thus, at this point, we assume only <P> tags are contained inside <plantModelingInfo> tag. 
\nclass PlantModelingInfo(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<plantModelingInfo') and strval.endswith('</plantModelingInfo>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'PlantModelingInfo' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return PlantModelingInfo(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'plantModelingInfo {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\n\nclass Props(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<props') and strval.endswith('</props>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.arrays = []\n self.types = []\n self.units = []\n self.ranges = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'array':\n self.arrays.append(Array.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'type':\n self.types.append(Type.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'unit':\n self.units.append(Unit.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'range':\n self.ranges.append(Range.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Props' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Props(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'props {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.arrays:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.types:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.units:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.ranges:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Range(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<range') and strval.endswith('</range>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Range' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Range(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'range {\\n'\n\n for x in self.attrs:\n if not x.name in 
self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Simulink(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<simulink') and strval.endswith('</simulink>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Simulink' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Simulink(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'simulink {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Slide(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<slide') and strval.endswith('</slide>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Slide' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Slide(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'slide {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Src(StflXmlElement):\n \"\"\"the SSID in <src> and <dst> is of corresponding state/junction.\"\"\"\n\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<src') and strval.endswith('</src>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n # For some src eg. 
the initial transition which have no source state/junction,\n # this condition will never be met and\n # self.idmdl will remain None (set in super()'s init)\n if p.name_attr.value == 'SSID':\n self.idmdl = UtilsStfl.idmdl_by_ssid(p.content, self.chart.idmdl)\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Src' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Src(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'src {\\n'\n if self.idmdl:\n str_ += f'id {self.idmdl}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass State(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<state') and strval.endswith('</state>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.subviewSs = []\n self.children = None\n self.activeStateOutputs = []\n self.simulinks = []\n self.emls = []\n self.noteBoxs = []\n self.loggingInfos = []\n\n for x in self.inner_xmls:\n\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n # OBSERVATION: subviewer can be either state or chart\n if p.name_attr.value == 'subviewer':\n idmdl_subviewer = UtilsStfl.idmdl_by_idslx(idslx=p.content, create_if_needed=False)\n if not idmdl_subviewer:\n idmdl_subviewer = UtilsStfl.idmdl_by_ssid(ssid=p.content, idmdl_chart=self.chart.idmdl, create_if_needed=False)\n if idmdl_subviewer:\n self.idmdl_subviewer = idmdl_subviewer\n else:\n raise Exception(f\"Failed to set 'subviewer' of state.\")\n if p.name_attr.value == 'outputData':\n self.idmdl_outputData = UtilsStfl.idmdl_by_ssid(p.content, self.chart.idmdl)\n\n if x.tag == 'subviewS':\n self.subviewSs.append(SubviewS.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Children':\n self.children = Children.from_StflXmlElement(x)\n innerxml_used[x] = True\n\n if x.tag == 'activeStateOutput':\n self.activeStateOutputs.append(ActiveStateOutput.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'simulink':\n self.simulinks.append(Simulink.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'eml':\n self.emls.append(Eml.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'noteBox':\n self.noteBoxs.append(NoteBox.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'loggingInfo':\n self.loggingInfos.append(LoggingInfo.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'State' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return State(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'state {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'ssIdNumber {self.ssid}\\n'\n str_ += f'treeNode {self.treeNode}\\n'\n str_ += f'chart {self.chart.idmdl}\\n'\n str_ += f'subviewer {self.idmdl_subviewer}\\n'\n\n if self.firstData:\n str_ += f'firstData {self.firstData.idmdl}\\n'\n if self.firstEvent:\n str_ += f'firstEvent {self.firstEvent.idmdl}\\n'\n if self.firstJunction:\n str_ += 
f'firstJunction {self.firstJunction.idmdl}\\n'\n if self.firstTransition:\n str_ += f'firstTransition {self.firstTransition.idmdl}\\n'\n if self.idmdl_outputData:\n str_ += f'outputData {self.idmdl_outputData}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.subviewSs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.activeStateOutputs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.simulinks:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.emls:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.noteBoxs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.loggingInfos:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n\n if self.children:\n str_ += f'{self.children.strmdl}\\n'\n\n return str_\n\n\nclass Stateflow(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<Stateflow') and strval.endswith('</Stateflow>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.machines = []\n self.instances = []\n self.children = None\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'machine':\n self.machines.append(Machine.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'instance':\n self.instances.append(Instance.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'Children':\n self.children = Children.from_StflXmlElement(x)\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Stateflow' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Stateflow(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'Stateflow {\\n\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.machines:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.instances:\n str_ += f'{x.strmdl}\\n'\n\n if self.children:\n str_ += f'{self.children.strmdl}\\n'\n\n str_ += '}'\n return str_\n\n\nclass SubviewS(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<subviewS') and strval.endswith('</subviewS>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'SubviewS' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return SubviewS(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'subviewS {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Target(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = 
strval.strip()\n assert strval.startswith('<target') and strval.endswith('</target>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Target' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Target(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'target {\\n'\n str_ += f'id {self.idmdl}\\n'\n str_ += f'linkNode {self.linkNode}\\n'\n str_ += f'machine {self.machine.idmdl}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Transition(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<transition') and strval.endswith('</transition>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.src = None\n self.dst = None\n self.slide = None\n self.has_subLink = False\n\n for x in self.inner_xmls:\n\n if x.tag == 'P':\n p = P.from_StflXmlElement(x)\n self.ps.append(p)\n innerxml_used[x] = True\n # OBSERVATION: subviewer can be either state or chart\n if p.name_attr.value == 'subviewer':\n idmdl_subviewer = UtilsStfl.idmdl_by_idslx(idslx=p.content, create_if_needed=False)\n if not idmdl_subviewer:\n idmdl_subviewer = UtilsStfl.idmdl_by_ssid(ssid=p.content, idmdl_chart=self.chart.idmdl, create_if_needed=False)\n if idmdl_subviewer:\n self.idmdl_subviewer = idmdl_subviewer\n else:\n raise Exception(f\"Failed to set 'subviewer' of transition.\")\n # OBSERVATION: firstSubWire is an id of some transition\n if p.name_attr.value == 'firstSubWire':\n self.idmdl_firstSubWire = UtilsStfl.idmdl_by_ssid(p.content, self.chart.idmdl)\n\n if p.name_attr.value == 'subLink':\n self.has_subLink = True\n tokens = p.content.split()\n self.idmdl_subLink1 = tokens[0][1:]\n self.idmdl_subLink2 = tokens[1]\n self.idmdl_subLink3 = tokens[2][:-1]\n\n # OBSERVATION: the ids in subLink attribute are transition ids\n # cannot just write \"if slef.idmdl_subLink1\", because bool('0') is True (unlike bool(0))\n if self.idmdl_subLink1 != '0':\n self.idmdl_subLink1 = UtilsStfl.idmdl_by_ssid(self.idmdl_subLink1, self.chart.idmdl)\n if self.idmdl_subLink2 != '0':\n self.idmdl_subLink2 = UtilsStfl.idmdl_by_ssid(self.idmdl_subLink2, self.chart.idmdl)\n if self.idmdl_subLink3 != '0':\n self.idmdl_subLink3 = UtilsStfl.idmdl_by_ssid(self.idmdl_subLink3, self.chart.idmdl)\n\n if x.tag == 'src':\n self.src = Src.from_StflXmlElement(x)\n innerxml_used[x] = True\n\n if x.tag == 'dst':\n self.dst = Dst.from_StflXmlElement(x)\n innerxml_used[x] = True\n\n if x.tag == 'slide':\n self.slide = Slide.from_StflXmlElement(x)\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Transition' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Transition(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'transition {\\n'\n str_ += 
f'id {self.idmdl}\\n'\n str_ += f'ssIdNumber {self.ssid}\\n'\n str_ += f'linkNode {self.linkNode}\\n'\n str_ += f'chart {self.chart.idmdl}\\n'\n str_ += f'subviewer {self.idmdl_subviewer}\\n'\n\n if self.idmdl_firstSubWire:\n str_ += f'firstSubWire {self.idmdl_firstSubWire}\\n'\n\n if self.has_subLink:\n str_ += f'subLink {self.subLink}\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '\\n\\n'\n str_ += f'{self.src.strmdl}\\n'\n str_ += f'{self.dst.strmdl}\\n'\n str_ += f'{self.slide.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Type(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<type') and strval.endswith('</type>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n self.fixpts = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n if x.tag == 'fixpt':\n self.fixpts.append(Fixpt.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Type' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Type(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'type {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n for x in self.fixpts:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\nclass Unit(StflXmlElement):\n def __init__(self, strval, parent_xml):\n strval = strval.strip()\n assert strval.startswith('<unit') and strval.endswith('</unit>')\n super().__init__(strval, parent_xml)\n\n innerxml_used = {x: False for x in self.inner_xmls if x.type == 'xml'}\n self.ps = []\n\n for x in self.inner_xmls:\n if x.tag == 'P':\n self.ps.append(P.from_StflXmlElement(x))\n innerxml_used[x] = True\n\n for ix, u in innerxml_used.items():\n if not u:\n raise Exception(f\"Inner XML of 'Unit' not used.\\nUnused XML:\\n\\n{ix.strval}\")\n\n @classmethod\n def from_StflXmlElement(cls, stfl_xml_element):\n return Unit(stfl_xml_element.strval, stfl_xml_element.parent_xml)\n\n @property\n def strmdl(self):\n str_ = 'unit {\\n'\n\n for x in self.attrs:\n if not x.name in self._id_based_mdl_attrs:\n str_ += f'{x.name} \"{x.value}\"\\n'\n\n for x in self.ps:\n if not x.name_attr.value in self._id_based_mdl_attrs:\n str_ += f'{x.strmdl}\\n'\n\n str_ += '}\\n\\n'\n return str_\n\n\ndef main():\n import glob\n\n files = glob.glob('**/stateflow-preprocessed.xml', recursive=True)\n for file in files[-1:]:\n print(file)\n Utils.copy_file(file, 'input.xml')\n\n with open('input.xml') as file:\n xml = file.read()\n xml = xml.strip()\n stateflow = Stateflow(strval=xml, parent_xml=None)\n mdl = stateflow.strmdl\n with open('output.xml', 'w') as file:\n file.write(mdl)\n\n\nif __name__ == '__main__':\n main()\n pass\n", "id": "2247690", "language": "Python", "matching_score": 4.66970682144165, "max_stars_count": 2, "path": "tags_stateflow.py" }, { "content": "#!/usr/bin/python3\n\nfrom commons import * \nfrom tags_stateflow import * \n\n\nclass 
Transformer:\n\n @classmethod\n def set_treeNodes_and_linkNodes(cls, stateflow):\n \"\"\"Set all the treeNodes and linkNodes.\"\"\"\n\n def set_linkNode(element, siblings):\n \"\"\"Set the linkNode (last 2 ids) of the element. \n Assumptions: \n - The first id in linkNode is assumed to be already set. \n - arg siblings includes this element too.\n - siblings contains elements in the same order as they appear in stateflow.xml\"\"\"\n\n # only 1 element\n if len(siblings) == 1:\n element.idmdl_linkNode2 = '0' # no previous sibling\n element.idmdl_linkNode3 = '0' # no next sibling\n\n # the element is the first sibling and has other siblings\n elif element == siblings[0]:\n element.idmdl_linkNode2 = '0' # no previous sibling\n element.idmdl_linkNode3 = siblings[1].idmdl\n\n # the element is the last sibling and has other siblings\n elif element == siblings[-1]:\n element.idmdl_linkNode2 = siblings[-2].idmdl\n element.idmdl_linkNode3 = '0' # no next sibling\n\n # there are at least 3 siblings (this element included) and\n # this element has both previous and next siblings\n else:\n index = siblings.index(element)\n element.idmdl_linkNode2 = siblings[index-1].idmdl\n element.idmdl_linkNode3 = siblings[index+1].idmdl\n\n def set_nodes_in_state_recursively(state, sibling_states):\n \"\"\"Set the treeNode (of state) and linkNode (of other elements in the state)\n recursively. \n ASSUMPTION: The first id of any treeNode or linkNode is already set.\"\"\"\n\n state.idmdl_treeNode2 = '0' # will be overridden if a child state exists\n\n # set treeNode3 and treeNode4\n # only 1 state\n if len(sibling_states) == 1:\n state.idmdl_treeNode3 = '0' # no previous sibling\n state.idmdl_treeNode4 = '0' # no next sibling\n\n # the state is the first sibling and has other siblings\n elif state == sibling_states[0]:\n state.idmdl_treeNode3 = '0' # no previous sibling\n state.idmdl_treeNode4 = sibling_states[1].idmdl\n\n # the state is the last sibling and has other siblings\n elif state == sibling_states[-1]:\n state.idmdl_treeNode3 = sibling_states[-2].idmdl\n state.idmdl_treeNode4 = '0' # no next sibling\n\n # there are at least 3 siblings (this state included) and\n # this state has both previous and next siblings\n else:\n index = sibling_states.index(state)\n state.idmdl_treeNode3 = sibling_states[index-1].idmdl\n state.idmdl_treeNode4 = sibling_states[index+1].idmdl\n\n # -------------------------------------------------------\n\n if state.children:\n if state.children.datas:\n state.firstData = state.children.datas[0]\n if state.children.events:\n state.firstEvent = state.children.events[0]\n if state.children.states:\n state.idmdl_treeNode2 = state.children.states[0].idmdl\n\n for data in state.children.datas:\n set_linkNode(data, state.children.datas)\n for event in state.children.events:\n set_linkNode(event, state.children.events)\n for junction in state.children.junctions:\n set_linkNode(junction, state.children.junctions)\n for child_state in state.children.states:\n set_nodes_in_state_recursively(\n child_state, state.children.states)\n for transition in state.children.transitions:\n set_linkNode(transition, state.children.transitions)\n\n for machine in stateflow.machines:\n if machine.children:\n for target in machine.children.targets:\n set_linkNode(element=target,\n siblings=machine.children.targets)\n for chart in machine.children.charts:\n # TODO: the information about 1st, 3rd, and 4th ids of treeNode is not\n # known, so we are setting them to '0', update this once the information\n # is 
known\n chart.idmdl_treeNode1 = '0'\n chart.idmdl_treeNode2 = '0' # is overridden below\n chart.idmdl_treeNode3 = '0'\n chart.idmdl_treeNode4 = '0'\n\n if chart.children:\n if chart.children.states:\n chart.idmdl_treeNode2 = chart.children.states[0].idmdl\n\n for data in chart.children.datas:\n set_linkNode(data, chart.children.datas)\n for event in chart.children.events:\n set_linkNode(event, chart.children.events)\n for junction in chart.children.junctions:\n set_linkNode(junction, chart.children.junctions)\n for state in chart.children.states:\n set_nodes_in_state_recursively(\n state, chart.children.states)\n for transition in chart.children.transitions:\n set_linkNode(\n transition, chart.children.transitions)\n\n @classmethod\n def set_firstChildren(cls, stateflow):\n \"\"\"Set the following attributes, if they exist\n - for machines, set firstTarget \n - for states, set firstTransition, firstJunction, firstData, firstEvent\n - for charts, set fistData, firstEvent, firstTransition \n\n \"\"\"\n\n # TODO: we might still be missing to set all the firstXXX. \n # As new firstXXX are discovered in various elements, update this implementation \n\n def set_firstChildren_of_state(state):\n if state.children:\n if state.children.datas: \n state.firstData = state.children.datas[0]\n if state.children.events: \n state.firstEvent = state.children.events[0]\n if state.children.junctions: \n state.firstJunction = state.children.junctions[0]\n if state.children.transitions:\n state.firstTransition = state.children.transitions[0]\n if state.children.states: # recursion\n for x in state.children.states:\n set_firstChildren_of_state(x)\n\n for machine in stateflow.machines:\n if machine.children:\n for chart in machine.children.charts:\n if chart.children:\n if chart.children.datas:\n chart.firstData = chart.children.datas[0]\n if chart.children.events:\n chart.firstEvent = chart.children.events[0]\n if chart.children.junctions:\n chart.firstJunction = chart.children.junctions[0]\n if chart.children.transitions:\n chart.firstTransition = chart.children.transitions[0] \n for state in chart.children.states:\n set_firstChildren_of_state(state) # recursively\n \n if machine.children.targets:\n machine.firstTarget = machine.children.targets[0]\n\n \n# public funciton \ndef stateflow_xml2mdl(xml):\n \"\"\"Convert stateflow xml string to mdl string\"\"\"\n xml = xml.strip()\n xml = Utils.transform_self_closing_tag(xml)\n stateflow = Stateflow(xml, parent_xml=None) \n Transformer.set_treeNodes_and_linkNodes(stateflow)\n Transformer.set_firstChildren(stateflow)\n\n mdl = stateflow.strmdl \n mdl = Utils.remove_multiple_linegaps(mdl)\n mdl = Utils.replacements4mdl(mdl)\n return mdl \n\n\n\ndef main():\n \n input_file = 'input.xml'\n with open(input_file, 'r') as file:\n xml = file.read() # whole file as single string\n\n mdl = stateflow_xml2mdl(xml) or '--- empty ---'\n\n with open('op-stateflow.mdl', 'w') as file:\n file.write(mdl)\n\n # print(mdl)\n\n\ndef test():\n input_file = 'input.xml'\n with open(input_file, 'r') as file:\n xml = file.read() # whole file as single string\n xml = Utils.transform_self_closing_tag(xml)\n\n Transformer._xml = xml\n Transformer.set_treeNodes_and_linkNodes(\n None)\n\n\nif __name__ == '__main__':\n main()\n # test()\n", "id": "1393853", "language": "Python", "matching_score": 1.6350195407867432, "max_stars_count": 2, "path": "transform_stateflow.py" } ]
2.291789
peterchun2000
[ { "content": "import math\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom multiprocessing import Process\n\nimport os\nimport requests\nfrom selenium import webdriver\nimport selenium as se\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\n\nfrom threading import Thread\n#import time\nimport threading\nfrom queue import Queue\nimport time\nfrom time import sleep\nimport queue\nimport pickle\n# _________________________________________________\nclass Proffessor:\n def __init__(self, name, avg_gpa, samp_size):\n self.name = name\n self.avg_gpa = 0\n self.samp_size = 0\n\n def __eq__(self, other):\n if isinstance(other, Proffessor):\n return self.name.lower() == other.name.lower() \n return False\n \nclass Course_Rank:\n def __init__(self, course_name):\n self.course_name = course_name\n self.gpa_rank = 0\n self.samp_rank = 0\n self.comb_rank = 0\n self.prof_rank = 0\n\n def __eq__(self, other):\n if isinstance(other, Course):\n return self.course_name == other.course_name\n return False\n\nclass Course:\n def __init__(self, course_name):\n self.course_name = course_name\n self.prof_list = []\n self.gen_eds = []\n self.avg_gpa = 0\n self.samp_num = 0\n self.gpa_rank = 0\n self.samp_rank = 0\n self.comb_rank = 0\n\n def get_course_name(self):\n return self.course_name\n\n def add_section(self, section):\n self.prof_list.append(section)\n\n def __eq__(self, other):\n if isinstance(other, Course):\n return self.course_name == other.course_name\n return False\n\n\nclass Arnav:\n def __init__(self, course_name, val):\n self.course_name = course_name\n self.val = val\n\n\n# global vars\ncourse_dict = dict()\n\n\n\ndef make_temp(gen_ed):\n result = []\n for key, value in course_dict.items():\n if(gen_ed in value.gen_eds):\n result.append(value)\n return result\n\n\ndef get_best_gpa(gen_ed):\n unordered_list = make_temp(gen_ed)\n output = f'{gen_ed} Best of GPA:\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n top_gpa = merge_sort(unordered_list, 'gpa')\n index = 1\n for course in top_gpa:\n if(index < 31):\n print(index, ')', course.course_name, \":\", course.avg_gpa)\n output = f'{index}) {course.course_name}, avg_gpa: {course.avg_gpa}\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n index += 1\n output = '\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n\ncourse_rank_list = dict()\ndef get_best_of_both(gen_ed):\n unordered_list = make_temp(gen_ed)\n output = f'{gen_ed} best of both: \\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n top_gpa = merge_sort(unordered_list, 'gpa')\n\n top_samp = merge_sort(unordered_list, 'samp')\n # for idx, val in enumerate(top_gpa):\n # combined_rec[idx] = top_gpa.index()\n\n for course in unordered_list:\n gpa_idx = top_gpa.index(course)\n samp_idx = top_samp.index(course)\n course_rank_list[course.course_name] = Course_Rank(course.course_name)\n course_rank_list[course.course_name].gpa_rank = gpa_idx\n course_rank_list[course.course_name].samp_rank = samp_idx\n course_rank_list[course.course_name].comb_rank = samp_idx + gpa_idx\n # course.gpa_rank = gpa_idx\n # course.samp_rank = samp_idx\n # course.comb_rank = samp_idx + gpa_idx\n\n 
best_of_both = merge_sort(unordered_list, 'comb')\n index = 1\n for val in best_of_both:\n if(index < 31):\n print(index, ')', val.course_name, \":\",\n val.gpa_rank, \":\", val.samp_rank)\n output = f'{index}) {val.course_name} (gpa):{val.gpa_rank} (samplSz):{val.samp_num}\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n index += 1\n output = '\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n\n\ndef arnav(gen_ed):\n unordered_list = make_temp(gen_ed)\n best_list = []\n output = f'{gen_ed} best with Arnav alg: \\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n for course in unordered_list:\n best_list.append(Arnav(course.course_name, float(\n course.avg_gpa * math.log2(course.samp_num))))\n best_of_both = merge_sort(best_list, 'norm')\n index = 1\n for elt in best_of_both:\n if(index < 31):\n print(index, ')', elt.course_name, \":\", elt.val)\n output = f'{index}) {elt.course_name} val:{elt.val}\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n index += 1\n output = '\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n\n\ndef merge_norm(left_half, right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0].val > right_half[0].val:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\n\ndef merge_sort(unsorted_list, type_in):\n if len(unsorted_list) <= 1:\n return unsorted_list\n # Find the middle point and devide it\n middle = len(unsorted_list) // 2\n left_list = unsorted_list[:middle]\n right_list = unsorted_list[middle:]\n\n left_list = merge_sort(left_list, type_in)\n right_list = merge_sort(right_list, type_in)\n if(type_in == 'gpa'):\n return list(merge_with_gpa(left_list, right_list))\n elif(type_in == 'samp'):\n return list(merge_with_samp(left_list, right_list))\n elif (type_in == 'comb'):\n return list(merge_with_comb(left_list, right_list))\n elif (type_in == 'norm'):\n return list(merge_norm(left_list, right_list))\n\n\ndef merge_with_gpa(left_half, right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0].avg_gpa > right_half[0].avg_gpa:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\n\ndef merge_with_samp(left_half, right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0].samp_num > right_half[0].samp_num:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\n\ndef merge_with_comb(left_half, right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0].comb_rank < right_half[0].comb_rank:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\n\ngens_list = {\"DSNL\"}\nif __name__ == '__main__':\n\n with open('course_data.pkl', 'rb') as input:\n while(True):\n try:\n curr_course = pickle.load(input)\n except:\n break\n 
course_dict[curr_course.course_name] = curr_course\n\n for key, value in course_dict.items():\n # print(course.course_name)\n # print(value.course_name)\n # print(value.avg_gpa)\n for key2, prof in value.prof_list.items():\n print(prof.name)\n print(prof.avg_gpa)\n print(prof.samp_size)\n print(\"\")\n\n for gen in gens_list:\n get_best_of_both(gen)\n print(\"_____________________________\")\n # for key, value in course_dict.items():\n # if (key == \"AOSC200\"):\n # print(\"asdfasdfasdf\")\n # print(value.course_name)\n # for gen in value.gen_eds:\n # print(gen)\n", "id": "6816590", "language": "Python", "matching_score": 5.309909820556641, "max_stars_count": 0, "path": "old_ranking.py" }, { "content": "import math\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom multiprocessing import Process\n\nimport os\nimport requests\nfrom selenium import webdriver\nimport selenium as se\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\n\nfrom threading import Thread\n#import time\nimport threading\nfrom queue import Queue\nimport time\nfrom time import sleep\nimport queue\nimport pickle\n# _________________________________________________\nclass Proffessor:\n def __init__(self, name, avg_gpa, samp_size):\n self.name = name\n self.avg_gpa = 0\n self.samp_size = 0\n\n def __eq__(self, other):\n if isinstance(other, Proffessor):\n return self.name.lower() == other.name.lower() \n return False\n \nclass Course_Rank:\n def __init__(self, course_name):\n self.course_name = course_name\n self.arnav_val = 0\n self.prof_avg_gpa = 0\n self.prof_high_gpa = 0\n self.high_prof_arnav_val = 0\n self.avg_prof_arnav_val = 0\n self.high_prof_name = \"\"\n\n def __eq__(self, other):\n if isinstance(other, Course):\n return self.course_name.lower() == other.course_name.lower()\n return False\n\nclass Course:\n def __init__(self, course_name):\n self.course_name = course_name\n self.prof_list = dict()\n self.gen_eds = []\n self.avg_gpa = 0\n self.samp_num = 0\n self.gpa_rank = 0\n self.samp_rank = 0\n self.comb_rank = 0\n\n def __eq__(self, other):\n if isinstance(other, Course):\n return self.course_name.lower() == other.course_name.lower()\n return False\n\n\n\nclass Arnav:\n def __init__(self, course_name, val):\n self.course_name = course_name\n self.val = val\n\n\n# global vars\ncourse_dict = dict()\n\n\ndef make_temp(gen_ed):\n result = []\n for key, value in course_dict.items():\n if(gen_ed in value.gen_eds):\n result.append(value)\n return result\n\n\ndef find_course_index(list_in, course_in):\n try:\n return list_in.index(Course_Rank(course_in))\n except:\n return -1\n\ndef add_prof_data(gen_ed):\n unordered_list = make_temp(gen_ed)\n result_list = []\n for course in unordered_list:\n high_prof_arnav_val = 0\n high_prof_name = \"\"\n avg_prof_arnav_val = 0\n result_list.append(Course_Rank(course.course_name))\n for key2, prof in course.prof_list.items():\n prof_arnav_val = float(prof.avg_gpa * math.log2(prof.samp_size+1))\n avg_prof_arnav_val += prof_arnav_val\n if (prof_arnav_val > high_prof_arnav_val):\n high_prof_arnav_val = prof_arnav_val\n high_prof_name = prof.name\n if len(course.prof_list) != 0:\n avg_prof_arnav_val = avg_prof_arnav_val / len(course.prof_list)\n else:\n 
avg_prof_arnav_val = 0\n c_index = find_course_index(result_list, course.course_name)\n result_list[c_index].high_prof_arnav_val = high_prof_arnav_val\n result_list[c_index].avg_prof_arnav_val = avg_prof_arnav_val\n result_list[c_index].high_prof_name = high_prof_name\n return result_list\n\ndef prof_rank(gen_ed):\n best_list = add_prof_data(gen_ed)\n output = f'{gen_ed} best with Arnav_Prof alg: \\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n best_of_both = merge_sort(best_list, 'prof_val')\n index = 1\n for elt in best_of_both:\n #controls how many courses are printed per gen ed\n if(index < 200):\n print(index, ')', elt.course_name, \":\", elt.high_prof_arnav_val , \"with \", elt.high_prof_name)\n output = f'{index}) {elt.course_name} val:{elt.high_prof_arnav_val} with {elt.high_prof_name} \\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n index += 1\n output = '\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n\ndef arnav(gen_ed):\n unordered_list = make_temp(gen_ed)\n best_list = []\n output = f'{gen_ed} best with Arnav alg: \\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n for course in unordered_list:\n best_list.append(Course_Rank(course.course_name))\n c_index = find_course_index(best_list, course.course_name)\n best_list[c_index].arnav_val = float(course.avg_gpa * math.log2(course.samp_num))\n best_of_both = merge_sort(best_list, 'norm')\n index = 1\n for elt in best_of_both:\n #controls how many courses are printed per gen ed\n if(index < 200):\n print(index, ')', elt.course_name, \":\", elt.arnav_val)\n output = f'{index}) {elt.course_name} val:{elt.arnav_val}\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n index += 1\n output = '\\n'\n with open('data.txt', 'a') as the_file:\n the_file.write(output)\n\n\ndef merge_norm(left_half, right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0].arnav_val > right_half[0].arnav_val:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\ndef merge_prof_val(left_half, right_half):\n res = []\n while len(left_half) != 0 and len(right_half) != 0:\n if left_half[0].high_prof_arnav_val > right_half[0].high_prof_arnav_val:\n res.append(left_half[0])\n left_half.remove(left_half[0])\n else:\n res.append(right_half[0])\n right_half.remove(right_half[0])\n if len(left_half) == 0:\n res = res + right_half\n else:\n res = res + left_half\n return res\n\n\ndef merge_sort(unsorted_list, type_in):\n if len(unsorted_list) <= 1:\n return unsorted_list\n # Find the middle point and devide it\n middle = len(unsorted_list) // 2\n left_list = unsorted_list[:middle]\n right_list = unsorted_list[middle:]\n\n left_list = merge_sort(left_list, type_in)\n right_list = merge_sort(right_list, type_in)\n\n if (type_in == 'norm'):\n return list(merge_norm(left_list, right_list))\n elif (type_in == 'prof_val'):\n return list(merge_prof_val(left_list, right_list))\n\n\ngens_list = {\"DSHS\", \"DSHU\", \"DSNS\", \"DSNL\", \"DSSP\", \"DVCC\", \"DVUP\", \"SCIS\"}\nif __name__ == '__main__':\n open('data.txt', 'w').close()\n with open('course_data.pkl', 'rb') as input:\n while(True):\n try:\n curr_course = pickle.load(input)\n except:\n break\n course_dict[curr_course.course_name] = curr_course\n\n # for key, value in course_dict.items():\n # # 
print(course.course_name)\n # # print(value.course_name)\n # # print(value.avg_gpa)\n # for key2, prof in value.prof_list.items():\n # print(prof.name)\n # print(prof.avg_gpa)\n # print(prof.samp_size)\n # print(\"\")\n\n for gen in gens_list:\n print(gen)\n \n # this will run a alg that takes into the account individual proffessor avg GPA's\n prof_rank(gen)\n print(\"_____________________________\")\n \n # this will run the arnav alg\n arnav(gen)\n\n", "id": "6503397", "language": "Python", "matching_score": 3.83901047706604, "max_stars_count": 0, "path": "gen_ed_ranks.py" }, { "content": "import math\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom multiprocessing import Process\n\nimport os\nimport requests\nfrom selenium import webdriver\nimport selenium as se\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\n\nfrom threading import Thread\n#import time\nimport threading\nfrom queue import Queue\nimport time\nfrom time import sleep\nimport queue\nimport pickle\n\n# _________________________________________________\n\n\nclass Proffessor:\n def __init__(self, name, avg_gpa, samp_size):\n self.name = name\n self.avg_gpa = 0\n self.samp_size = 0\n\n def __eq__(self, other):\n if isinstance(other, Proffessor):\n return self.name.lower() == other.name.lower()\n return False\n\n\ndef prof_data(course_in, gen_ed, prof_name, driver):\n try:\n pterp_url = 'https://planetterp.com/'\n driver.get(pterp_url)\n\n search = WebDriverWait(driver, 2).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"main-search\"]')))\n search.send_keys(prof_name)\n\n enter = WebDriverWait(driver, 2).until(\n EC.presence_of_element_located((By.XPATH, '/html/body/form/button')))\n enter.click()\n try:\n view = WebDriverWait(driver, 3).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"grades-button\"]')))\n view.click()\n except TimeoutException:\n return -1\n\n try:\n # clicks on drop down menu\n\n course_xpath = WebDriverWait(driver, 20).until(\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"grades-by-course\"]')))\n\n course_xpath.click()\n found = False\n for course in course_xpath.find_elements_by_tag_name('option'):\n # equals to same course\n if course.get_attribute(\"value\") == course_in:\n course.click()\n found = True\n if found == False:\n return -1\n\n except NoSuchElementException:\n return -1\n\n try:\n gpa_text = driver.find_element(\n By.XPATH, '//*[@id=\"grade-statistics\"]').text\n except NoSuchElementException:\n return -1\n start_gpa_i = gpa_text.find(':')\n end_gpa_i = gpa_text.find(' ', start_gpa_i+2)\n if(start_gpa_i == -1):\n return -1\n gpa = float(gpa_text[start_gpa_i+2:end_gpa_i])\n\n index_of_course = find_course(gen_ed, course_in)\n # adds the gpa to the course obj\n samp_num_st = gpa_text.find('between')+8\n samp_num_end = gpa_text.find(' ', samp_num_st)\n samp_num = int(gpa_text[samp_num_st:samp_num_end].replace(',', ''))\n\n # adds the sample number to the course obj\n\n all_gens_dict[gen_ed][index_of_course].prof_list[prof_name].avg_gpa = gpa\n all_gens_dict[gen_ed][index_of_course].prof_list[prof_name].samp_size = samp_num\n except:\n print(\"failed\")\n return -1\n\n\nclass Course:\n def __init__(self, course_name):\n 
self.course_name = course_name\n self.prof_list = dict()\n self.gen_eds = []\n self.avg_gpa = 0\n self.samp_num = 0\n self.gpa_rank = 0\n self.samp_rank = 0\n self.comb_rank = 0\n\n def get_course_name(self):\n return self.course_name\n\n def __eq__(self, other):\n if isinstance(other, Course):\n return self.course_name.lower() == other.course_name.lower()\n return False\n\n\n# global vars\nall_gens_dict = dict()\n\n\ndef get_courses(gen_ed, driver):\n global all_gens_dict\n gen_url = \"https://app.testudo.umd.edu/soc/gen-ed/201908/\" + gen_ed\n print(gen_url)\n driver.get(gen_url)\n\n all_dept_list = driver.find_element(\n By.XPATH, '//*[@id=\"courses-page\"]')\n dept_row = all_dept_list.find_elements(\n By.CLASS_NAME, \"course-prefix-container\")\n for dept in dept_row:\n course_list = dept.find_elements(By.CLASS_NAME, \"course\")\n for course in course_list:\n course_name = course.get_attribute(\"id\")\n\n # adds each course to the set within the dict[gen_ed]\n all_gens_dict[gen_ed].append((Course(course_name)))\n\n # start of testing\n try:\n section_set = course.find_element_by_class_name(\n \"sections-fieldset\")\n view = WebDriverWait(section_set, 2).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'toggle-sections-link-text')))\n # clicks on expand sections\n view.click()\n\n section_grid = WebDriverWait(course, 10).until(\n EC.presence_of_element_located((By.CLASS_NAME, 'sections-container')))\n section_list = section_grid.find_elements(\n By.CLASS_NAME, \"delivery-f2f\")\n index_of_course = find_course(gen_ed, course_name)\n for section in section_list:\n s1 = section.find_element_by_class_name(\"section-instructor\")\n # prof_name = section.find_elements(By.XPATH, \"//div[@class='section-instructor']\")\n if s1.text != \"Instructor: TBA\":\n all_gens_dict[gen_ed][index_of_course].prof_list[s1.text] = (\n Proffessor(s1.text, 0, 0))\n except:\n continue\n\n\ndef add_gened(gen_ed):\n global all_gens_dict\n all_gens_dict[gen_ed] = []\n\n\ndef add_gpa_field(gen_ed, course_in, driver):\n global all_gens_dict\n pterp_url = 'https://planetterp.com/course/'+course_in\n try:\n driver.get(pterp_url)\n except TimeoutException:\n print(\"failed\")\n return -1\n try:\n gpa_text = driver.find_element(\n By.XPATH, '//*[@id=\"course-grades\"]/p[1]').text\n except NoSuchElementException:\n return -1\n start_gpa_i = gpa_text.find(':')\n end_gpa_i = gpa_text.find(' ', start_gpa_i+2)\n if(start_gpa_i == -1):\n return -1\n gpa = float(gpa_text[start_gpa_i+2:end_gpa_i])\n index_of_course = find_course(gen_ed, course_in)\n # adds the gpa to the course obj\n all_gens_dict[gen_ed][index_of_course].avg_gpa = gpa\n\n samp_num_st = gpa_text.find('between')+8\n samp_num_end = gpa_text.find(' ', samp_num_st)\n samp_num = int(gpa_text[samp_num_st:samp_num_end].replace(',', ''))\n\n # adds the sample number to the course obj\n all_gens_dict[gen_ed][index_of_course].samp_num = samp_num\n\n\ndef find_course(gen_ed, course_in):\n try:\n return all_gens_dict[gen_ed].index(Course(course_in))\n except:\n return -1\n\n\ndef remove_empty(gen_ed):\n for course in all_gens_dict[gen_ed][:]:\n if(course.avg_gpa == 0):\n all_gens_dict[gen_ed].remove(course)\n\n\ndef run(q):\n while not q.empty():\n gen = q.get()\n try:\n options = se.webdriver.ChromeOptions()\n # chrome is set to headless\n options.add_argument('headless')\n options.add_argument('--no-sandbox')\n options.add_argument('--no-default-browser-check')\n options.add_argument('--disable-gpu')\n options.add_argument('--disable-extensions')\n 
options.add_argument('--disable-default-apps')\n driver = se.webdriver.Chrome(chrome_options=options)\n\n add_gened(gen)\n get_courses(gen, driver)\n\n for course in all_gens_dict[gen]:\n for key, val in course.prof_list.items():\n prof_data(course.course_name, gen, key, driver)\n\n for course in all_gens_dict[gen]:\n add_gpa_field(gen, course.course_name, driver)\n\n remove_empty(gen)\n\n finally:\n driver.quit()\n q.task_done()\n\n\njobs = Queue()\ngens_list = {\"DSHS\", \"DSHU\", \"DSNS\", \"DSNL\", \"DSSP\", \"DVCC\", \"DVUP\", \"SCIS\"}\ncourse_dict = dict()\nif __name__ == '__main__':\n\n start_time = time.time()\n open('data.txt', 'w').close()\n for gen in gens_list:\n jobs.put(gen)\n\n # this changes the number of threads that will run at once (recommended number: 4 for normal computer)\n for i in range(4):\n worker = threading.Thread(target=run, args=(jobs,))\n worker.start()\n\n jobs.join()\n\n print(\"--- Completed in %s seconds ---\" %\n round(time.time() - start_time, 2))\n\n # adds the course into the dict\n for key, value in all_gens_dict.items():\n for course in value:\n course_dict[course.course_name] = course\n\n # adds the gen eds fields to each of the keys/val pair\n for key, courses in all_gens_dict.items():\n for course in courses:\n course_dict[course.course_name].gen_eds.append(key)\n\n # uploads it to the db\n with open('course_data.pkl', 'wb') as output:\n for key, value in course_dict.items():\n pickle.dump(value, output, pickle.HIGHEST_PROTOCOL)\n", "id": "5722810", "language": "Python", "matching_score": 4.21348237991333, "max_stars_count": 0, "path": "grab_data.py" }, { "content": "import os\nimport requests\nfrom selenium import webdriver\nimport selenium as se\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\n\nfrom threading import Thread\n\nimport time\nfrom time import sleep\nfrom random import randint\nfrom decimal import Decimal\n\nimport pytz\nimport datetime\n\nmessage_sent = []\nbot_id = 'bot_id'\nrequest_token = 'request_token'\nuser_id = 'user_id'\ngroup_id = 'group_id'\nusername = \"username\"\npassword = \"password\"\n\ncourse_list = []\ncourse_num = 0\nfirst_course_in = False\nneed_login = False\n\noptions = se.webdriver.ChromeOptions()\n\n# chrome is set to headless\noptions.add_argument('headless')\n\ndriver = se.webdriver.Chrome(chrome_options=options)\n\n\nclass Course:\n def __init__(self, course_name):\n self.course_name = course_name\n self.section_list = []\n\n def get_course_name(self):\n return self.course_name\n\n def add_section(self, section):\n self.section_list.append(section)\n\n\ndef login(username, password):\n try:\n username_input = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"username\"]')))\n username_input.send_keys(username)\n password_input = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"password\"]')))\n password_input.send_keys(password)\n sendd = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '/html/body/div[2]/div/div[2]/div[1]/form/div[4]/button')))\n sendd.click()\n # waits until authentification is finished\n while('https://app.testudo.umd.edu/' not in str(driver.current_url)):\n sleep(1)\n except:\n url = 
\"https://app.testudo.umd.edu/main/dropAdd\"\n driver.get(url)\n sleep(1)\n return -1\n\n\ndef get_term(user_term):\n try:\n table = driver.find_element(\n By.XPATH, '//*[@id=\"mainContent\"]/div[2]/div/div[1]/div/div[2]')\n list_terms = table.find_elements(By.CLASS_NAME, \"ng-binding\")\n counter = 1\n for term in list_terms:\n if user_term == term.text:\n term_xpath = '//*[@id=\"mainContent\"]/div[2]/div/div[1]/div/div[2]/button[' + str(\n counter) + \"]\"\n fall = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, term_xpath)))\n fall.click()\n counter += 1\n except:\n return -1\n\n\ndef sign_out_error():\n got_error = True\n while(got_error == True):\n try:\n error = WebDriverWait(driver, 5).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"mainContent\"]/div[2]/button')))\n error.click()\n except:\n got_error = False\n\n\ndef submit_course_by_name(courseName):\n try:\n course = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"crs_pending\"]/td[2]/input')))\n course.send_keys(courseName)\n sleep(1)\n submit = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"submit_changes\"]')))\n submit.click()\n except:\n driver.refresh\n sleep(2)\n sign_again = check_exists_by_xpath(\n '//*[@id=\"mainContent\"]/div[2]/button')\n full_login = check_exists_by_xpath('//*[@id=\"username\"]')\n if(sign_again):\n sign_out_error()\n return -1\n elif(full_login):\n login(username, password)\n return -1\n else:\n return -1\n\n\ndef get_section_data(course):\n sleep(randint(10, 15))\n try:\n table_id = driver.find_element(\n By.XPATH, '//*[@id=\"drop_add_form\"]/table/tbody/tr[6]/td/div/div[2]/table/tbody')\n except:\n sleep(2)\n sign_again = check_exists_by_xpath(\n '//*[@id=\"mainContent\"]/div[2]/button')\n full_login = check_exists_by_xpath('//*[@id=\"username\"]')\n need_cancel = check_exists_by_xpath(\n '//*[@id=\"drop_add_form\"]/table/tbody/tr[6]/td/div/div[3]/button[2]')\n if(need_cancel):\n cancel = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"drop_add_form\"]/table/tbody/tr[6]/td/div/div[3]/button[2]')))\n cancel.click()\n course_list.remove(get_course_index(course))\n elif(sign_again):\n sign_out_error()\n return -1\n elif(full_login):\n login(username, password)\n return -1\n else:\n return -1\n # get all of the rows in the table\n rows = table_id.find_elements(By.TAG_NAME, \"tr\")\n course_index = get_course_index(course)\n for row in rows:\n # Get the columns (all the column 2)\n # note: index start from 0, 1 is col 2\n section = row.find_elements(By.TAG_NAME, \"td\")[1]\n # note: index start from 0, 1 is col 2\n seats = row.find_elements(By.TAG_NAME, \"td\")[2]\n\n if(section.text in course_list[course_index].section_list):\n if(\"status on \"+course+\" \" + section.text+\" numSeates: \" + seats.text not in message_sent):\n post_params = {'bot_id': bot_id,\n 'text': \"status on \"+course+\" \" + section.text+\" numSeates: \" + seats.text}\n requests.post(\n 'https://api.groupme.com/v3/bots/post', params=post_params)\n message_sent.append(\n \"status on \"+course+\" \" + section.text+\" numSeates: \" + seats.text)\n\n cancel = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"drop_add_form\"]/table/tbody/tr[6]/td/div/div[3]/button[2]')))\n cancel.click()\n sleep(randint(3, 5))\n\n\ndef get_course_index(course):\n curr_counter = 0\n index_of_course = -1\n for curr_course in course_list:\n 
if(course.lower() == curr_course.course_name.lower()):\n index_of_course = curr_counter\n curr_counter += 1\n return index_of_course\n\n\ndef is_Testudo_on():\n east_tz = pytz.timezone('US/Eastern')\n\n now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)\n\n weekno = now.astimezone(east_tz).weekday()\n date_time = str(now.astimezone(east_tz))\n\n space_index = date_time.find(\" \")\n time = date_time[space_index:]\n # this is the current hour\n current_hour = time[1:3]\n\n is_weekend = False\n is_on_time = False\n # checks for 7-11 hours\n if(int(current_hour) > 6 and int(current_hour) < 23):\n is_on_time = True\n else:\n is_on_time = False\n # check for weekends\n if weekno < 5:\n is_weekend = True\n else:\n is_weekend = False\n # check both vars and returns\n if(is_weekend and is_on_time):\n return True\n else:\n return False\n\n\ndef get_messages():\n global course_list\n global course_num\n global first_course_in\n global need_login\n global group_id\n\n print(\"starting bot\")\n # if section not open, continuously check\n last_message = \"\"\n remove_mes = \"remove\"\n while True:\n request_params = {'token': request_token}\n request_params['limit'] = 1\n try:\n response_messages = requests.get(\n f'https://api.groupme.com/v3/groups/{group_id}/messages', params=request_params).json()['response']['messages']\n except:\n print(\"response error\")\n sleep(3)\n if(response_messages[0]['user_id'] == user_id and response_messages[0]['text'] != last_message):\n # list function\n did_cmmd = False\n if(response_messages[0]['text'].lower() == \"testing\"):\n print(\"testing\")\n post_params = {'bot_id': bot_id,\n 'text': \"still working\"}\n requests.post(\n 'https://api.groupme.com/v3/bots/post', params=post_params)\n last_message = \"asdf\"\n did_cmmd = True\n\n elif(response_messages[0]['text'].lower() == \"login\"):\n print(\"login commdn\")\n need_login = True\n last_message = \"asdf\"\n did_cmmd = True\n\n last_message = response_messages[0]['text']\n got_new = False\n try:\n index_of_space = response_messages['text'].find(\" \")\n # accepts new course\n new_course = response_messages[0]['text'][0:index_of_space]\n new_section_num = response_messages[0]['text'][index_of_space +\n 1: len(response_messages[0]['text'])]\n if(get_course_index(new_course) == -1):\n got_new = True\n did_cmmd == True\n except:\n print(last_message)\n\n # if this is a new course\n if (got_new == True and did_cmmd == False):\n print(\"creating new course\")\n # this is where we add a new course\n course_list.append(Course(new_course.lower()))\n course_index = get_course_index(new_course)\n course_list[course_index].section_list.append(\n new_section_num)\n course_num += 1\n first_course_in = True\n\n print(\"added new course\")\n did_cmmd = True\n # adds section to this list\n elif (got_new == False and did_cmmd == False):\n course_index = get_course_index(new_course)\n course_list[course_index].section_list.append(\n new_section_num)\n print(\"added new section\")\n did_cmmd = True\n\n did_cmmd = False\n\n\ndef stay_logged_in():\n try:\n driver.get('https://app.testudo.umd.edu/#/main/grades?null&termId=201901')\n profile_drop = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"user_button\"]/span[1]')))\n profile_drop.click()\n profile_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, ' /html/body/div/div/div[4]/div[1]/div[4]/div/ul/li[2]/a')))\n profile_btn.click()\n sleep(7)\n drop_down = WebDriverWait(driver, 
10).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"nav_button\"]/div')))\n drop_down.click()\n click_grades = WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '//*[@id=\"Grades\"]')))\n click_grades.click()\n sleep(7)\n except:\n sign_again = check_exists_by_xpath(\n '//*[@id=\"mainContent\"]/div[2]/button')\n full_login = check_exists_by_xpath('//*[@id=\"username\"]')\n if(sign_again):\n sign_out_error()\n return -1\n elif(full_login):\n login(username, password)\n return -1\n else:\n return -1\n\n\ndef check_exists_by_xpath(xpath):\n try:\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, xpath)))\n except TimeoutException:\n return False\n except:\n return False\n return True\n\n\ndef redo_login():\n global need_login\n url = \"https://app.testudo.umd.edu/main/dropAdd\"\n driver.get(url)\n sleep(1)\n\n login(username, password)\n sleep(1)\n get_term(\"Fall 2019\")\n sleep(1)\n sign_out_error()\n post_params = {'bot_id': bot_id,\n 'text': \"done login\"}\n requests.post(\n 'https://api.groupme.com/v3/bots/post', params=post_params)\n need_login = False\n\n\ndef main():\n # goes on testudo for add or drop classes\n if(driver.current_url != 'https://app.testudo.umd.edu/#/main/dropAdd?null&termId=201908'):\n url = \"https://app.testudo.umd.edu/main/dropAdd\"\n driver.get(url)\n sleep(1)\n\n # starts group me thread\n\n login(username, password)\n sleep(1)\n get_term(\"Fall 2019\")\n sleep(1)\n sign_out_error()\n if (first_course_in == True):\n while(is_Testudo_on()):\n if(need_login):\n redo_login()\n for course in course_list:\n submit_course_by_name(course.course_name)\n sleep(1)\n get_section_data(course.course_name)\n\n\nif __name__ == '__main__':\n\n # starts message thread\n t = Thread(target=get_messages)\n t.start()\n\n # adding courses\n course_list.append(Course(\"cmsc216\"))\n course_list[0].section_list.append(\"0104\")\n course_list.append(Course(\"chem231\"))\n course_list[1].section_list.append(\"5421\")\n course_list[1].section_list.append(\"5422\")\n course_list[1].section_list.append(\"5423\")\n course_list[1].section_list.append(\"5441\")\n course_list[1].section_list.append(\"5442\")\n course_list[1].section_list.append(\"5443\")\n\n first_course_in = True\n\n print(course_list[1].course_name)\n print(course_list[1].section_list[1])\n print(is_Testudo_on())\n # .....................................\n\n while(True):\n if(is_Testudo_on()):\n main()\n if(is_Testudo_on() == False):\n stay_logged_in()\n", "id": "3207446", "language": "Python", "matching_score": 3.380544662475586, "max_stars_count": 0, "path": "testudo_main.py" }, { "content": "import os\nimport urllib\nimport requests\nimport time\nfrom bs4 import BeautifulSoup\nfrom time import sleep, strftime, gmtime\nfrom random import randint\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nsectionList = []\n\n\n# returns the unique semester identifier\n\ndef getSemester():\n # start a new web scraping session\n s = requests.session()\n\n # download the main page of classes\n try:\n html = s.get(\"https://ntst.umd.edu/soc\")\n except requests.exceptions.RequestException as e:\n post_params = { 'bot_id' : 'yourbotapi', 'text': \"something wrong\" }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n\n print(e)\n sleep(10)\n\n # parse the html of the class page\n options = 
BeautifulSoup(html.text, \"html.parser\")\n options = options.find(\"select\", {\"id\": \"term-id-input\"})\n options = str(options).split(\"</option>\")\n\n # find the option with the semester code in it\n for option in options:\n if '\"selected\"' in option:\n semester = option\n\n # extract the semester code\n semester = semester[semester.index('value=\"')+7:]\n semester = semester[:semester.index('\"')]\n\n # close the session\n s.close()\n\n return semester\n\n# returns a list of sections\n\n\ndef getSections(course):\n # start a new web scraping session\n s = requests.session()\n\n # begin composing the url\n url = \"https://ntst.umd.edu/soc/search\"\n url += \"?courseId=\" + course\n url += \"&sectionId=\"\n url += \"&termId=\"+getSemester()\n url += \"&_openSectionsOnly=on\"\n url += \"&creditCompare=\"\n url += \"&credits=\"\n url += \"&courseLevelFilter=ALL\"\n url += \"&instructor=\"\n url += \"&_facetoface=on\"\n url += \"&_blended=on\"\n url += \"&_online=on\"\n url += \"&courseStartCompare=\"\n url += \"&courseStartHour=\"\n url += \"&courseStartMin=\"\n url += \"&courseStartAM=\"\n url += \"&courseEndHour=\"\n url += \"&courseEndMin=\"\n url += \"&courseEndAM=\"\n url += \"&teachingCenter=ALL\"\n url += \"&_classDay1=on\"\n url += \"&_classDay2=on\"\n url += \"&_classDay3=on\"\n url += \"&_classDay4=on\"\n url += \"&_classDay5=on\"\n\n # download the list of classes\n try:\n html = s.get(url).text\n except requests.exceptions.RequestException as e:\n post_params = { 'bot_id' : 'yourbotapi', 'text': \"something wrong\" }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n\n print(e)\n sleep(10)\n\n # parse the html with bs4\n courses = BeautifulSoup(html, \"html.parser\").find_all(\n \"div\", {\"class\": \"section\"})\n\n # make an empty list to contain all sections\n sections = []\n\n # loop through every section in the course list\n for course in courses:\n\n # declare a blank list to hold section and time info\n section = []\n times = []\n\n # get the times avaiable\n slots = course.find(\"div\", {\"class\": \"class-days-container\"})\n slots = slots.find_all(\"div\", {\"class\": \"row\"})\n\n # loops thorugh and add all time to the list\n for slot in slots:\n time = slot.find(\"div\", {\"class\": \"section-day-time-group\"})\n time = \" \".join(time.text.strip().split(\"\\n\"))\n times.append(time)\n\n # get the name of the course\n name = str(course.find(\n \"div\", {\"class\": \"section-action-links-container\"}))\n name = name[name.index('value=\"')+7:]\n name = name[:name.index('\"')]\n\n # append the name of the course to the list\n section.append(name)\n\n # get the amount of open seats\n openSeatsCount = int(course.find(\n \"span\", {\"class\": \"open-seats-count\"}).text)\n\n # say whether class is open\n if openSeatsCount > 0:\n section.append(\"open\")\n else:\n section.append(\"closed\")\n\n # get the section number, and the instructor\n section.append(course.find(\n \"span\", {\"class\": \"section-id\"}).text.strip())\n section.append(course.find(\n \"span\", {\"class\": \"section-instructor\"}).text)\n sectionList.append(course.find(\n \"span\", {\"class\": \"section-id\"}).text.strip())\n\n # add the section information and the times\n sections.append(section)\n section.append(times)\n\n # close the current session\n s.close()\n\n # return all sections\n return sections\n\n# returns if a section is open\n\n\ndef isOpen(section):\n if section[1] != \"open\":\n return False\n else:\n return True\n\n\n# main function, continuously 
checks for openings\n\n#global vars\nrows = 15\ncolumns = 15\nsections_to_check = [[0 for x in range(columns)] for y in range(rows)]\nto_remove = [[0 for x in range(columns)] for y in range(rows)]\nbase_sections = []\ncourse = []\n\n\ndef testudo():\n post_params = { 'bot_id' : 'yourbotapi', 'text': \"Starting Bot\" }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n # if section not open, continuously check\n last_message = \"\"\n remove_mes = \"remove\"\n while True:\n request_params = {'token': 'your request token'}\n request_params['limit'] = 1\n response_messages = requests.get(\n 'https://api.groupme.com/v3/groups/yourgroupID/messages', params=request_params).json()['response']['messages']\n for message in response_messages:\n if(message['user_id'] == 'YourUserID' and message['text'] != last_message):\n # list function\n if(message['text'].lower() == \"list\"):\n listFunction()\n break\n if(remove_mes in message['text'].lower()):\n deleteSectionWithMessage(message['text'])\n print(message['text'])\n last_message = message['text']\n sleep(1)\n break\n print(message['text'])\n last_message = message['text']\n index_of_space = message['text'].find(\" \")\n # accepts new course\n new_course = message['text'][0:index_of_space]\n new_section_num = message['text'][index_of_space +\n 1: len(message['text'])]\n\n got_new = True\n for curr_course in course:\n if(new_course.lower() == curr_course.lower()):\n got_new = False\n # if this is a new course\n if (got_new == True):\n base_sections.append(getSections(new_course))\n print(\"creating new course\")\n #this is where we add a new course\n course.append(new_course.lower())\n # adds section to this list\n index_of_course = course.index(new_course.lower())\n curr_sections = getSections(course[index_of_course])\n counter = 0\n while(counter < len(curr_sections)):\n if(curr_sections[counter][2] == new_section_num):\n command = 'curl -X POST \\\"https://api.groupme.com/v3/bots/post?bot_id=yourbotapi&text=' + \\\n \"(ADDED)-->\" + course[index_of_course] + \"-->status:\" + \\\n curr_sections[counter][1] + \\\n \"-->Section:\" + new_section_num + '\\\"'\n os.system(command)\n counter += 1\n sections_to_check[index_of_course].append(new_section_num)\n break\n index_of_course = 0\n #This is where we check the status of each section of each course\n while (index_of_course < len(course)):\n checkStatus(index_of_course)\n index_of_course += 1\n sleep(randint(10, 20))\n # course: open/close: section#: proffName: times:\n\n\ndef listFunction():\n course_index = 0\n while (course_index < len(course)):\n sections = getSections(course[course_index])\n counter = 0\n while(counter < len(sections)):\n for curr_section in sections_to_check[course_index]:\n if(sections[counter][2] == curr_section):\n command = 'curl -X POST \\\"https://api.groupme.com/v3/bots/post?bot_id=yourbotapi&text=' + \\\n str(len(sections_to_check[course_index]))+\"_\" + course[course_index] + \"-->status:\" + \\\n sections[counter][1] + \\\n \"-->Section:\" + str(curr_section) + '\\\"'\n os.system(command)\n sleep(1)\n counter += 1\n course_index += 1\n\n\ndef checkStatus(course_index):\n if(len(to_remove[course_index]) > 0):\n for index in reversed(to_remove[course_index]):\n del sections_to_check[course_index][index]\n to_remove[course_index].clear()\n # print(course[course_index])\n if(course[course_index] != \"0\"):\n # checks for new sections\n newSection(course_index, base_sections[course_index])\n # gets new list of sections (updates)\n sections = 
getSections(course[course_index])\n counter = 0\n while(counter < len(sections)):\n indexForSection = 0\n for curr_section in sections_to_check[course_index]:\n #if(sections[counter][2] == curr_section):\n #print(\"checking \" +\n # course[course_index] + \"section: \" + curr_section)\n if(sections[counter][2] == curr_section and sections[counter][1] == \"open\"):\n #print(curr_section + \" is open\")\n command = 'curl -X POST \\\"https://api.groupme.com/v3/bots/post?bot_id=yourbotapi&text=' + \\\n str(len(sections_to_check[course_index]))+\"_\" + course[course_index] + \"__IS_OPEN__\" + \\\n \"-->Section:\" + curr_section + '\\\"'\n os.system(command)\n to_remove[course_index].append(indexForSection)\n indexForSection += 1\n counter += 1\n\n# returns if a new section is open\n\n\ndef deleteSectionWithMessage(message):\n checking_course = message[7:message.index(\" \", 8)].lower()\n section = message[message.index(\" \", 8)+1:len(message)]\n print(\"_\"+checking_course + \"_remove\")\n print(\"_\"+section + \"_remove\")\n if(checking_course in course):\n course_index = course.index(checking_course.lower())\n deleteSection(course_index, section)\n\n\ndef deleteSection(course_index, section_to_remove):\n print(\"courseindex:_\" + str(course_index) + \"_\")\n print(\"sectiontoremove_\" + section_to_remove + \"_\")\n print(\"sectiontocheck:_\"+sections_to_check[course_index][0])\n if(sections_to_check[course_index].count(section_to_remove) > 0):\n print(\"found section\")\n index = sections_to_check[course_index].index(section_to_remove)\n command = 'curl -X POST \\\"https://api.groupme.com/v3/bots/post?bot_id=yourbotapi&text=' + \\\n \"Removed:__\" + course[course_index] + \\\n \"-->Section:\" + section_to_remove + '\\\"'\n os.system(command)\n del sections_to_check[course_index][index]\n # if(len(sections_to_check[course_index]==0)):\n # del course[course_index]\n\n\ndef newSection(course_index, currsections):\n #print(\"checking new section: \"+ currsections[1][0])\n updated_section = getSections(course[course_index])\n counter = 0\n while(counter < len(updated_section)):\n section_number = updated_section[counter][2]\n if section_number not in currsections[counter]:\n command = 'curl -X POST \\\"https://api.groupme.com/v3/bots/post?bot_id=yourbotapi&text=' + \\\n course + \"_(NEW)section_open-->\" + section_number + '\\\"'\n os.system(command)\n base_sections[course_index] = getSections(course)\n counter += 1\n\n\n# define the command line arguments\nif __name__ == '__main__':\n testudo()\n", "id": "64829", "language": "Python", "matching_score": 1.8013302087783813, "max_stars_count": 0, "path": "course_bot_v1.py" }, { "content": "import requests\nfrom selenium import webdriver\nimport selenium as se\nfrom selenium.common.exceptions import NoSuchElementException\nimport time\nfrom time import sleep\nfrom random import randint\nfrom decimal import Decimal\n\npost_params = {'bot_id': 'your_bot_ID',\n 'text': \"Starting Uniqlo bot\"}\nrequests.post('https://api.groupme.com/v3/bots/post', params=post_params)\n\noptions = se.webdriver.ChromeOptions()\n\n# chrome is set to headless\noptions.add_argument('headless')\n\ndriver = se.webdriver.Chrome(options=options)\n\n# The Uniqlo product you want to track\ndriver.get(\"https://www.uniqlo.com/us/en/men/sale\")\n\n# global list for items that have already gotton a message\nsent_list = []\n\n\ndef check_exists_by_xpath(xpath, item):\n try:\n item.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return False\n return True\n\n\ndef 
check_for_price(class_name, sale_price_path, standard_price_path, item_name_path):\n try:\n driver.refresh\n # gets all products and calls each of them item\n for item in driver.find_elements_by_class_name(class_name):\n # checking if the spans exists so there arn't any errors when weird stuff happens\n if check_exists_by_xpath(standard_price_path, item) == True and check_exists_by_xpath(sale_price_path, item) == True:\n product_standard_price = item.find_element_by_xpath(\n standard_price_path).text\n product_sale_price = item.find_element_by_xpath(\n sale_price_path).text\n else:\n # sets prices to make sure it gets printed out (better to be safe then sorry)\n product_standard_price = \"$1000.00\"\n product_sale_price = \"$1.01\"\n\n # product string format is in $__.__ so we use [1:] to get rid of $\n sale_percent = float(\n product_sale_price[1:]) / float(product_standard_price[1:])\n\n # 1-.05 is the min amount of sale there has to be\n if sale_percent < .26 and item.id not in sent_list:\n item_name = item.find_element_by_xpath(item_name_path).text\n # print for debugging\n print(\n f\"{item_name} ON SALE for {product_sale_price} at {round((1-sale_percent)*100,2)}% off\")\n post_params = {'bot_id': 'your_bot_ID',\n 'text': f\"{item_name} ON SALE for {product_sale_price} at {round((1-sale_percent)*100,2)}% off\"}\n requests.post(\n 'https://api.groupme.com/v3/bots/post', params=post_params)\n # adds id to sent list so the message isn't sent mutiple times\n sent_list.append(item.id)\n\n except requests.exceptions.RequestException as e:\n # Sends an error message and waits another 60 seconds\n post_params = {'bot_id': 'your_bot_ID',\n 'text': \"exception \" + str(e)}\n requests.post('https://api.groupme.com/v3/bots/post',\n params=post_params)\n sleep(60)\n return False\n\n\nwhile True:\n class_name = 'product-tile'\n sale_price_path = \".//span[@class='product-sales-price']\"\n standard_price_path = \".//span[@class='product-standard-price']\"\n item_name_path = \".//a[@class='name-link']\"\n # calls function every 30-60 seconds\n current_state = check_for_price(\n class_name, sale_price_path, standard_price_path, item_name_path)\n sleep(randint(30, 60))\n", "id": "4683942", "language": "Python", "matching_score": 4.511658668518066, "max_stars_count": 0, "path": "bot.py" }, { "content": "import requests\nfrom selenium import webdriver\nimport selenium as se\nfrom selenium.common.exceptions import NoSuchElementException \nimport time\nfrom time import sleep\nfrom random import randint\n\npost_params = { 'bot_id' : 'botID', 'text': \"starting product bot\" }\nrequests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n\noptions = se.webdriver.ChromeOptions()\n\n# chrome is set to headless\noptions.add_argument('headless')\n\ndriver = se.webdriver.Chrome(options=options)\n\n# The Amazon product you want to track\ndriver.get(\"https://www.amazon.com/Sony-Noise-Cancelling-Headphones-WH1000XM3/dp/B07G4MNFS1/ref=sr_1_2?crid=N5OCS4NJDH4M&keywords=sony+wh-1000xm3&qid=1551040801&s=gateway&sprefix=sony+%2Caps%2C120&sr=8-2\")\n\n# sets base price once\nglobal_base_price = driver.find_element_by_xpath('//*[@id=\"priceblock_ourprice\"]').text\n\ndef check_change_by_xpath(xpath, base_price):\n try:\n # refreshes the page, finds the price\n # if the price changed, the current price is returned\n driver.refresh\n current_price = driver.find_element_by_xpath('//*[@id=\"priceblock_ourprice\"]').text\n if current_price != base_price:\n return current_price\n except 
requests.exceptions.RequestException as e:\n #Sends an error message and waits another 60 seconds\n post_params = { 'bot_id' : 'botID', 'text': str(e) }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n sleep(60)\n return False\n\nwhile True:\n current_state = check_change_by_xpath('//*[@id=\"priceblock_ourprice\"]', global_base_price)\n print(\"curr state \", current_state)\n if current_state != False:\n global_base_price = current_state\n post_params = { 'bot_id' : 'botID', 'text': check_change_by_xpath('//*[@id=\"priceblock_ourprice\"]',global_base_price).text }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n sleep(randint(3,5))", "id": "4054759", "language": "Python", "matching_score": 0.43022000789642334, "max_stars_count": 0, "path": "sony_headphone.py" }, { "content": "import numpy as np\nimport cv2\ncap = cv2.VideoCapture(r'E:/test.mp4')\nsize = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\nfourcc = cv2.VideoWriter_fourcc(*'DIVX')\nvideo = cv2.VideoWriter(r'E:/6.avi', fourcc, 25, size)\nwhile(1):\n ret, frame = cap.read()\n if not ret:\n break\n frame = cv2.convertScaleAbs(frame)\n params = cv2.SimpleBlobDetector_Params()\n params.blobColor = 0\n params.filterByColor = True\n params.minArea = 0\n params.filterByArea = False\n params.minThreshold = 120;\n params.maxThreshold = 255;\n ver = (cv2.__version__).split('.')\n if int(ver[0]) < 3:\n detector = cv2.SimpleBlobDetector(params)\n else:\n detector = cv2.SimpleBlobDetector_create(params)\n keypoints = detector.detect(frame)\n im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n if ret == True:\n video.write(im_with_keypoints)\n cv2.imshow('frame', im_with_keypoints)\n else:\n cap.release()\n break\n k = cv2.waitKey(10) & 0xff\n if k == 27:\n break", "id": "7056909", "language": "Python", "matching_score": 1.6450815200805664, "max_stars_count": 0, "path": "colordetection.py" }, { "content": "import cv2 \nimport numpy as np\n\nimage = cv2.imread(\"brightspot.png\")\n\n# constants\nBINARY_THRESHOLD = 20\nCONNECTIVITY = 4\nDRAW_CIRCLE_RADIUS = 4\n\n# convert to gray\ngray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# extract edges\nbinary_image = cv2.Laplacian(gray_image, cv2.CV_8UC1)\n\n# fill in the holes between edges with dilation\ndilated_image = cv2.dilate(binary_image, np.ones((5, 5)))\n\n# threshold the black/ non-black areas\n_, thresh = cv2.threshold(dilated_image, BINARY_THRESHOLD, 255, cv2.THRESH_BINARY)\n\n# find connected components\ncomponents = cv2.connectedComponentsWithStats(thresh, CONNECTIVITY, cv2.CV_32S)\n\n# draw circles around center of components\n#see connectedComponentsWithStats function for attributes of components variable\ncenters = components[3]\nfor center in centers:\n cv2.circle(thresh, (int(center[0]), int(center[1])), DRAW_CIRCLE_RADIUS, (255), thickness=-1)\n\ncv2.imwrite(\"res.png\", thresh)\ncv2.imshow(\"result\", thresh)\ncv2.waitKey(0)", "id": "7875412", "language": "Python", "matching_score": 2.657590627670288, "max_stars_count": 0, "path": "brightspot.py" }, { "content": "import cv2\nimport numpy as np\n\nimg = cv2.imread('brightspot.png')\nimg = cv2.resize(img,(400,500))\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\nret,gray = cv2.threshold(gray,127,255,0)\ngray2 = gray.copy()\nmask = np.zeros(gray.shape,np.uint8)\n\ncontours, hier = cv2.findContours(gray,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\nfor cnt in 
contours:\n if 200<cv2.contourArea(cnt)<5000:\n cv2.drawContours(img,[cnt],0,(0,255,0),2)\n cv2.drawContours(mask,[cnt],0,255,-1)\ncv2.bitwise_not(gray2,gray2,mask)\n\ncv2.imshow('IMG',gray2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()", "id": "6118752", "language": "Python", "matching_score": 0.3348686397075653, "max_stars_count": 0, "path": "blobdetect.py" }, { "content": "#!/usr/bin/python\n#\n# Copyright 2018 BIG VISION LLC ALL RIGHTS RESERVED\n# \nfrom __future__ import print_function\nimport sys\nimport cv2\nfrom random import randint\nimport time\nimport numpy as np\n\nfrom flask_table import Table, Col\nfrom w3lib.html import replace_entities\n\nt0 = time.time()\ntracked_times=[]\nobj_location_list=[]\nfirst = True\ndata_time_sum = 0\n\n# def get_data_sum():\n# for data in obj_location_list:\n# data_time_sum += obj_location_list[data]\n\n\nclass Usage:\n def __init__(self, x, y, start_time):\n self.x = x\n self.y = y\n self.start_time = start_time\n self.total_time = 0.0\n\ntrackerTypes = ['BOOSTING', 'MIL', 'KCF','TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']\n\ndef createTrackerByName(trackerType):\n # Create a tracker based on tracker name\n if trackerType == trackerTypes[0]:\n tracker = cv2.TrackerBoosting_create()\n elif trackerType == trackerTypes[1]: \n tracker = cv2.TrackerMIL_create()\n elif trackerType == trackerTypes[2]:\n tracker = cv2.TrackerKCF_create()\n elif trackerType == trackerTypes[3]:\n tracker = cv2.TrackerTLD_create()\n elif trackerType == trackerTypes[4]:\n tracker = cv2.TrackerMedianFlow_create()\n elif trackerType == trackerTypes[5]:\n tracker = cv2.TrackerGOTURN_create()\n elif trackerType == trackerTypes[6]:\n tracker = cv2.TrackerMOSSE_create()\n elif trackerType == trackerTypes[7]:\n tracker = cv2.TrackerCSRT_create()\n else:\n tracker = None\n print('Incorrect tracker name')\n print('Available trackers are:')\n for t in trackerTypes:\n print(t)\n \n return tracker\n\n# if __name__ == '__main__':\n\n# print(\"Default tracking algoritm is CSRT \\n\"\n# \"Available tracking algorithms are:\\n\")\n# for t in trackerTypes:\n# print(t) \n\ndef match_with_obj(center_x_in, center_y_in):\n counter = 0\n for single_location in obj_location_list:\n checking_center_x = single_location.x \n checking_center_y = single_location.y\n # print(\"subtracting x \" + str(checking_center_x - center_x_in))\n # print(\"subtracting y\" + str(checking_center_y - center_y_in))\n if abs(checking_center_x - center_x_in)<= 15 and abs(checking_center_y - center_y_in)<= 15:\n print(\"returned index\" +str(counter))\n return counter\n elif counter+1 == len(obj_location_list):\n return -1\n counter +=1\n return 0\n\nclass ItemTable(Table):\n # start_time = Col('Start Time')\n total_time = Col('Total Time')\n\n# Get some objects\nclass Item(object):\n def __init__(self, total_time):\n # self.start_time = start_time\n self.total_time = total_time\n\ndef make_table():\n items = []\n\n for obj in obj_location_list:\n items.append(Item(obj.total_time))\n \n # Populate the table\n table = ItemTable(items)\n\n table_html = str(table.__html__().replace(\"<table>\",'<table class=\"table\">'))\n # print(table_html)\n table_html = replace_entities(table_html)\n\n # counter1 = count(1)\n # table_html = re.sub('data-target=\"#demo', lambda m: m.group() + str(next(counter1)), table_html)\n \n # table_html = table_html.replace(\"</td></tr>\", '</td></tr> <tr> <td colspan=\"6\" class=\"hiddenRow\"style=\"padding:0!important;\"><div class=\"accordian-body collapse\" id=\"demo\"> <ul class=\"list-group\"> 
[cmmt] </ul> </div></td></tr>')\n # counter2 = count(1)\n # table_html = re.sub('id=\"demo', lambda m: m.group() + str(next(counter2)), table_html)\n # for key, value in theme_dict.items():\n # for sub_theme in value:\n # table_html = table_html.replace('[cmmt]', get_cmmts(sub_theme.theme, theme_dict),1)\n # g.theme_dict = result_list\n\n return table_html\n\ndef get_cmmts(sub_theme_in, theme_dict_in):\n theme_dict = theme_dict_in\n\n result_str = \"\"\n for key, value in theme_dict.items():\n for sub_theme in value:\n if(sub_theme == SubTheme(sub_theme_in)):\n for ind_cmmt in sub_theme.comments:\n \n result_str += '<li class=\"list-group-item\">'+'<b>' + ind_cmmt.file_name + '</b>' + '<br>' + ind_cmmt.comment+\"</li>\"\n return result_str\n\n\n\ndef begin_tracking():\n global first\n trackerType = \"CSRT\" \n\n # Set video to load\n videoPath = \"videos/run.mp4\"\n \n # Create a video capture object to read videos\n cap = cv2.VideoCapture(1)\n \n # Read first frame\n success, frame = cap.read()\n # quit if unable to read the video file\n if not success:\n print('Failed to read video')\n sys.exit(1)\n\n ## Select boxes\n bboxes = []\n colors = [] \n\n # OpenCV's selectROI function doesn't work for selecting multiple objects in Python\n # So we will call this function in a loop till we are done selecting all objects\n while True:\n # draw bounding boxes over objects\n # selectROI's default behaviour is to draw box starting from the center\n # when fromCenter is set to false, you can draw box starting from top left corner\n bbox = cv2.selectROI('MultiTracker', frame)\n bboxes.append(bbox)\n colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))\n print(\"Press q to quit selecting boxes and start tracking\")\n print(\"Press any other key to select next object\")\n k = cv2.waitKey(0) & 0xFF\n if (k == 113): # q is pressed\n break\n \n print('Selected bounding boxes {}'.format(bboxes))\n\n ## Initialize MultiTracker\n # There are two ways you can initialize multitracker\n # 1. tracker = cv2.MultiTracker(\"CSRT\")\n # All the trackers added to this multitracker\n # will use CSRT algorithm as default\n # 2. 
tracker = cv2.MultiTracker()\n # No default algorithm specified\n\n # Initialize MultiTracker with tracking algo\n # Specify tracker type\n \n # Create MultiTracker object\n multiTracker = cv2.MultiTracker_create()\n\n # Initialize MultiTracker \n for bbox in bboxes:\n multiTracker.add(createTrackerByName(trackerType), frame, bbox)\n\n\n # Process video and track objects\n while cap.isOpened():\n success, frame = cap.read()\n if not success:\n break\n \n # get updated location of objects in subsequent frames\n success, boxes = multiTracker.update(frame)\n\n # draw tracked objects\n for i, newbox in enumerate(boxes):\n # box = cv2.boxPoints(i)\n t1 = time.time()\n print(\"t1 \", t1)\n total = t1-t0\n p1 = (int(newbox[0]), int(newbox[1]))\n center_x = (newbox[0] + newbox[3])/2\n center_y = (newbox[2] + newbox[3])/2\n center_text = \"Center: \"+str(center_x) +\", \" +str(center_y)\n print(center_text)\n index_of_obj = match_with_obj(center_x,center_y)\n if index_of_obj != -1 and not first:\n obj_location_list[index_of_obj].total_time = t1 - obj_location_list[index_of_obj].start_time\n\n print(\"time for index \"+str(index_of_obj)+ \": \"+str(obj_location_list[index_of_obj].total_time))\n else:\n obj_location_list.append(Usage(center_x,center_y,time.time() ))\n print(\"making new\")\n first = False\n p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))\n cv2.rectangle(frame, p1, p2, colors[i], 2, 1)\n\n # show frame\n cv2.imshow('MultiTracker', frame)\n \n\n # quit on ESC button\n if cv2.waitKey(1) & 0xFF == 27: # Esc pressed\n print(str(make_table()))\n return str(make_table())\n \n", "id": "4149626", "language": "Python", "matching_score": 3.2577762603759766, "max_stars_count": 0, "path": "multiTracker.py" }, { "content": "from bs4 import BeautifulSoup\nimport re\nfrom difflib import SequenceMatcher as SM\nfrom nltk.util import ngrams\nimport codecs\n\nfrom flask_table import Table, Col\nfrom flask import Flask, request, render_template, redirect, url_for, session, jsonify\nimport csv\nimport os.path\n\nfrom w3lib.html import replace_entities\n\nimport jsonpickle\nfrom flask import g\n\nfrom itertools import count\nimport re\n\n#global vars\nsoup = BeautifulSoup(features=\"html.parser\")\n\nclass Comment:\n def __init__(self, comment, file_name):\n self.file_name = file_name\n self.comment = comment\n\n def __eq__(self, other):\n if isinstance(other, Comment):\n return self.comment == other.comment\n return False\n\nclass SubTheme:\n def __init__(self, theme):\n self.theme = theme\n self.comments = []\n \n def add_cmmts(self, text, file_name):\n self.comments.append(Comment(text,file_name))\n\n def __eq__(self, other):\n if isinstance(other, SubTheme):\n return self.theme.lower() == other.theme.lower()\n return False\n\ndef add_code_from_txt():\n # global theme_dict\n # print(\"theme dict length from start: \" + str(len(session['theme_dict'])))\n theme_dict =dict()\n # print(\"theme dict length from start: \" + str(len(session['theme_dict'])))\n sub_code_list = g.sub_code_list\n my_path = os.path.abspath(os.path.dirname(__file__))\n path = os.path.join(my_path, \"../project/code_chart.txt\")\n main_theme_list = g.main_theme_list\n f = open(path,'r')\n curr_main_theme = \"\"\n for line in f:\n text = line.strip()\n if(text[0] == \"*\"):\n #adds to main_theme_list (raw)\n main_theme_list.append(text[1:])\n single_theme = text.split('|')[0][1:]\n curr_main_theme = single_theme\n #initalizes dict key\n theme_dict[single_theme] = []\n else:\n single_sub_theme = text.split('|')[0]\n #adds 
to sub_code_list (raw)\n sub_code_list.append(text)\n\n theme_dict[curr_main_theme].append(SubTheme(single_sub_theme))\n return theme_dict\n\n\ndef store_data(file_in, sim_value_in, initialized_list):\n global soup\n main_theme_list = g.main_theme_list\n sub_code_list = g.sub_code_list\n\n theme_dict = initialized_list\n\n soup = BeautifulSoup(file_in, \"html.parser\")\n\n all_cmmts = soup.find_all(\"a\", href=re.compile(\"cmnt_\"))\n for cmmt in all_cmmts:\n ref_num_indx = str(cmmt['href']).find('ref')\n ref_num = str(cmmt['href'])[ref_num_indx+3:]\n comment_link = soup.find(\"a\", href=re.compile(\"#cmnt\"+str(ref_num)))\n try:\n original_text_list = comment_link.parent.parent.find_all(\"span\")\n single_orig_text = \"\"\n for curr_text in original_text_list:\n single_orig_text += \"\\n\" + curr_text.text\n\n except:\n print(\"bad\")\n parent_of_cmmt = cmmt.parent.parent\n comments = parent_of_cmmt.find_all(\"span\")\n for comment in comments:\n if comment.text.replace(\" \", \"\") == \"\" or comment.text[0]==\"[\":\n print(\"contineudasdf\")\n continue\n comment_sub_list = []\n # splitter\n index_of_coln = comment.text.find(\":\")\n mod_comment = comment.text.replace(\"and\",\"/\")\n index_slash = mod_comment.find(\"/\")\n if index_slash > 0:\n comment_sub_list.append(mod_comment[index_of_coln+1:index_slash])\n comment_sub_list.append(mod_comment[index_slash+1:])\n else:\n if index_of_coln != -1:\n comment_sub_list.append(mod_comment[index_of_coln+1:])\n else:\n comment_sub_list.append(mod_comment)\n \n #sets main theme\n if index_of_coln != -1:\n main_theme = fuzzy_best_match(comment.text[0:index_of_coln], main_theme_list, sim_value_in)\n else:\n # print(comment.text)\n main_theme = fuzzy_best_match(comment.text, main_theme_list, sim_value_in-.05)\n\n for formated_comment in comment_sub_list:\n # main_theme = fuzzy_best_match(formated_comment, main_theme_list)\n # print(formated_comment)\n sub_theme = fuzzy_best_match(formated_comment, sub_code_list, sim_value_in)\n if sub_theme.replace(\" \",\"\") == \"\":\n continue\n #checks if the sub theme already exists\n sub_theme_in_list = False \n for key, value in theme_dict.items():\n for value_sub_theme in value:\n if SubTheme(sub_theme) == value_sub_theme:\n main_theme = key\n sub_theme_in_list = True\n\n if sub_theme_in_list == False: \n #checks if the main theme already exists\n theme_in_list = False\n for key, value in theme_dict.items():\n if main_theme == key:\n theme_in_list = True\n \n if theme_in_list == False:\n theme_dict[main_theme] = []\n \n #adds the appropriate \n theme_dict[main_theme].append(SubTheme(sub_theme))\n index_of_curr = theme_dict[main_theme].index(SubTheme(sub_theme))\n\n theme_dict[main_theme][index_of_curr].add_cmmts(replace_entities(single_orig_text),file_in.filename)\n return theme_dict\n\ndef fuzzy_best_match(cmmt, list_in, sim_value_in):\n sim_value = sim_value_in\n largest_sim_val = 0\n\n best_match = \"\"\n for code in list_in:\n single_code = code.split('|')[0]\n if single_code.lower().replace(' ','') == cmmt[0:].lower().replace(' ',''):\n return str(single_code)\n curr_sim_val = fuzzy_finder(code, cmmt)\n if curr_sim_val > largest_sim_val:\n largest_sim_val = curr_sim_val\n best_match = str(single_code)\n \n if largest_sim_val >= sim_value:\n return best_match\n else:\n return cmmt\n\n\ndef fuzzy_finder(needle_in, hay_in):\n needle = needle_in\n hay = hay_in\n needles = needle.split('|')\n \n overall_max_sim_val = 0\n\n for nddle in needles:\n needle_length = len(nddle.split())\n max_sim_val = 0\n 
max_sim_string = u\"\"\n\n for ngram in ngrams(hay.split(), needle_length + int(.2*needle_length)):\n hay_ngram = u\" \".join(ngram)\n similarity = SM(None, hay_ngram, nddle).ratio() \n if similarity > max_sim_val:\n max_sim_val = similarity\n max_sim_string = hay_ngram\n\n if max_sim_val > overall_max_sim_val:\n overall_max_sim_val = max_sim_val\n return overall_max_sim_val\n\ndef start(files_in, thresh_val):\n initialized = False\n\n sim_value = thresh_val\n\n for curr_file in files_in:\n if initialized == False:\n initialized_list=add_code_from_txt()\n result_list = store_data(curr_file, sim_value,initialized_list )\n initialized = True\n\n return result_list\n\n# for the table\nclass ItemTable(Table):\n m_theme = Col('Main Theme')\n sub_theme = Col('Sub Theme')\n count = Col('Count')\n bttn = Col('')\n\n# Get some objects\nclass Item(object):\n def __init__(self, m_theme, sub_theme,count, list_of_cmmts, bttn):\n self.m_theme = m_theme\n self.sub_theme = sub_theme\n self.count = count\n self.list_of_cmmts = list_of_cmmts\n self.bttn = bttn\n\ndef make_table(result_list):\n items = []\n theme_dict = result_list\n\n for key, value in theme_dict.items():\n for sub_theme in value:\n items.append(Item(key, sub_theme.theme, len(sub_theme.comments), sub_theme.comments, '<button type=\"button\" data-toggle=\"collapse\" data-target=\"#demo\" class=\"accordion-toggle btn btn-default\">Comments</button>'))\n \n # Populate the table\n table = ItemTable(items)\n\n table_html = str(table.__html__().replace(\"<table>\",'<table class=\"table\">'))\n # print(table_html)\n table_html = replace_entities(table_html)\n\n counter1 = count(1)\n table_html = re.sub('data-target=\"#demo', lambda m: m.group() + str(next(counter1)), table_html)\n \n table_html = table_html.replace(\"</td></tr>\", '</td></tr> <tr> <td colspan=\"6\" class=\"hiddenRow\"style=\"padding:0!important;\"><div class=\"accordian-body collapse\" id=\"demo\"> <ul class=\"list-group\"> [cmmt] </ul> </div></td></tr>')\n counter2 = count(1)\n table_html = re.sub('id=\"demo', lambda m: m.group() + str(next(counter2)), table_html)\n for key, value in theme_dict.items():\n for sub_theme in value:\n table_html = table_html.replace('[cmmt]', get_cmmts(sub_theme.theme, theme_dict),1)\n g.theme_dict = result_list\n\n return table_html\n\ndef get_cmmts(sub_theme_in, theme_dict_in):\n theme_dict = theme_dict_in\n\n result_str = \"\"\n for key, value in theme_dict.items():\n for sub_theme in value:\n if(sub_theme == SubTheme(sub_theme_in)):\n for ind_cmmt in sub_theme.comments:\n \n result_str += '<li class=\"list-group-item\">'+'<b>' + ind_cmmt.file_name + '</b>' + '<br>' + ind_cmmt.comment+\"</li>\"\n return result_str\n\n\ndef get_subtheme_list():\n theme_dict = g.theme_dict \n result_list = []\n\n for key, value in theme_dict.items():\n for sub_theme in value:\n if(sub_theme == SubTheme(sub_theme_in)):\n return sub_theme.comments\n return []\n\n", "id": "1032884", "language": "Python", "matching_score": 3.9337337017059326, "max_stars_count": 0, "path": "project/counter.py" }, { "content": "import os\n\nfrom flask import Flask, request, render_template, redirect, url_for, session, send_file\nfrom werkzeug.utils import secure_filename\n\nfrom bs4 import BeautifulSoup\nimport re\n\nfrom .counter import start, make_table, get_cmmts, Comment, SubTheme\n\nfrom flask_bootstrap import Bootstrap\n\nimport jsonpickle\nfrom flask import g\n\nimport keyring\n\n#global vars\napp = Flask(__name__) \nBootstrap(app)\n\nis_prod = os.environ.get('IS_HEROKU', 
None)\n\nif is_prod:\n auth_pass = os.environ.get('auth_pass')\nelse:\n auth_pass = \"<PASSWORD>\"\n\napp.secret_key = 'sads9f8b378asbfas9ah'\napp.config['UPLOAD_PATH'] = '../project'\n\nALLOWED_EXTENSIONS = set(['txt'])\n\ndef allowed_file(filename):\n\treturn '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\[email protected]('/download')\ndef downloadFile ():\n #For windows you need to use drive name [ex: F:/Example.pdf]\n path = \"../project/code_chart.txt\"\n return send_file(path, as_attachment=True)\n\[email protected]('/codebook-rules')\ndef sendRules ():\n return render_template('rules.html')\n\[email protected](\"/\", methods=[\"POST\"])\ndef upload():\n if request.form.get('submit_chart') ==\"Submit File\":\n # check if the post request has the file part\n if 'file' not in request.files:\n print('No file part')\n return redirect(request.url)\n file = request.files['file']\n if file.filename == '':\n print('No file selected for uploading')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n # if os.path.exists('../project/code_chart.txt'):\n # os.remove('../project/code_chart.txt')\n filename = \"code_chart.txt\"\n my_path = os.path.abspath(os.path.dirname(__file__))\n rel_path = os.path.join(my_path, app.config['UPLOAD_PATH'])\n file.save(os.path.join(rel_path, filename))\n print('File successfully uploaded')\n return redirect('/')\n else:\n print('Allowed file type is txt')\n return redirect(request.url)\n\n if request.form.get('submit_t') ==\"enter\":\n processed_text = request.form['text']\n print(processed_text)\n try:\n session['thresh_val'] = float(processed_text)\n if session['thresh_val'] > 1:\n session['thresh_val'] = 1.0\n except:\n session['thresh_val'] = .65\n return render_template('index.html',t_val = session['thresh_val'], auth_code = auth_pass)\n elif request.form.get('submit_f') ==\"Submit Files\":\n if 'thresh_val' not in session:\n session['thresh_val'] = .65\n\n g.theme_dict = dict()\n g.main_theme_list = []\n g.sub_code_list = []\n uploaded_files = request.files.getlist(\"file[]\")\n print (uploaded_files)\n try:\n result_list = start(uploaded_files,session['thresh_val'])\n table_html = make_table(result_list)\n return render_template('index.html', table = table_html, t_val = session['thresh_val'], auth_code = auth_pass)\n except:\n return render_template('index.html', table = \"error, prob a bad codebook\", t_val = session['thresh_val'], auth_code = auth_pass)\n\[email protected]('/')\ndef my_form():\n session.clear()\n print(\"sleared session\")\n try:\n return render_template('index.html',t_val = session['thresh_val'], auth_code = auth_pass)\n except:\n session['thresh_val'] = .65\n return render_template('index.html',t_val = .65, auth_code =auth_pass )\n\n\nif __name__ == '__main__':\n app.run()", "id": "10800052", "language": "Python", "matching_score": 3.1594741344451904, "max_stars_count": 0, "path": "project/app.py" }, { "content": "from flask import Flask, request, render_template, redirect, url_for, session, send_file\nfrom flask_bootstrap import Bootstrap\nfrom multiTracker import begin_tracking\n\t\t\napp = Flask(__name__)\n\t\t\nbootstrap = Bootstrap(app)\n\n'''\[email protected]('/')\ndef index():\n return 'Hello Flask!'\n'''\n\[email protected]('/')\ndef main_page():\n return render_template('Test.html')\n\[email protected]('/', methods=[\"POST\"])\ndef record():\n if request.form.get('record') == \"Record\":\n print(\"starting\")\n html_table = begin_tracking()\n # dataSum = get_()\n return 
render_template('Test.html', table = html_table)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "id": "3507568", "language": "Python", "matching_score": 0.44593650102615356, "max_stars_count": 0, "path": "mainapp.py" }, { "content": "import json\n\nimport requests\n\nfrom tinder_api.utils import config\n\n\ndef get(url):\n full_url = config.host + url\n r = requests.get(full_url, headers=config.headers)\n return r.json()\n\ndef post(url, p_data):\n full_url = config.host + url\n r = requests.post(full_url, headers=config.headers,\n data=json.dumps(p_data))\n return r.json()\n\ndef delete(url):\n full_url = config.host + url\n r = requests.delete(full_url, headers=config.headers)\n return r\n\ndef put(url, p_data):\n full_url = config.host + url\n r = requests.put(full_url, headers=config.headers,\n data=json.dumps(p_data))\n return r\n\n\nif __name__ == '__main__':\n pass\n", "id": "11052128", "language": "Python", "matching_score": 0.7591782212257385, "max_stars_count": 0, "path": "tinder_api/utils/request_handlers.py" }, { "content": "import praw\nimport os\nimport time\nfrom time import sleep\nimport requests\nfrom time import sleep, strftime, gmtime\nfrom random import randint\n\npost_params = { 'bot_id' : 'your_bot_id', 'text': \"starting bot reddit frugal bot\" }\nrequests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n \nreddit = praw.Reddit(user_agent='your agent name',\n client_id='your client id', client_secret=\"your client secret\")\n# initilizes the time the bot starts running\nstart_time = time.time()\nclient_error = False\ntime_list = []\nwhile True:\n try:\n for post in reddit.subreddit('frugalmalefashion').new(limit=5):\n # Sends message if reddit was down and has come back up\n if client_error == True:\n post_params = { 'bot_id' : 'your_bot_id', 'text': \"Reddit is back online\" }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n client_error = False\n # prints only the new posts \n if post.created_utc > start_time and post.created_utc not in time_list:\n post_params = { 'bot_id' : 'your_bot_id', 'text': post.title +\": https://www.reddit.com\" + post.permalink }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n time_list.append(post.created_utc)\n # sends an error message when reddit is down\n except:\n if client_error == False:\n post_params = { 'bot_id' : 'your_bot_id', 'text': \"client error\" }\n requests.post('https://api.groupme.com/v3/bots/post', params = post_params)\n client_error = True\n sleep(150)\n\n sleep(randint(60, 140))\n", "id": "977103", "language": "Python", "matching_score": 0.637498140335083, "max_stars_count": 0, "path": "fmf-subreddit.py" }, { "content": "import tinder_api.session\nimport itertools\nfrom datetime import datetime\nfrom datetime import datetime\nimport time\n\n\nsess = tinder_api.session.Session() # inits the session\n\nprint(\"Starting Bot...\")\nwhile(1):\n current_time = datetime.now().strftime(\"%I:%M %p\")\n sess.update_profile(bio=f\"It is {current_time} and you're on Tinder instead of being with me\")\n time.sleep(30) \n\n\n", "id": "6123628", "language": "Python", "matching_score": 0.16668003797531128, "max_stars_count": 0, "path": "bio_update.py" }, { "content": "import requests\nimport json\n\nbase_url = \"https://api.gotinder.com/v2/auth/\"\nheaders = {\n \"User-Agent\" : \"Tinder/11.4.0 (iPhone; iOS 12.4.1; Scale/2.00)\",\n \"Content-Type\" : \"application/json\"\n}\n\ndef send_number():\n phone_number = str(input(\"Enter your phone 
number in form 3211234567: \"))\n phone_number = \"1+\"+phone_number\n url = base_url+\"sms/send?auth_type=sms\"\n data = {\"phone_number\": phone_number}\n resp = requests.request(\"POST\", url,\n data=json.dumps(data), headers=headers)\n return phone_number\n\ndef validate_code(phone_number):\n url = base_url+\"sms/validate?auth_type=sms\"\n otp_code = input(f\"Enter the tinder code sent to {phone_number}: \")\n data = {\n \"otp_code\": otp_code,\n \"phone_number\": phone_number\n }\n resp = requests.request(\"POST\", url,\n data=json.dumps(data), headers=headers)\n return resp.json()['data']['refresh_token']\n\ndef save_token():\n phone_number = send_number()\n refresh_token = validate_code(phone_number)\n url = base_url+\"/login/sms\"\n data = {\"client_version\" : \"11.4.0\", \"refresh_token\": refresh_token}\n resp = requests.request(\"POST\", url,\n data=json.dumps(data), headers=headers)\n api_token = resp.json()['data']['api_token']\n with open('token.txt', 'w') as f:\n f.write(api_token)\n print(api_token)\n\nif __name__ == \"__main__\":\n save_token()\n", "id": "9735391", "language": "Python", "matching_score": 2.5902159214019775, "max_stars_count": 0, "path": "tinder_api/utils/sms_auth.py" }, { "content": "with open(\"tinder_api/utils/token.txt\", \"r\") as f:\n tinder_token = f.read()\n\n# it is best for you to write in the token to save yourself the file I/O\n# especially if you have python byte code off\n#tinder_token = \"\"\n\nheaders = {\n 'app_version': '6.9.4',\n 'platform': 'ios',\n 'content-type': 'application/json',\n 'User-agent': 'Tinder/7.5.3 (iPohone; iOS 10.3.2; Scale/2.00)',\n 'X-Auth-Token': 'enter_auth_token',\n}\n\nhost = 'https://api.gotinder.com'\n\nif __name__ == '__main__':\n pass\n", "id": "130695", "language": "Python", "matching_score": 0.7882116436958313, "max_stars_count": 0, "path": "tinder_api/utils/config.py" }, { "content": "from tinder_api import session\nfrom tinder_api.utils import request_handlers as r\nfrom tinder_api.utils.wrapper import JsonWrapper\n\nimport json\nimport datetime\nimport dateutil.parser\n\nclass UserModel():\n def __init__(self, uid, name, bio, age, birth_date, photos,\n gender, distance, job_name, job_title, school_name,\n school_id, ping_time, top_song, instagram_photos):\n self.id = uid\n self.name = name\n self.bio = bio\n self.age = age\n self.birth_date = birth_date\n self.photos = photos\n self.gender = gender\n self.distance = distance\n self.job_name = job_name\n self.job_title = job_title\n self.school_name = school_name\n self.school_id = school_id\n self.ping_time = ping_time\n self.top_song = top_song\n self.instagram_photos = instagram_photos\n\n def report(self, cause, text=''):\n \"\"\"Reports the user\n Cause:\n 0 : 'other' requires text\n 1 : 'spam'\n 4 : 'inappropriate photos'\n \"\"\"\n resp = r.post('/report/{}'.format(uid),\n {\"cause\": cause, \"text\": text})\n return resp\n\nclass NormalUser(UserModel):\n def __init__(self, uid, name, bio, age, birth_date, photos, gender,\n distance, job_name, job_title, school_name, school_id,\n ping_time, top_song, instagram_photos):\n super().__init__(uid, name, bio, age, birth_date, photos, gender,\n distance, job_name, job_title, school_name, school_id,\n ping_time, top_song, instagram_photos)\n\n def like(self):\n \"\"\"Likes (swipes right) the user\"\"\"\n resp = r.get('/like/{}'.format(self.id))\n return resp['match']\n\n def super_like(self):\n \"\"\"Super likes (swipes up) the user\"\"\"\n resp = r.post('/like/{}/super'.format(self.id), {})\n return 
resp['match']\n\n def dislike(self):\n \"\"\"Dislikes (swipes left) the user\"\"\"\n resp = r.post('/pass/{}'.format(self.id))\n return 'passed'\n\n\nclass MatchUser(UserModel):\n def __init__(self, uid, match_id, name, bio, age, birth_date, photos, gender,\n distance, job_name, job_title, school_name, school_id,\n ping_time, top_song, instagram_photos):\n super().__init__(uid, name, bio, age, birth_date, photos, gender,\n distance, job_name, job_title, school_name, school_id,\n ping_time, top_song, instagram_photos)\n\n self.match_id = match_id\n self.match_data = self.get_match_data()\n\n def get_match_data(self):\n \"\"\"Returns a [] of match data\"\"\"\n return [x for x in session.Session().list_matches() if x['_id'] == self.match_id][0]\n\n def message(self, body):\n \"\"\"Messages the user\"\"\"\n resp = r.post('/user/matches/{}'.format(self.match_id),\n {\"message\": str(body)})\n return resp['sent_date']\n\n def get_messages(self):\n \"\"\"Constructs a Message() object for each message\"\"\"\n return [Message(x, self.id, self.name) for x in self.match_data['messages']]\n\nclass Message():\n def __init__(self, data, uid, name):\n self.message_id = data['_id']\n self.data = data\n self.sent = dateutil.parser.parse(data['sent_date'])\n self.body = data['message']\n if data['from'] == uid:\n self.sender = name\n else:\n self.sender = \"Me\"\n if data['to'] == uid:\n self.to = name\n else:\n self.to = \"Me\"\n\n def like_message(self):\n \"\"\"Likes a message\"\"\"\n resp = r.post('/message/{}/like'.format(self.message_id), {})\n if 'error' in resp:\n return \"Error, unable to like message\"\n return resp\n\n def unlike_message(self):\n \"\"\"Unlikes a message\"\"\"\n resp = r.delete('/message/{}/like'.format(self.message_id))\n if resp.status_code == 204:\n return resp\n return \"Error, unable to unlike the message\"\n\n def is_liked(self):\n \"\"\"Returns True if the messages is liked, otherwise False\"\"\"\n liked_messages = [x['message_id'] for x in session.Session().get_updates()['liked_messages']]\n for mess_id in liked_messages:\n if mess_id == self.message_id:\n return True\n return False\n\n def __unicode__(self):\n return self.body\n\n def __str__(self):\n return self.body.encode('utf-8')\n\n def __repr__(self):\n return repr(self.body)\n\n\n\nclass UserController:\n def __init__(self, uid):\n self.id = uid\n self.me_id = session.Session().get_id()\n self.user_type = self._decode_user_type()\n self.data = self.get_data()\n self.const = JsonWrapper(self.data, iter_keys_only=False)\n\n def get_data(self):\n \"\"\"Returns the data of the user\"\"\"\n if self.user_type is 'Me':\n data = r.get('/profile')\n return data\n else:\n data = r.get('/user/{}'.format(self.id))\n if 'error' in data:\n print('Error user was not found')\n return data['results']\n\n def _decode_user_type(self):\n \"\"\"Returns the user_type (Me, Match, Normal) based on uid\"\"\"\n if self.me_id == self.id:\n return 'Me'\n elif self.me_id in self.id:\n self.match_id = self.id\n self.id = self.id.replace(self.me_id, '')\n return 'Match'\n else:\n return 'Normal'\n\n def get_user(self):\n \"\"\"Constructs the correct User Object based on user_type\"\"\"\n name = self.const.name\n bio = self.const.bio\n birth_date = self._decode_birth_date()\n age = self._decode_age()\n photos = [photo.url for photo in self.const.photos]\n gender = self._decode_gender()\n distance = self._decode_distance()\n job_name = self.const.jobs[0].company.name\n job_title = self.const.jobs[0].title.name\n school_name = 
self.const.schools[0].name\n school_id = self.const.schools[0].id\n ping_time = self.const.ping_time\n top_song = self._decode_theme_song()\n instagram_photos = [photo.image for photo in self.const.instagram.photos]\n if self.user_type is 'Normal':\n return NormalUser(self.id, name, bio, age, birth_date, photos, gender,\n distance, job_name, job_title, school_name, school_id,\n ping_time, top_song, instagram_photos)\n elif self.user_type is 'Match':\n return MatchUser(self.id, self.match_id, name, bio, age, birth_date, photos, gender,\n distance, job_name, job_title, school_name, school_id,\n ping_time, top_song, instagram_photos)\n elif self.user_type is 'Me':\n return UserModel(self.id, name, bio, age, birth_date, photos, gender,\n distance, job_name, job_title, school_name, school_id,\n ping_time, top_song, instagram_photos)\n\n def _decode_birth_date(self):\n \"\"\"Returns the human readable birth_date\"\"\"\n return dateutil.parser.parse(self.const.birth_date)\n\n def _decode_age(self):\n \"\"\"Returns age as an int\"\"\"\n today = datetime.date.today()\n return (today.year - self._decode_birth_date().year -\n ((today.month, today.day) <\n (self._decode_birth_date().month,\n self._decode_birth_date().day)))\n\n def _decode_gender(self):\n \"\"\"Converts gender to a human readable format\"\"\"\n gender = self.const.gender\n if gender is 1:\n return 'female'\n elif gender is 0:\n return 'male'\n\n def _decode_distance(self):\n \"\"\"Returns distance in miles\"\"\"\n if 'distance_mi' in self.data:\n return self.const.distance_mi\n elif 'distance_km' in self.data:\n return self.const.distance_km * 0.621371\n\n def _decode_jobs(self):\n \"\"\"Returns a [] of job names\"\"\"\n return [job.company.name for job in self.const.jobs]\n\n def _decode_theme_song(self):\n \"\"\"Returns a {name, id, artist} of the user's spotify theme\"\"\"\n theme_s = self.const.spotify_theme_track\n return {'name': theme_s.name,\n 'id': theme_s.id,\n 'artist': theme_s.artists[0].name}\n\nif __name__ == '__main__':\n pass\n", "id": "5123349", "language": "Python", "matching_score": 2.3400914669036865, "max_stars_count": 0, "path": "tinder_api/user.py" }, { "content": "import json\nimport pprint\n\n#old version of the wrapper -- thanks to https://github.com/sharkbound\n\nclass MissingValue:\n __slots__ = ()\n VALUE = '<MissingValue>'\n\n def __getattr__(self, item):\n return self\n\n def __getitem__(self, item):\n return self\n\n def __iter__(self):\n yield from ()\n\n def __contains__(self, item):\n return False\n\n def __bool__(self):\n return False\n\n def __repr__(self):\n return self.VALUE\n\n def __str__(self):\n return self.VALUE\n\n\nMISSING_VALUE = MissingValue()\n\n\nclass JsonWrapper:\n def __init__(self, data, iter_keys_only=False):\n \"\"\"\n :param data: the JSON data to wrap, can be a list, tuple, or dict\n :param iter_keys_only:\n sets the behavior when iterating over the wrapper when it contains a dict.\n False means that it will iterate over dict KEY/VALUE pairs,\n aka {'name': '<NAME>'} would iterate as: ('name', '<NAME>');\n True means the it will only iterate over dict KEYS,\n aka {'name': '<NAME>'} would iterate as: ('name')\n \"\"\"\n self.iter_keys_only = iter_keys_only\n self._data = data\n\n def pretty(self):\n \"\"\"\n returns a pretty printed version of the data this wrapper holds\n \"\"\"\n return pprint.pformat(self._data)\n\n def _wrap(self, key, value, no_key=False):\n if value is not MISSING_VALUE and isinstance(value, (dict, list, tuple)):\n value = self.__class__(value, 
iter_keys_only=self.iter_keys_only)\n if not no_key:\n self._data[key] = value\n return value\n\n def _get_value(self, item):\n value = None\n if isinstance(self._data, dict):\n value = self._data.get(item)\n elif (isinstance(self._data, (list, tuple)) and\n isinstance(item, int) and\n -len(self._data) <= item < len(self._data)):\n value = self._data[item]\n return self._wrap(item, value if value is not None else MISSING_VALUE)\n\n def __getattr__(self, item):\n if item in self.__dict__:\n return self.__dict__[item]\n\n return self._get_value(item)\n\n def __getitem__(self, item):\n return self._get_value(item)\n\n def __contains__(self, item):\n if isinstance(item, int) and isinstance(self._data, (list, tuple)):\n l = len(self._data)\n return -l <= item < l\n\n return item in self._data\n\n def __bool__(self):\n return bool(self._data)\n\n def __iter__(self):\n if isinstance(self._data, dict):\n yield from (self._data if self.iter_keys_only else self._data.items())\n elif isinstance(self._data, (list, tuple)):\n yield from (self._wrap(None, v, no_key=True) for v in self._data)\n else:\n try:\n yield from self._data\n except TypeError:\n yield from ()\n\n def __repr__(self):\n return f'<JsonWrapper {self._data.__class__.__name__}>'\n\n def __str__(self):\n if isinstance(self._data, dict):\n return pprint.pformat(tuple(self._data))\n\n return pprint.pformat(self._data)\n\n\ndef has_value(value):\n \"\"\"\n verifies that a value is not MISSING_VALUE\n True return means it is not MISSING_VALUE\n False return means that the value was MISSING_VALUE\n :param value: the value to check if it is not MISSING_VALUE\n \"\"\"\n return value is not MISSING_VALUE\n\n# def test():\n# data = JsonWrapper({\n# 'bio': r'¯\\_(ツ)_/¯',\n# 'photos': [\n# {\n# 'url': 'url here'\n# }\n# ]\n# })\n#\n# assert data.photos[0].url == 'url here'\n# assert data.photos[100].url is MISSING_VALUE\n# assert data.bio == r'¯\\_(ツ)_/¯'\n# assert data.missing is MISSING_VALUE\n", "id": "3738705", "language": "Python", "matching_score": 0.2755444049835205, "max_stars_count": 0, "path": "tinder_api/utils/wrapper.py" }, { "content": "\ndef cli():\n print('This is suroegin's package - sort')\n", "id": "1960818", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "myvirtualenv/lib/python3.7/site-packages/sort.py" }, { "content": "from datetime import datetime\n\n\nclass DeltaTimer:\n def __init__(self):\n self.marked = 0\n\n def mark(self):\n self.marked = datetime.now()\n \n def dt(self):\n print(self.marked - datetime.now())\n", "id": "9516931", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tinder_api/utils/delta.py" } ]
1.80133
ehoehn
[ { "content": "def kristallviolett(): #indicator band: 1605 - 1630\n return [110, 298, 294, 504, 711, 890, 1043, 1086, 1247, 1319, 1426, 1510, 1566, 1685, 1873]\n\ndef methylbenzenethiol3(): #3-Methylbenzenethiol #indicator band: 979 - 1018\n return [979, 1018, 1048, 1122, 824, 877, 1201, 1227]\n\ndef brilliantgreen(): #indicator band: 1605 - 1654\n return [190, 286, 386, 483, 1142, 1202, 1257, 1317, 1406, 1527, 1558, 1651]\n\ndef kristallviolett_al_Raja(): #indicator band: 1152 - 1215\n return [1093, 1240]\n\ndef malachitegreen(): #indicator band: 1440 - 1520\n return [1440, 1520]\n\n# def rodamine6g(): #indicator band: 1629 - 1674\n# return [1629, 1674]\n\ndef rhodamine6g(): #indicator band: 583 - 632\n return [583, 632]\n\ndef trypanblue(): #indicator band: 1546 - 1637\n return [1546, 1637]\n\ndef thiram(): #indicator band: 1338, 1440\n return [1338, 1440]\n\ndef benzene14dithiol(): #indicator band: 1017, 1111\n return [1017, 1111]\n\ndef brilliantblau(): #indicator band: 1545, 1682\n return [1545, 1682]\n\ndef vier_pyridinecarbonitrile(): #indicator band: 779, 913\n return [779, 913]\n\ndef vier_mercaptobenzoesaeure(): #indicator band: 1040, 1119\n return [1040, 1119]\n\ndef viervierstrich_dipyridyl(): #indicator band: 979, 1061\n return [979, 1061]\n\ndef PMBA_nach_Kevin(): #indicator band: 1514, 1640\n return [1514, 1640]", "id": "7236715", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/lib/analyte.py" }, { "content": "print(69)\nprint(69*0.75)\nprint(69*0.5)\nprint(69*0.25)\nprint(0)", "id": "9060704", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/silly calculations.py" }, { "content": "import serial\nimport serial.tools.list_ports\nimport regex as re\nimport io\nimport sys\nimport datetime, time\n\n\nser = serial.Serial()\n# print(ser)\n\n#ser.baudrate = 19200\nser.baudrate = 115200\n\n\nfo = open(\"foo.txt\", \"w\", encoding='utf-8')\n\nfor i in serial.tools.list_ports.comports():\n print(i)\n# for j in serial.tools.list_ports.grep('.'):\n# print(j)\n\nser.port = 'COM6'\n\nprint(ser)\n# with ser as open:\n# print(ser.open())\nser.open()\nprint(ser.is_open)\n\ntry:\n print(ser.read(100))\nexcept:\n ser.close()\n# print(ser.read())\n# ser.close()\n\n# sio = io.TextIOWrapper(io.BufferedRWPair(ser, ser))\n\n\n\n\n# https://stackoverflow.com/questions/13890935/does-pythons-time-time-return-the-local-or-utc-timestamp\n# https://stackoverflow.com/questions/54438914/adding-a-duration-to-a-start-time-string-to-get-a-finish-time\n# print(int.from_bytes(b'\\x11', byteorder=sys.byteorder))#\n\n# duration = input('Bitte Messdauer eingeben [h:mm:ss] ')\n# if duration == '':\n# duration = '0:00:02'\n# else:\n# duration = int(duration)\n#\n# ts = datetime.datetime.now()\n#\n# for i in range(1000):\n# i = ser.read()\n# i = int.from_bytes(i, byteorder=sys.byteorder)\n# tf = datetime.datetime.now()\n# te = tf - ts\n# #print(str(te).split('.'))\n# if str(te).split('.')[0] == str(duration):\n# break\n# fo.write(str(te) + ';' + str(i) + '\\n')\nfo.close()\n\n\n\n\n", "id": "6799057", "language": "Python", "matching_score": 0.7156352400779724, "max_stars_count": 0, "path": "serialsss.py" }, { "content": "import os\nimport numpy as np\nimport pandas as pd\nfrom datetime import time, timedelta, datetime\n#import datetime, time\n\n\ndef liste_in_string_umwandeln(input):\n ft = []\n for i in input:\n k = np.str(i)\n ft.append(k)\n return ft\n\n\ndef separate_the_two_columns(fd):\n timestamp = []\n 
intensity = []\n cnt = 0\n for l in fd:\n # print(l)\n if cnt % 2 == 0:\n timestamp.append(l.strip()[0:-1])\n if cnt % 2 == 1:\n intensity.append(l.strip())\n cnt += 1\n #print(len(timestamp))\n #print(len(intensity))\n return timestamp, intensity\n\n\ndef split_the_merged_stuff(intensity):\n for m in range(len(intensity)):\n # print(len(i))\n if len(intensity[m]) > 3:\n # print(intensity[m])\n subs = []\n for i in range(0, len(intensity[m]), 3):\n subs.append(intensity[m][i:i + 3])\n #print(subs)\n intensity[m] = subs\n #print(intensity)\n return subs, intensity\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.log') and dateiname.startswith('output_2019-02-04_16-04-53Einspritzen'):\n print(dateiname)\n with open(dateiname) as fd:\n timestamp, intensity = separate_the_two_columns(fd)\n\n # subs, intensity = split_the_merged_stuff(intensity)\n\n\n\n # print(timestamp)\n # print(len(timestamp), len(intensity))\n\n newtimestamp = []\n newintensity = []\n #print(newtimestamp)\n\n for ti in range(len(timestamp)-1, -1, -1):\n # print(ti)\n #print(timestamp[ti])\n # print(intensity[ti])\n if isinstance(intensity[ti], list):\n #print(True)\n #print(len(intensity[ti]))\n for k in range(len(intensity[ti])-1, -1, -1):\n # print(timestamp[ti], intensity[ti][k])\n newtimestamp.insert(0, timestamp[ti])\n newintensity.insert(0, intensity[ti][k])\n else:\n #print(timestamp[ti], intensity[ti])\n newtimestamp.insert(0, timestamp[ti])\n newintensity.insert(0, intensity[ti])\n\n #print(newtimestamp)\n # print(newintensity)\n #tim = pd.to_datetime(newtimestamp)\n #print(tim)\n #periodenindex = pd.PeriodIndex(tim, freq='ms')\n #print(periodenindex)\n # print(isinstance(newtimestamp[0], str))\n # print(newtimestamp[0])\n # t = time()\n #t = time.strptime(str(newtimestamp[0]), '%H:%M:%S.%f')\n #t = time.strptime('02:02:03.574', '%H:%M:%S.%f')\n\n #s=time()\n # s.fromisoformat(newtimestamp[1])\n # delta=t-s\n # print(delta)\n # timdelta = pd.to_timedelta(newtimestamp)\n # DateTime(newtimestamp[0])\n # print(pd.to_datetime(newtimestamp[1]) - pd.to_datetime(newtimestamp[0]))\n # print(timdelta[1] + newtimestamp[0])\n timee = [datetime.strptime(newtimestamp[0], '%H:%M:%S.%f') - datetime.strptime(newtimestamp[0],'%H:%M:%S.%f')]\n for n in range(len(newtimestamp)-1):\n timeelapsed = datetime.strptime(newtimestamp[n+1], '%H:%M:%S.%f') - datetime.strptime(newtimestamp[0],'%H:%M:%S.%f')\n timee.append(str(timeelapsed))\n timee = liste_in_string_umwandeln(timee)\n # print(timeelapsed)\n print(timee)\n\n # print(str(datetime.strptime(newtimestamp[1], '%H:%M:%S.%f')) + ' - ' + str(datetime.strptime(newtimestamp[0], '%H:%M:%S.%f')))\n\n #print(datetime.strptime(newtimestamp[1], '%H:%M:%S.%f') - datetime.strptime(newtimestamp[0], '%H:%M:%S.%f'))\n #print(datetime.combine(datetime.strptime(newtimestamp[1], '%H:%M:%S.%f')))\n # #- datetime.combine(datetime.strptime(newtimestamp[0], '%H:%M:%S.%f')))\n\n #timee = periodenindex.to_timestamp(freq='ms')\n #print(timee.nanosecond[0]-timee.nanosecond[5])\n #print(time[0])\n #print(pd.Period(time[0], freq='ms') - pd.Period(time[1], freq='ms'))\n # print(newintensity)\n df = pd.DataFrame(timee, columns=['time[hh:mm:ss]'])\n df2 = pd.DataFrame(newintensity, columns=['intensity[a. u.]'])\n #print(df2)\n df['intensity[a. u.]'] = df2['intensity[a. u.]']\n # df = df.groupby(df['time[hh:mm:ss]'])['intensity[a. 
u.]'].sum()\n # print(df)\n df.to_csv(dateiname.split('.')[0]+'_.csv', sep=';')\n\n", "id": "8532050", "language": "Python", "matching_score": 0.5554742813110352, "max_stars_count": 0, "path": "UV-Detektor/readfiles.py" }, { "content": "from lib.allgemein import liste_in_floats_umwandeln\nimport pandas as pd\nimport untangle\nfrom decimal import *\n\n\n#written by <NAME>\n\n\ndef get_xml_RecordTime_excitationwl(dateiname):\n obj = untangle.parse(dateiname)\n RecordTime = obj.XmlMain.Documents.Document['RecordTime']\n excitationwl = float(obj.XmlMain.Documents.Document.xDim.Calibration['LaserWave'])\n return RecordTime, excitationwl\n\n\ndef get_timestamps(dateiname):\n obj = untangle.parse(dateiname)\n predf = []\n for i in range(0, len(obj.XmlMain.Documents.Document.Data.Frame)):\n timestamp = obj.XmlMain.Documents.Document.Data.Frame[i]['TimeStamp']\n timestamp = Decimal(timestamp)\n predf.append(timestamp)\n posi = list(range(0, len(predf), 1))\n colunames = []\n for i in posi:\n colu = 'Frame ' + str(i + 1)\n colunames.append(colu)\n df = pd.DataFrame(predf, index=colunames, columns=['timestamp'])\n df_timestamps = df.transpose()\n return df_timestamps\n\n\ndef get_positions(dateiname):\n obj = untangle.parse(dateiname)\n predf = []\n for i in range(0,len(obj.XmlMain.Documents.Document.Data.Frame)):\n positions = obj.XmlMain.Documents.Document.Data.Frame[i]['ValuePosition']\n z = positions.split(\";\")\n ft = liste_in_floats_umwandeln(z)\n predf.append(ft)\n posi=list(range(0, len(predf),1))\n colunames = []\n for i in posi:\n colu = 'Frame ' + str(i + 1)\n colunames.append(colu)\n df = pd.DataFrame(predf, index=colunames, columns=['x [µm]','y [µm]','z [µm]'])\n df = df.transpose()\n return df\n\n\ndef get_relwavenumber(dateiname):\n obj = untangle.parse(dateiname)\n relwavenumber = obj.XmlMain.Documents.Document.xDim.Calibration['ValueArray']\n relwavenumber = relwavenumber.split('|')\n predf = liste_in_floats_umwandeln(relwavenumber)\n del predf[0]\n df1 = pd.DataFrame(predf, columns=['relWavenumber [1/cm]'])\n excitationwl = float(obj.XmlMain.Documents.Document.xDim.Calibration['LaserWave'])\n relwavenumbers = 1 / excitationwl * 10000000 - 1 / df1 * 10000000\n return relwavenumbers\n\n\ndef get_intensities(filename):\n relwavenumbers = get_relwavenumber(filename)\n obj = untangle.parse(filename)\n try:\n df = get_intensities_1Spectrum(relwavenumbers, obj)\n except:\n df = get_intensities_morethanoneSpectra(relwavenumbers, obj)\n return df\n\ndef get_intensities_morethanoneSpectra(relwavenumbers, obj):\n predf = []\n for i in range(0,len(obj.XmlMain.Documents.Document.Data.Frame)):\n inte = obj.XmlMain.Documents.Document.Data.Frame[i].cdata\n z = inte.split(\";\")\n z1 = liste_in_floats_umwandeln(z)\n predf.append(z1)\n colu = list(range(0,len(predf),1))\n colunames=[]\n for i in colu:\n colu='Frame ' + str(i+1)\n colunames.append(colu)\n df = pd.DataFrame(predf, index=colunames, columns=relwavenumbers['relWavenumber [1/cm]'])\n df = df.transpose()\n return df\n\n\ndef get_intensities_1Spectrum(relwavenumbers, obj):\n inte = obj.XmlMain.Documents.Document.Data.Frame.cdata\n z = inte.split(\";\")\n predf = liste_in_floats_umwandeln(z)\n colu = list(range(0,len(predf),1))\n colunames=[]\n for i in colu:\n colu='Frame ' + str(i+1)\n colunames.append(colu)\n df = pd.DataFrame(predf, columns=['Intensity [a. 
u.]'], index=relwavenumbers['relWavenumber [1/cm]'])\n return df\n\n\ndef get_times(dateiname):\n df_timestamps = get_timestamps(dateiname)\n nwtime = []\n colunames=[]\n for frame in df_timestamps:\n colunames.append(frame)\n for i in range(0, len(colunames)):\n df2 = (df_timestamps[colunames[i]]['timestamp'] - df_timestamps[colunames[0]]['timestamp']) / Decimal(10000000)\n # df2 = float(df1)\n nwtime.append(df2)\n df = pd.DataFrame(nwtime, columns=['time [s]'], index=[colunames])\n df = df.transpose()\n return df\n", "id": "7352052", "language": "Python", "matching_score": 3.327996253967285, "max_stars_count": 0, "path": "Ramanspektren/lib/xml_import.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei mit Einzelaufnahme/ Einzelspektrum\noutput file: Graph mit Spektrum: Spektrum des Frames nach Baselinekorrektur\n\nVersion für Anna mit allen Funktionen drin\n'''\n#written by <NAME>\n\n\nimport os\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nimport untangle\nimport numpy as np\nimport pandas as pd\nimport regex as re\nfrom scipy import stats\n\n\nsuffix_for_new_filename = '_1Spectrum.html'\npunkte_baseline = [1566, 1685, 1873]\n\n\n\ndef split_in_frames_and_back(df, spectrum_values):\n copy_df = df.copy()\n predf=[]\n for frame in copy_df:\n framenummer = frame\n nframe = split_in_baselines(copy_df[frame], spectrum_values, framenummer)\n predf.append(nframe)\n ndf = pd.DataFrame(predf)\n ndf = ndf.transpose()\n return ndf\n\n\ndef split_in_baselines(frame, spectrum_values, framenummer):\n copy_frame = frame.copy()\n for i in range(0, len(spectrum_values) - 1):\n if i == 0:\n points = [spectrum_values[i], spectrum_values[i + 1]]\n kurvenabschnitt = copy_frame.ix[points[0]:points[1]]\n gefittet = fitten(kurvenabschnitt, spectrum_values, i, framenummer)\n a = gefittet.ix[spectrum_values[i]:spectrum_values[i + 1]]\n else:\n points = [spectrum_values[i], spectrum_values[i + 1]]\n kurvenabschnitt = copy_frame.ix[points[0]:points[1]]\n gefittet = fitten(kurvenabschnitt, spectrum_values, i, framenummer)\n b = gefittet.ix[spectrum_values[i] + 1:spectrum_values[i + 1]]\n a = a.append(b)\n nframe = copy_frame - copy_frame + a\n nframe = nframe.fillna(0)\n return nframe\n\n\ndef fitten(kurvenabschnitt, spectrum_values, i, framenummer): #, [spectrum_values[i]: spectrum_values[i + 1]]):\n copy_kurvenabschnitt = kurvenabschnitt.copy()\n dataset = copy_kurvenabschnitt.ix[[spectrum_values[i], spectrum_values[i + 1]]]\n x = dataset.index.values.tolist()\n y = dataset.values.tolist()\n m, t = lin_fit(x, y)\n x_bl = copy_kurvenabschnitt\n zwischenDF = pd.DataFrame(x_bl)\n zwischenDF[framenummer] = (copy_kurvenabschnitt.index * m) + t\n zwischenDF = pd.Series(zwischenDF[framenummer])\n return zwischenDF\n\n\ndef lin_fit(x, y, xlab=None, ylab=None, **kwargs):\n \"\"\"Fit a set of data with stats.lingress and plot it.\"\"\"\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n ''' #https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html\n slope : float\n slope of the regression line\n intercept : float\n intercept of the regression line\n rvalue : float\n correlation coefficient\n pvalue : float\n two-sided p-value for a hypothesis test whose null hypothesis is that the slope is zero.\n stderr : float\n Standard error of the estimated gradient.\n '''\n return slope, intercept\n\ndef get_spectrum_values(df,punkte_baseline):\n ind = df.index.values.tolist()\n ks = []\n for i in range(0, len(punkte_baseline)):\n for k in ind:\n if 
re.match(str(punkte_baseline[i]) + '\\.[0-9]+', str(k)):\n ks.append(k)\n return ks\n\ndef baselinecorrection(intensities, punkte_baseline):\n df = intensities.copy()\n punkte_baseline.sort()\n spectrum_values = get_spectrum_values(df, punkte_baseline)\n dff_neue_BL = split_in_frames_and_back(df, spectrum_values)\n df_korregiert = df - dff_neue_BL\n return df_korregiert\n\ndef liste_in_floats_umwandeln(input):\n ft = []\n for i in input:\n k = np.float64(i)\n ft.append(k)\n return ft\n\ndef get_relwavenumber(dateiname):\n obj = untangle.parse(dateiname)\n relwavenumber = obj.XmlMain.Documents.Document.xDim.Calibration['ValueArray']\n relwavenumber = relwavenumber.split('|')\n predf = liste_in_floats_umwandeln(relwavenumber)\n del predf[0]\n df1 = pd.DataFrame(predf, columns=['relWavenumber [1/cm]'])\n relwavenumbers = 1 / 473 * 10000000 - 1 / df1 * 10000000\n return relwavenumbers\n\ndef get_intensities(filename):\n relwavenumbers = get_relwavenumber(filename)\n obj = untangle.parse(filename)\n try:\n df = get_intensities_1Spectrum(relwavenumbers, obj)\n except:\n df = get_intensities_morethanoneSpectra(relwavenumbers, obj)\n return df\n\ndef get_intensities_morethanoneSpectra(relwavenumbers, obj):\n predf = []\n for i in range(0,len(obj.XmlMain.Documents.Document.Data.Frame)):\n inte = obj.XmlMain.Documents.Document.Data.Frame[i].cdata\n z = inte.split(\";\")\n z1 = liste_in_floats_umwandeln(z)\n predf.append(z1)\n colu = list(range(0,len(predf),1))\n colunames=[]\n for i in colu:\n colu='Frame ' + str(i+1)\n colunames.append(colu)\n df = pd.DataFrame(predf, index=colunames, columns=relwavenumbers['relWavenumber [1/cm]'])\n df = df.transpose()\n return df\n\n\ndef get_intensities_1Spectrum(relwavenumbers, obj):\n inte = obj.XmlMain.Documents.Document.Data.Frame.cdata\n z = inte.split(\";\")\n predf = liste_in_floats_umwandeln(z)\n colu = list(range(0,len(predf),1))\n colunames=[]\n for i in colu:\n colu='Frame ' + str(i+1)\n colunames.append(colu)\n df = pd.DataFrame(predf, columns=['Intensity [a. 
u.]'], index=relwavenumbers['relWavenumber [1/cm]'])\n return df\n\n\ndef plotly_Spectrum_2dscatter_layout(ind, xaxis_title, yaxis_title, range_nr, dtick_nr, ausan=False, positionsangabe='', annotation_y=''):\n layout = go.Layout(\n autosize=False,\n width=800,\n height=430,\n showlegend=True,\n legend=dict(\n x=0.85, y=1,\n font=dict(family='Arial, sans-serif',\n size=16,\n color='#000000')),\n yaxis=dict(\n title='<b>' + yaxis_title + '</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF'),\n xaxis=dict(\n title='<b>' + xaxis_title + '</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial bold, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=False,\n ticks='outside',\n tick0=50,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=range_nr,\n dtick=dtick_nr),\n annotations=[dict(\n visible=ausan,\n text=positionsangabe,\n font=dict(family='Arial, sans-serif',\n size=14,\n color='#000000'),\n x=range_nr[1] - range_nr[1] / 15, y=annotation_y,\n showarrow=False,\n )])\n return layout\n\n\ndef generate_filename(dateiname, suffix_for_new_filename):\n name = dateiname.split('.')\n del name[-1]\n separator = \".\"\n nwname = separator.join(name)\n nwfile = nwname + suffix_for_new_filename\n return nwfile\n\ndef plotly_Spectrum_1Spektrum_2dscatter_data(intensities, framenumber):\n ind = intensities.index.values.tolist()\n thirdCol = intensities[framenumber].values.tolist() # trace1 = go.Scatter(\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name=framenumber)\n return [trace3], ind\n\ndef plotly_Spectrum_1Spectrum(intensities, dateiname, suffix_for_new_filename):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_Spectrum_1Spektrum_2dscatter_data(intensities, 'Intensity [a. u.]')\n layout = plotly_Spectrum_2dscatter_layout(ind, xaxis_title='rel. Wavenumber [cm<sup>-1</sup>]', yaxis_title='Intensity [a. 
u.]', range_nr=[50, 2000], dtick_nr=200)\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.plot(fig, filename=nwfile) #, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('nahme.tvf') or dateiname.endswith('nahme.TVF'):\n print(dateiname)\n\n #try:\n intensities = get_intensities(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n plotly_Spectrum_1Spectrum(df_korregiert, dateiname, suffix_for_new_filename)\n # except:\n # print('does not work')\n", "id": "3758435", "language": "Python", "matching_score": 6.571447849273682, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVistaEinzelaufnahme to baseline_corr Plotly_Spektrum.py" }, { "content": "import pandas as pd\nimport regex as re\nfrom scipy import stats\nfrom lib.allgemein import liste_in_floats_umwandeln\n\n\ndef baselinecorrection(intensities, punkte_baseline):\n df = intensities.copy()\n punkte_baseline.sort()\n spectrum_values = get_spectrum_values(df, punkte_baseline)\n dff_neue_BL = split_in_frames_and_back(df, spectrum_values)\n df_korregiert = df - dff_neue_BL\n return df_korregiert\n\n\ndef get_spectrum_values(df, punkte_baseline):\n ind = df.index.values.tolist()\n ks = []\n for i in range(0, len(punkte_baseline)):\n for k in ind:\n if re.match(str(punkte_baseline[i]) + '\\.[0-9]+', str(k)):\n ks.append(k)\n break\n return ks\n\n\ndef split_in_frames_and_back(df, spectrum_values):\n copy_df = df.copy()\n predf=[]\n for frame in copy_df:\n framenummer = frame\n nframe = split_in_baselines(copy_df[frame], spectrum_values, framenummer)\n predf.append(nframe)\n ndf = pd.DataFrame(predf)\n ndf = ndf.transpose()\n return ndf\n\n\ndef split_in_baselines(frame, spectrum_values, framenummer):\n copy_frame = frame.copy()\n for i in range(0, len(spectrum_values) - 1):\n if i == 0:\n points = [spectrum_values[i], spectrum_values[i + 1]]\n kurvenabschnitt = copy_frame.ix[points[0]:points[1]]\n gefittet = fitten(kurvenabschnitt, spectrum_values, i, framenummer)\n a = gefittet.ix[spectrum_values[i]:spectrum_values[i + 1]]\n else:\n points = [spectrum_values[i], spectrum_values[i + 1]]\n kurvenabschnitt = copy_frame.ix[points[0]:points[1]]\n gefittet = fitten(kurvenabschnitt, spectrum_values, i, framenummer)\n b = gefittet.iloc[1:]\n a = a.append(b)\n nframe = copy_frame - copy_frame + a\n nframe = nframe.fillna(0)\n return nframe\n\n\ndef fitten(kurvenabschnitt, spectrum_values, i, framenummer): #, [spectrum_values[i]: spectrum_values[i + 1]]):\n copy_kurvenabschnitt = kurvenabschnitt.copy()\n dataset = copy_kurvenabschnitt.ix[[spectrum_values[i], spectrum_values[i + 1]]]\n x = liste_in_floats_umwandeln(dataset.index.values.tolist())\n y = liste_in_floats_umwandeln(dataset.values.tolist())\n m, t = lin_fit(x, y)\n x_bl = liste_in_floats_umwandeln(copy_kurvenabschnitt)\n print(x_bl)\n print(copy_kurvenabschnitt.index)\n\n lllist = pd.DataFrame(copy_kurvenabschnitt.index)\n print(lllist)\n zwischenDF = pd.DataFrame(x_bl)\n zwischenDF[framenummer] = (lllist * m) + t\n zwischenDF = pd.Series(zwischenDF[framenummer])\n return zwischenDF\n\n\ndef lin_fit(x, y, xlab=None, ylab=None, **kwargs):\n \"\"\"Fit a set of data with stats.lingress and plot it.\"\"\"\n slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)\n ''' #https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html\n slope : float\n slope of the regression line\n intercept : float\n intercept 
of the regression line\n rvalue : float\n correlation coefficient\n pvalue : float\n two-sided p-value for a hypothesis test whose null hypothesis is that the slope is zero.\n stderr : float\n Standard error of the estimated gradient.\n '''\n return slope, intercept\n", "id": "10185938", "language": "Python", "matching_score": 3.0251882076263428, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/lib/baseline_corr_for_csv.py" }, { "content": "import regex as re\n\n\ndef get_idxfcore(df, spectrum_values):\n copy_df = df.copy()\n idxfcore = []\n for i in range(0, len(spectrum_values)):\n if i + 1 == len(spectrum_values):\n break\n else:\n points = [spectrum_values[i], spectrum_values[i + 1]]\n sp = copy_df.ix[points[0]:points[1]]\n x_bl = sp.index.values.tolist()\n for j in x_bl[:-1]:\n idxfcore.append(j)\n idxfcore.append(x_bl[-1])\n return idxfcore\n\n\ndef get_point_values(df, punkt):\n ind = df.ix[0].values.tolist()\n ind = liste_in_floats_umwandeln(ind)\n ks = []\n for i in range(0, len([punkt])):\n for k in ind:\n if re.match(str(punkt) + '\\.[0-9]+', str(k)):\n ks.append(k)\n return ks", "id": "4056075", "language": "Python", "matching_score": 0.6900181174278259, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/lib/nicht_in_benutzung.py" }, { "content": "'''\nimput file: .csv-Datei aus Osziloskop\noutput file: eine Datei mit allen Kennzahlen, die nach Glattung einer berechnet wurden\n'''\n#written by <NAME>\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport scipy.signal\n\n\nused_resistor = 1000 # in [Ohm]\n\n\ndef get_voltage(dateiname):\n nr = dateiname.split('_')\n for i in nr:\n if i.endswith('V'):\n if not i.endswith('CSV'):\n nr = i[:-1]\n nr = nr.replace(',', '.')\n voltage = float(nr)\n return voltage\n\ndef import_from_oszi_data(dateiname):\n df = pd.read_csv(dateiname, sep=',', header=0, index_col=0, skiprows=16, names=['time [s]', 'measured voltage [V]', 'leer'])\n del df['leer']\n df1 = df.replace('Null', np.nan)\n df1.dropna(axis=0, how='all', inplace=True)\n df2 = df1.apply(pd.to_numeric, errors='raise')\n return df2\n\ndef get_current(df, used_resistor):\n df_current = pd.DataFrame(df['measured voltage [V] smoothed'] / used_resistor).rename(columns={'measured voltage [V] smoothed':'current [A]'})\n df_current['current [mA]'] = df_current['current [A]'] * 1000\n df_current['current [µA]'] = df_current['current [mA]'] * 1000\n return df_current\n\ndef split_in_sections(data_to_cut, cutting_points, applyed_voltage):\n copy_data_to_cut = data_to_cut.copy()\n for i in range(0, len(cutting_points) - 1):\n if i == 0:\n points = [cutting_points[i], cutting_points[i + 1]]\n kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n a = bearbeitet.ix[cutting_points[i]:cutting_points[i + 1]]\n else:\n points = [cutting_points[i], cutting_points[i + 1]]\n kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n b = bearbeitet.ix[cutting_points[i] + 1:cutting_points[i + 1]]\n a = a.append(b)\n ndata_to_cut = copy_data_to_cut - copy_data_to_cut + a\n ndata_to_cut = ndata_to_cut.fillna(0)\n return ndata_to_cut\n\ndef bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage): # , [spectrum_values[i]: spectrum_values[i + 1]]):\n copy_kurvenabschnitt = kurvenabschnitt.copy()\n dataset = copy_kurvenabschnitt.ix[cutting_points[i] : cutting_points[i + 1]]\n dataset = applyed_voltage - 
abs(dataset)\n return dataset\n\ndef get_voltage_in_chip(df, applyed_voltage):\n predf_voltage = split_in_sections(df['measured voltage [V] smoothed'], [34, 145], applyed_voltage)\n df_voltage = pd.DataFrame(predf_voltage).rename(columns={'measured voltage [V] smoothed':'in chip voltage [V]'})\n return df_voltage\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('.csv') or dateiname.endswith('.CSV'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n measured_voltages = import_from_oszi_data(list_dateiname[i])\n # print(measured_voltages)\n measured_voltages['measured voltage [V] smoothed'] = scipy.signal.savgol_filter(measured_voltages, window_length=21, polyorder=1, axis=0, mode='nearest') # ggf auch polyorder=2 oder 3\n print(measured_voltages)\n measured_voltages.to_csv('OsziData_allImportantNumbersIn1.csv', sep=';')\n current = get_current(measured_voltages, used_resistor)\n #in_chip_voltage = get_voltage_in_chip(measured_voltages, get_voltage(list_dateiname[i]))\n # df_in_chip_data = in_chip_voltage.copy()\n # df_in_chip_data['current [µA]'] = current['current [µA]']\n # maxima = df_in_chip_data.apply(max, axis=0)\n # maxima['in chip resistance [Ohm]'] = maxima['in chip voltage [V]'] * 1000000 / maxima['current [µA]'] # resistance [Ohm]\n # df_maxima = (pd.DataFrame(maxima, index=maxima.index)).transpose()\n # df_a = df_maxima\n # df_a = df_a.set_index([[list_dateiname[i]]])\n # if i is not 0:\n # with open(list_dateiname[i]) as fd:\n # measured_voltages = import_from_oszi_data(list_dateiname[i])\n # measured_voltages['measured voltage [V] smoothed'] = scipy.signal.savgol_filter(measured_voltages, window_length=21, polyorder=1, axis=0, mode='nearest') # ggf auch polyorder=2 oder 3\n # current = get_current(measured_voltages, used_resistor)\n # in_chip_voltage = get_voltage_in_chip(measured_voltages, get_voltage(list_dateiname[i]))\n # df_in_chip_data = in_chip_voltage.copy()\n # df_in_chip_data['current [µA]'] = current['current [µA]']\n # maxima = df_in_chip_data.apply(max, axis=0)\n # maxima['in chip resistance [Ohm]'] = maxima['in chip voltage [V]'] * 1000000 / maxima['current [µA]'] # resistance [Ohm]\n # df_maxima = (pd.DataFrame(maxima, index=maxima.index)).transpose()\n # df_b = df_maxima\n # df_b = df_b.set_index([[list_dateiname[i]]])\n # # print(i)\n # df_a = df_a.append(df_b)\n\n# df_a.to_csv('OsziData_allImportantNumbersIn1.csv', sep=';')\n", "id": "10021935", "language": "Python", "matching_score": 5.947372913360596, "max_stars_count": 0, "path": "Oszilloskop/Oszi-Data to allImportantNumbersIn1.py" }, { "content": "'''\nimput file: .csv-Datei aus Osziloskop\noutput file: eine Datei mit allen Kennzahlen, die nach Glattung einer berechnet wurden\n'''\n#written by <NAME>\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport scipy.signal\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nfrom decimal import *\n\n# used_resistor = 1000 # in [Ohm]\n#\n# def generate_filename(dateiname, suffix_for_new_filename):\n# name = dateiname.split('.')\n# del name[-1]\n# separator = \".\"\n# nwname = separator.join(name)\n# nwfile = nwname + suffix_for_new_filename\n# return nwfile\n#\n# def get_voltage(dateiname):\n# nr = dateiname.split('_')\n# for i in nr:\n# if i.endswith('V'):\n# if not i.endswith('CSV'):\n# nr = i[:-1]\n# nr = nr.replace(',', '.')\n# voltage = float(nr)\n# return voltage\n#\n# def 
import_from_oszi_data(dateiname):\n# df = pd.read_csv(dateiname, sep=',', header=0, index_col=0, skiprows=16, names=['time [s]', 'measured voltage [V]', 'leer'])\n# del df['leer']\n# df1 = df.replace('Null', np.nan)\n# df1.dropna(axis=0, how='all', inplace=True)\n# df2 = df1.apply(pd.to_numeric, errors='raise')\n# return df2\n#\n# def get_current(df, used_resistor):\n# df_current = pd.DataFrame(df['measured voltage [V] smoothed'] / used_resistor).rename(columns={'measured voltage [V] smoothed':'current [A]'})\n# df_current['current [mA]'] = df_current['current [A]'] * 1000\n# df_current['current [µA]'] = df_current['current [mA]'] * 1000\n# return df_current\n#\n# def split_in_sections(data_to_cut, cutting_points, applyed_voltage):\n# copy_data_to_cut = data_to_cut.copy()\n# for i in range(0, len(cutting_points) - 1):\n# if i == 0:\n# points = [cutting_points[i], cutting_points[i + 1]]\n# kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n# bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n# a = bearbeitet.ix[cutting_points[i]:cutting_points[i + 1]]\n# else:\n# points = [cutting_points[i], cutting_points[i + 1]]\n# kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n# bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n# b = bearbeitet.ix[cutting_points[i] + 1:cutting_points[i + 1]]\n# a = a.append(b)\n# ndata_to_cut = copy_data_to_cut - copy_data_to_cut + a\n# ndata_to_cut = ndata_to_cut.fillna(0)\n# return ndata_to_cut\n#\n# def bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage): # , [spectrum_values[i]: spectrum_values[i + 1]]):\n# copy_kurvenabschnitt = kurvenabschnitt.copy()\n# dataset = copy_kurvenabschnitt.ix[cutting_points[i] : cutting_points[i + 1]]\n# dataset = applyed_voltage - abs(dataset)\n# return dataset\n#\n# def get_voltage_in_chip(df, applyed_voltage):\n# predf_voltage = split_in_sections(df['measured voltage [V] smoothed'], [34, 145], applyed_voltage)\n# df_voltage = pd.DataFrame(predf_voltage).rename(columns={'measured voltage [V] smoothed':'in chip voltage [V]'})\n# return df_voltage\n#\n# # list_dateiname = []\n# # for dateiname in os.listdir():\n# # if dateiname.endswith('.csv') or dateiname.endswith('.CSV'):\n# # print(dateiname)\n# # list_dateiname.append(dateiname)\n# # for i in range(0, len(list_dateiname)):\n# # if i == 0:\n# # with open(list_dateiname[i]) as fd:\n# # measured_voltages = import_from_oszi_data(list_dateiname[i])\n# # # print(measured_voltages)\n\n\ndef liste_in_decimals_umwandeln(input):\n ft = []\n for i in input:\n k = Decimal(i)\n ft.append(k)\n return ft\n\ndef liste_in_floats_umwandeln(input):\n ft = []\n for i in input:\n k = np.float64(i)\n ft.append(k)\n return ft\n\ndef generate_filename(dateiname, suffix_for_new_filename):\n name = dateiname.split('.')\n del name[-1]\n separator = \".\"\n nwname = separator.join(name)\n nwfile = nwname + suffix_for_new_filename\n return nwfile\n\n\ndef fit_func(x, A0, frequ, Lambda):\n return A0 * np.sin(x * 2 * np.pi * 1/frequ + Lambda)\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('180427_16_5MHz_DS0008_w21_o1_s.csv'): # or dateiname.endswith('.CSV'):\n with open(dateiname, 'r') as fd:\n print(dateiname)\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n # print(df)\n x = df.index.values.tolist()\n # print(x)\n y = df['current [µA]'].values.tolist()\n # print(y)\n # print(int(df['current [µA]'].max()))\n A = int(df['current [µA]'].max())\n # 
print(A)\n # x = np.array([1, 2, 3, 9])\n # y = np.array([1, 4, 1, 3])\n\n\n\n # def fit_func(x, a, b):\n # return a * x + b\n\n\n # params = curve_fit(fit_func, x, y)\n params, r = curve_fit(fit_func, x, y, p0=[1, 1000000, 0.5])\n # print(x)\n # # for i in x:\n # # print(i)\n # print(np.dtype(params[0]))\n # print(np.dtype(params[1]))\n # print(np.dtype(params[2]))\n # # # print(np.dtype(params[0]))\n # # # x = liste_in_floats_umwandeln(x)\n # print(isinstance(x[0], float))\n # print(x[1])\n # x = liste_in_decimals_umwandeln(x)\n\n # print(x)\n print(params)\n yaj = fit_func(df.index,\n A0=params[0],\n frequ=params[1],\n Lambda=params[2])\n # print(yaj)\n data = pd.DataFrame(params, index=['A0=','frequenz=','Lambda='], columns=['A0 * np.sin(x * 2 * np.pi * 1/frequ + Lambda'])\n # data.append('A0 * np.sin(x * 2 * np.pi * 1/frequ + Lambda')\n # data.append('A0=')\n # data.append(params[0])\n # data.append('frequenz=')\n # data.append(params[1])\n # data.append('Lambda=')\n # data.append(params[2])\n\n print(data)\n data.to_csv(generate_filename(dateiname, '_FitParameters.csv'), sep=';')\n\n plt.plot(x, y, 'x', x, yaj, 'r-')\n plt.show()\n\n# measured_voltages = import_from_oszi_data(dateiname)\n# measured_voltages['measured voltage [V] smoothed'] = scipy.signal.savgol_filter(measured_voltages, window_length=21, polyorder=1, axis=0, mode='nearest') # ggf auch polyorder=2 oder 3\n# print(measured_voltages)\n# #measured_voltages.to_csv('OsziData_allImportantNumbersIn1.csv', sep=';')\n# current = get_current(measured_voltages, used_resistor)\n# print(current)\n# measured_voltages['current [µA]'] = current['current [µA]']\n# print(measured_voltages)\n# measured_voltages.to_csv(generate_filename(dateiname, '_w21_o1_s.csv'), sep = ';')\n# #in_chip_voltage = get_voltage_in_chip(measured_voltages, get_voltage(list_dateiname[i]))\n# # df_in_chip_data = in_chip_voltage.copy()\n# # df_in_chip_data['current [µA]'] = current['current [µA]']\n# # maxima = df_in_chip_data.apply(max, axis=0)\n# # maxima['in chip resistance [Ohm]'] = maxima['in chip voltage [V]'] * 1000000 / maxima['current [µA]'] # resistance [Ohm]\n# # df_maxima = (pd.DataFrame(maxima, index=maxima.index)).transpose()\n# # df_a = df_maxima\n# # df_a = df_a.set_index([[list_dateiname[i]]])\n# # if i is not 0:\n# # with open(list_dateiname[i]) as fd:\n# # measured_voltages = import_from_oszi_data(list_dateiname[i])\n# # measured_voltages['measured voltage [V] smoothed'] = scipy.signal.savgol_filter(measured_voltages, window_length=21, polyorder=1, axis=0, mode='nearest') # ggf auch polyorder=2 oder 3\n# # current = get_current(measured_voltages, used_resistor)\n# # in_chip_voltage = get_voltage_in_chip(measured_voltages, get_voltage(list_dateiname[i]))\n# # df_in_chip_data = in_chip_voltage.copy()\n# # df_in_chip_data['current [µA]'] = current['current [µA]']\n# # maxima = df_in_chip_data.apply(max, axis=0)\n# # maxima['in chip resistance [Ohm]'] = maxima['in chip voltage [V]'] * 1000000 / maxima['current [µA]'] # resistance [Ohm]\n# # df_maxima = (pd.DataFrame(maxima, index=maxima.index)).transpose()\n# # df_b = df_maxima\n# # df_b = df_b.set_index([[list_dateiname[i]]])\n# # # print(i)\n# # df_a = df_a.append(df_b)\n#\n# # df_a.to_csv('OsziData_allImportantNumbersIn1.csv', sep=';')\n", "id": "5310125", "language": "Python", "matching_score": 6.0176544189453125, "max_stars_count": 0, "path": "Oszilloskop/Fit.py" }, { "content": "'''\nimput file: .csv-Datei aus Osziloskop\noutput file: #eine Datei mit allen Kennzahlen, die nach Glattung 
einer berechnet wurden\n'''\n#written by <NAME>\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport scipy.signal\nimport plotly\nfrom plotly import graph_objs as go\n#import lib.plotlygraphen\n\n\nsuffix_for_new_filename = '_xy.html'\n\n\ndef generate_filename(dateiname, suffix_for_new_filename):\n name = dateiname.split('.')\n del name[-1]\n separator = \".\"\n nwname = separator.join(name)\n nwfile = nwname + suffix_for_new_filename\n return nwfile\n\ndef import_from_oszi_data(dateiname, ll):\n df = pd.read_csv(dateiname, sep=',', header=0, usecols=[0,1], index_col=0, skiprows=ll-1, names=['time stamp', 'intensity unit'])\n df1 = df.replace('Null', np.nan)\n df1.dropna(axis=0, how='all', inplace=True)\n df2 = df1.apply(pd.to_numeric, errors='raise')\n return df2\n\ndef import_from_oszi_data_all_channels(dateiname, columnnames, ll):\n columnnames.insert(0, 'time stamp')\n #print(isinstance(columnnames, list))\n df = pd.read_csv(dateiname, sep=',', header=0, index_col=0, skiprows=ll-1)\n # print(df.head())\n del df[df.columns[-1]]\n # print(df.head())\n df1 = df.replace('Null', np.nan)\n df1.dropna(axis=0, how='all', inplace=True)\n df2 = df1.apply(pd.to_numeric, errors='raise')\n # print(df2.head())\n #print(len(df2.columns))\n if len(df2.columns) > 1:\n for i in df2[df2.columns]:\n # print(i)\n if i.startswith('Waveform Data'):\n del df2[i]\n # print(df2.head()) # names=columnnames\n for j in range(len(columnnames), 0, -1):\n # print(columnnames[j - 1])\n if not columnnames[j - 1].startswith('CH'):\n del columnnames[j - 1]\n # print(columnnames)\n df2.columns = columnnames # , axis=1, inplace=True)\n # print(df2.head())\n return df2\n if len(df2.columns) == 1:\n return df2\n\n\n# for c, line in enumerate(search, 1):\n# line = line.rstrip() # remove '\\n' at end of line\n# if line.startswith('Waveform Data'):\n\ndef split_in_sections(data_to_cut, cutting_points, applyed_voltage):\n copy_data_to_cut = data_to_cut.copy()\n for i in range(0, len(cutting_points) - 1):\n if i == 0:\n points = [cutting_points[i], cutting_points[i + 1]]\n kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n a = bearbeitet.ix[cutting_points[i]:cutting_points[i + 1]]\n else:\n points = [cutting_points[i], cutting_points[i + 1]]\n kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n b = bearbeitet.ix[cutting_points[i] + 1:cutting_points[i + 1]]\n a = a.append(b)\n ndata_to_cut = copy_data_to_cut - copy_data_to_cut + a\n ndata_to_cut = ndata_to_cut.fillna(0)\n return ndata_to_cut\n\ndef bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage): # , [spectrum_values[i]: spectrum_values[i + 1]]):\n copy_kurvenabschnitt = kurvenabschnitt.copy()\n dataset = copy_kurvenabschnitt.ix[cutting_points[i] : cutting_points[i + 1]]\n dataset = applyed_voltage - abs(dataset)\n return dataset\n\n\n\ndef plotly_xy_yFehler_data(x_values, y_values, errorx_values, errory_values, errorx_ausan = False, errory_ausan = False):\n print(plotly.__version__)\n if errorx_values is not None:\n errorx_ausan = True\n if errory_values is not None:\n errory_ausan = True\n\n nrCol = []\n for l in y_values:\n measu = y_values[l].values.tolist()\n nrCol.append(measu)\n #print(nrCol)\n # for l in y_values:\n # #print(l)\n # # print(y_values[l][:].name)\n # # print(y_values[:][l].name)\n # #\n # #print(isinstance(y_values[l].name, tuple))\n # y_valuename = 
str(y_values[l].name)\n # #print(y_valuename[2:5])\n # measu = str(y_values[l].name)\n # nrCol.append(measu[2:5])\n # # print(nrCol)\n # #print(nrCol)\n\n names = []\n # for k in y_values:\n # nr = k.split('_')\n # n = nr[7]\n # # print(nr)\n # r = n.split('n')\n # names.append(r)\n\n traces = []\n for t in range(0, len(nrCol)):\n # print(t)\n trace = go.Scatter(\n x=x_values,\n y=nrCol[t],\n error_x=dict(\n type='data',\n array=errorx_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errorx_ausan\n ),\n error_y=dict(\n type='data',\n array=errory_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errory_ausan\n ),\n mode='lines',\n # name=names_numbers[t],\n name=y_values.columns[t],\n line=dict(\n width='1',\n # color=colors[t],\n # dash=lineform[t]\n # colorscale = Ramanspektren.lib.plotlygraphen.jet[t]\n # color='rgb(166, 166, 166)'\n\n )\n )\n # marker=dict(\n # sizemode='diameter',\n # sizeref=1, #relative Größe der Marker\n # sizemin=20,\n # size=10,\n # color='#000000',\n # # opacity=0.8,\n # line=dict(color='rgb(166, 166, 166)',\n # width=0)))\n\n traces.append(trace)\n\n return traces\n\n\ndef plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick):\n layout = go.Layout(\n autosize=True,\n width=600,\n height=430,\n margin=dict(l=100),\n legend=dict(x=1, y=1, # legend=dict(x=0.85, y=1,\n font=dict(family='Arial, sans-serif',\n size=20,\n color='#000000')),\n xaxis=dict(\n title='<b>' + xaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=24,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n # autotick=True,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=x_range,\n # range=[0, 2.5],\n dtick=x_dtick\n ),\n yaxis=dict(\n title='<b>' + yaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=24,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=y_range,\n # range=[0, 105],\n dtick=y_dtick\n ))\n return layout\n\n\ndef plotly_xy_yFehler(x_values, y_values, errorx=None, errory=None, dateiname=None, suffix_for_new_filename=None, x_range=None, y_range=None, x_dtick=None, y_dtick=None, xaxis_title='', yaxis_title='', x_lables=True, y_lables=True, z_lables=True):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_xy_yFehler_data(x_values, y_values, errorx, errory),\n layout=plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick))\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) #, image_filename=nwfile) #, image='png', image_width=1600, image_height=860)\n\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.csv') or dateiname.endswith('.CSV'):\n with open(dateiname) as search:\n for c, line in enumerate(search, 1):\n line = line.rstrip() # remove '\\n' at end of line\n if line.startswith('Waveform Data'):\n ll = c\n if line.startswith('Source'):\n columnnames = line.split(',')\n for j in range(len(columnnames), 0, -1):\n # print(j, columnnames[j-1])\n if not columnnames[j-1].startswith('CH'):\n del columnnames[j-1]\n # 
print(columnnames)\n\n # with open(dateiname) as fd:\n print(dateiname)\n measured_data = import_from_oszi_data_all_channels(dateiname, columnnames, ll)\n # measured_voltages['measured voltage [V] smoothed'] = scipy.signal.savgol_filter(measured_voltages, window_length=21, polyorder=1, axis=0, mode='nearest') # ggf auch polyorder=2 oder 3\n # print(measured_data.head())\n # df = pd.read_csv(dateiname, sep=',', nrows=ll, header=None, usecols=[0, 1], index_col=0, skiprows=0,\n # names=['parameter name', 'parameter value'])\n # print(df)\n # print(df.loc['Vertical Units'])\n # print(df.loc['Vertical Scale'])\n # print(measured_data.index*float(df.loc['Vertical Scale']))\n indexasnumeric = pd.to_numeric(measured_data.index)\n # print(indexasnumeric)\n # measured_data = measured_data.reindex(indexasnumeric)\n # print(measured_data)\n # measured_data.index.names = ['time [s]']\n # measured_data.rename({'time stamp':'time [s]', 'intensity unit':'intensssss'}, inplace=True)\n # print(measured_data)\n measured_data['time [s]'] = indexasnumeric - indexasnumeric[0]\n # print(measured_data['time [s]'])\n measured_data.reset_index(inplace=True)\n\n # print(measured_data.head())\n measured_data.set_index(measured_data.iloc[:,-1], inplace=True)\n #measured_data.set_index('time [s]', inplace=True)\n # print(measured_data.head())\n # measured_data.to_csv('kjh')\n #print(measured_data)\n measured_data.index.rename('time [s]', inplace=True)\n del measured_data['time [s]']\n del measured_data[measured_data.columns[0]]\n # print(measured_data.head())\n dfg = measured_data\n\n # print(df)\n #df.columns = ['time [s]']\n # df.rename(columns={'intensity unit':'voltage [V]'}, inplace=True)\n\n # print(df.head())\n\n # #measured_voltages.to_csv('OsziData_allImportantNumbersIn1.csv', sep=';')\n # current = get_current(measured_voltages, used_resistor)\n # print(current)\n # measured_voltages['current [µA]'] = current['current [µA]']\n # print(measured_voltages)\n # measured_voltages.to_csv(generate_filename(dateiname, '_w21_o1_s.csv'), sep = ';')\n # in_chip_voltage = get_voltage_in_chip(measured_voltages, get_voltage(list_dateiname[i]))\n # df_in_chip_data = in_chip_voltage.copy()\n # df_in_chip_data['current [µA]'] = current['current [µA]']\n # maxima = df_in_chip_data.apply(max, axis=0)\n # maxima['in chip resistance [Ohm]'] = maxima['in chip voltage [V]'] * 1000000 / maxima['current [µA]'] # resistance [Ohm]\n # df_maxima = (pd.DataFrame(maxima, index=maxima.index)).transpose()\n # df_a = df_maxima\n # df_a = df_a.set_index([[list_dateiname[i]]])\n # if i is not 0:\n # with open(list_dateiname[i]) as fd:\n # measured_voltages = import_from_oszi_data(list_dateiname[i])\n # measured_voltages['measured voltage [V] smoothed'] = scipy.signal.savgol_filter(measured_voltages, window_length=21, polyorder=1, axis=0, mode='nearest') # ggf auch polyorder=2 oder 3\n # current = get_current(measured_voltages, used_resistor)\n # in_chip_voltage = get_voltage_in_chip(measured_voltages, get_voltage(list_dateiname[i]))\n # df_in_chip_data = in_chip_voltage.copy()\n # df_in_chip_data['current [µA]'] = current['current [µA]']\n # maxima = df_in_chip_data.apply(max, axis=0)\n # maxima['in chip resistance [Ohm]'] = maxima['in chip voltage [V]'] * 1000000 / maxima['current [µA]'] # resistance [Ohm]\n # df_maxima = (pd.DataFrame(maxima, index=maxima.index)).transpose()\n # df_b = df_maxima\n # df_b = df_b.set_index([[list_dateiname[i]]])\n # # print(i)\n # df_a = df_a.append(df_b)\n\n # 
df_a.to_csv('OsziData_allImportantNumbersIn1.csv', sep=';')\n\n # print(df)\n x = dfg.index\n # print(x)\n # print(x.iloc[-1])\n y = dfg.iloc[:, 0:]\n\n # print(str(y.columns[0]))\n # print(y.iloc[:, 0].max())\n # print(df.index.names)\n # print(y)\n plotly_xy_yFehler(x_values=x, y_values=y, dateiname=dateiname, suffix_for_new_filename=suffix_for_new_filename, xaxis_title=str(dfg.index.name), yaxis_title='voltage [V]', x_lables=True, y_lables=True, z_lables=True)\n", "id": "11360298", "language": "Python", "matching_score": 8.26419448852539, "max_stars_count": 0, "path": "Oszilloskop/Oszi-Data to Graph all Channels universal.py" }, { "content": "'''\nimput file: .csv-Datei aus Osziloskop\noutput file: #eine Datei mit xy-Graph\n'''\n#written by <NAME>\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport plotly\nfrom plotly import graph_objs as go\n\n\nsuffix_for_new_filename = '_xy.html'\n\n\ndef generate_filename(dateiname, suffix_for_new_filename):\n name = dateiname.split('.')\n del name[-1]\n separator = \".\"\n nwname = separator.join(name)\n nwfile = nwname + suffix_for_new_filename\n return nwfile\n\ndef import_from_oszi_data(dateiname, ll):\n df = pd.read_csv(dateiname, sep=',', header=0, usecols=[0,1], index_col=0, skiprows=ll-1, names=['time stamp', 'intensity unit'])\n df1 = df.replace('Null', np.nan)\n df1.dropna(axis=0, how='all', inplace=True)\n df2 = df1.apply(pd.to_numeric, errors='raise')\n return df2\n\n\ndef plotly_xy_yFehler_data(x_values, y_values, errorx_values, errory_values, errorx_ausan = False, errory_ausan = False):\n print(plotly.__version__)\n if errorx_values is not None:\n errorx_ausan = True\n if errory_values is not None:\n errory_ausan = True\n\n nrCol = []\n for l in y_values:\n measu = y_values[l].values.tolist()\n nrCol.append(measu)\n\n names = []\n\n traces = []\n for t in range(0, len(nrCol)):\n trace = go.Scatter(\n x=x_values,\n y=nrCol[t],\n error_x=dict(\n type='data',\n array=errorx_values,\n color='#000000',\n visible=errorx_ausan\n ),\n\n name=y_values.columns[t],\n line=dict(\n width='1',\n )\n )\n\n traces.append(trace)\n return traces\n\n\ndef plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick):\n layout = go.Layout(\n autosize=True,\n width=600,\n height=430,\n margin=dict(l=100),\n legend=dict(x=1, y=1,\n font=dict(family='Arial, sans-serif',\n size=20,\n color='#000000')),\n xaxis=dict(\n title='<b>' + xaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=24,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=x_range,\n dtick=x_dtick\n ),\n yaxis=dict(\n title='<b>' + yaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=24,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=y_range,\n dtick=y_dtick\n ))\n return layout\n\n\ndef plotly_xy_yFehler(x_values, y_values, errorx=None, errory=None, dateiname=None, suffix_for_new_filename=None, x_range=None, y_range=None, x_dtick=None, y_dtick=None, xaxis_title='', yaxis_title='', x_lables=True, 
y_lables=True, z_lables=True):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_xy_yFehler_data(x_values, y_values, errorx, errory),\n layout=plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick))\n plotly.offline.plot(fig, filename=nwfile)\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.csv') or dateiname.endswith('.CSV'):\n with open(dateiname) as search:\n for c, line in enumerate(search, 1):\n line = line.rstrip() # remove '\\n' at end of line\n if 'Waveform Data,' == line:\n ll = c\n print(dateiname)\n measured_data = import_from_oszi_data(dateiname, ll)\n df = pd.read_csv(dateiname, sep=',', nrows=ll, header=None, usecols=[0, 1], index_col=0, skiprows=0,\n names=['parameter name', 'parameter value'])\n indexasnumeric = pd.to_numeric(measured_data.index)\n measured_data = measured_data.reindex(indexasnumeric)\n measured_data['time [s]'] = indexasnumeric - indexasnumeric[0]\n df = measured_data.set_index('time [s]')\n df.rename(columns={'intensity unit':'voltage [V]'}, inplace=True)\n x = df.index\n y = pd.DataFrame(df.iloc[:, 0:])\n plotly_xy_yFehler(x_values=x, y_values=y, dateiname=dateiname, suffix_for_new_filename=suffix_for_new_filename, xaxis_title=str(df.index.name), yaxis_title=str(str(y.columns[0])), x_lables=True, y_lables=True, z_lables=True)\n", "id": "5828487", "language": "Python", "matching_score": 5.291739463806152, "max_stars_count": 0, "path": "Oszilloskop/Oszi-Data to Graph simpel.py" }, { "content": "import pandas as pd\nimport os\n\n\nimport plotly\nfrom plotly import graph_objs as go\nfrom lib.allgemein import generate_filename\nimport lib.plotlygraphen\nimport numpy as np\nimport plotly.io as pio\n#plotly.offline.init_notebook_mode(connected=True)\n#plotly.__version__\n\nsuffix_for_new_filename = '_waterf.html'\n\n\ndef plotly_xyz_yFehler_data(x_values, y_values, z_values, errorx_values, errory_values, errorz_values, errorx_ausan = False, errory_ausan = False, errorz_ausan = False):\n colors = lib.plotlygraphen.br()\n lineform = lib.plotlygraphen.lineforms()\n names_numbers = lib.plotlygraphen.numbers()\n names_letters = lib.plotlygraphen.letters()\n print(plotly.__version__)\n if errorx_values is not None:\n errorx_ausan = True\n if errory_values is not None:\n errory_ausan = True\n if errorz_values is not None:\n errorz_ausan = True\n\n # nrCol = []\n # for l in z_values:\n # # print(l)\n # measu = z_values[l].values.tolist()\n # nrCol.append(measu)\n # print(nrCol)\n # print(z_values[l])\n # names = []\n # for k in y_values:\n # nr = k.split('_')\n # n = nr[7]\n # # print(nr)\n # r = n.split('n')\n # names.append(r)\n # print(z_values)\n # print(range(0, len(z_values.columns)-1))\n # for t in range(0, len(z_values.columns)):\n # print(t)\n # print(x_values)\n x = x_values.values.tolist()\n # print(x)\n\n y = y_values.values.tolist()\n # print(y_values)\n # print(y)\n # y = y_values.values.tolist(),\n #y = list(y)\n# print(y)\n y2 = []\n # print(range(0, len(y)))\n #\n for i in range(0, len(y)):\n y2.append(np.float64(y[i][0]))\n # print(y2)\n y = y2\n # print(y)\n\n\n # print(y)\n #y = y_values.values.tolist(),\n # y = list(y[0])\n # y2 = []\n # for i in range(0, len(y)):\n # y2.append(np.float64((y[i][0])))\n # y = y2\n # print(len(y))\n # print(y)\n # z2 = []\n z = z_values.transpose().values.tolist()\n # print(z_values.transpose())\n # for t in range(0, len(z_values.columns)):\n # z = z_values[z_values.columns[t]]\n # z2.append(z.values.tolist())\n # # 
print(z_values[z_values.columns[t]].values.tolist())\n # z = z2\n # print(len(z))\n # print(z)\n\n # print(isinstance(x_values[0].values.tolist()[1], str))\n\n # print()\n traces = []\n for t in range(0, len(x)):\n # print(t)\n # # print([t]*len(y_values))\n # print(len(x_values[0].values.tolist()[t])),\n # print(len([t]*len(y_values)))\n# print([t]*len(y_values))\n # print(y_values['index'])\n # print(z_values.iloc[:, t])\n\n # print(t)\n trace = go.Scatter3d(\n #type='scatter3d',\n # name=x_values[0].values.tolist()[t],\n # legendgroup=x_values[0].values.tolist()[t],\n showlegend=False,\n x=[t]*len(y_values),\n # x=x_values[0].values.tolist() * 2 + x_values[0].values.tolist()[0],\n y=y,\n z=z_values.iloc[:, t],\n # surfaceaxis=0,\n error_x=dict(\n type='data',\n array=errorx_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errorx_ausan\n ),\n error_y=dict(\n type='data',\n array=errory_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errory_ausan\n ),\n error_z=dict(\n type='data',\n array=errorz_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errorz_ausan\n ),\n mode='lines',\n # name=names_numbers[t],\n line=dict(\n width=6,\n color=colors[t],\n # dash=lineform[t]\n # colorscale = Ramanspektren.lib.plotlygraphen.jet[t]\n # color='rgb(166, 166, 166)'\n )\n\n )\n # marker=dict(\n # sizemode='diameter',\n # sizeref=1, #relative Größe der Marker\n # sizemin=20,\n # size=10,\n # color='#000000',\n # # opacity=0.8,\n # line=dict(color='rgb(166, 166, 166)',\n # width=0)))\n\n # print(trace)\n traces.append(trace)\n\n return traces\n\n\ndef plotly_xyz_yFehler_layout(xaxis_title, yaxis_title, zaxis_title, x_range, y_range, z_range, x_dtick, y_dtick, z_dtick, x_lables, y_lables, z_lables, ticktext, tickvals):\n layout = dict(\n autosize=True,\n width=1000,\n height=717,\n legend=dict(x=0.65, y=0.65,\n # legend=dict(x=0.85, y=1,\n font=dict(family='Arial, sans-serif',\n size=14,\n color='#000000')),\n scene=dict(\n xaxis=dict(\n title=xaxis_title,\n titlefont=dict(family='Arial bold, sans-serif',\n size=18,\n color='#000000'),\n showticklabels=x_lables,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=16,\n color='#000000'),\n # showgrid=False,\n # showline=True,\n # linewidth=2,\n # zeroline=False,\n # autotick=True,\n # ticks='outside',\n # tick0=0,\n # ticklen=5,\n # tickwidth=1,\n # tickcolor='#FFFFFF',\n# tickvals=list(range(1, len(x), 2)),\n tickvals=list(range(1, len(x))),\n\n ticktext=ticktext,\n tickmode='array',\n range=x_range,\n dtick=x_dtick,\n gridcolor='rgb(100, 100, 100)',\n zerolinecolor = 'rgb(0, 0, 0)',\n showbackground=False,\n # backgroundcolor='rgb(230, 230, 230)',\n ),\n yaxis=dict(\n title=yaxis_title,\n titlefont=dict(family='Arial, sans-serif',\n size=18,\n color='#000000'),\n showticklabels=y_lables,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=16,\n color='#000000'),\n # showgrid=False,\n # showline=True,\n # linewidth=2,\n # zeroline=False,\n # # autotick=True,\n # ticks='outside',\n # # tick0=0,\n # # ticklen=5,\n # # tickwidth=1,\n # tickcolor='#FFFFFF',\n\n range=y_range,\n dtick=y_dtick,\n gridcolor='rgb(100, 100, 100)',\n zerolinecolor='rgb(0, 0, 0)',\n showbackground=False,\n # backgroundcolor='rgb(230, 230, 230)',\n ),\n zaxis=dict(\n title=zaxis_title,\n titlefont=dict(family='Arial, sans-serif',\n size=18,\n color='#000000'),\n showticklabels=z_lables,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=16,\n color='#000000'),\n # showgrid=False,\n # 
showline=True,\n # linewidth=2,\n # zeroline=False,\n # # autotick=True,\n # ticks='outside',\n # tick0=0,\n # ticklen=5,\n # tickwidth=1,\n # tickcolor='#FFFFFF',\n range=z_range,\n\n dtick=z_dtick,\n gridcolor='rgb(100, 100, 100)',\n zerolinecolor='rgb(0, 0, 0)',\n showbackground=False,\n # backgroundcolor='rgb(230, 230, 230)',\n ),\n aspectratio=dict(x=1.7, y=1, z=1),\n aspectmode='manual',\n camera = dict(eye=dict(x=1.7, y=1.7, z=0.5),\n center=dict(x=0, y=0, z=-0.3))))\n return layout\n\n\ndef plotly_xyz_yFehler(x_values, y_values, z_values, errorx=None, errory=None, errorz=None, dateiname=None, suffix_for_new_filename=None, x_range=None, y_range=None, z_range=None, x_dtick=None, y_dtick=None, z_dtick=None, xaxis_title='', yaxis_title='', zaxis_title='', x_lables=False, y_lables=False, z_lables=False, ticktext=None, tickvals=None):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_xyz_yFehler_data(x_values, y_values, z_values, errorx, errory, errorz),\n layout=plotly_xyz_yFehler_layout(xaxis_title, yaxis_title, zaxis_title, x_range, y_range, z_range, x_dtick, y_dtick, z_dtick, x_lables, y_lables, z_lables, ticktext, tickvals))\n # plotly.offline.plot(fig, filename=nwfile)#, auto_open=False) #, image_filename=nwfile) #, image='png', image_width=1600, image_height=860)\n plotly.offline.plot(fig, filename=nwfile, auto_open=True, image_filename=nwfile, image='svg', image_width=1000, image_height=717, )\n # pio.write_image(fig, 'fig1.svg')\n # img_bytes = pio.to_image(fig, format='png', width=600, height=350, scale=2)\n # Image(img_bytes)\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('_pdD_forGraph_reversed.csv'):\n print(dateiname)\n with open(dateiname) as fd:\n df = pd.read_csv(fd, index_col=0, header=0, sep=';')\n # print(df)\n df.reset_index(level=0, inplace=True)\n # print(df)\n x = pd.DataFrame(df.iloc[0, 1:])\n\n # print(x.index) # Zeit\n y = pd.DataFrame(df.iloc[1:, 0])\n # print(y) # Wellenlängenverschiebung\n z = pd.DataFrame(df.iloc[1:, 1:])\n # print(z) # Intensitäten\n plotly_xyz_yFehler(x_values=x, y_values=y, z_values=z, x_range=None, y_range=None, z_range=None, dateiname=dateiname, suffix_for_new_filename=suffix_for_new_filename, xaxis_title=' ',\n #yaxis_title='rel. wavenumber [cm<sup>-1</sup>]', zaxis_title='intensity [a. u.]',\n x_lables=True, y_lables=True, z_lables=True, ticktext=x.values.tolist(), tickvals=[x,y])\n\n# https://plot.ly/python/static-image-export/", "id": "7075099", "language": "Python", "matching_score": 8.184895515441895, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/graph Wasserfall langgezogen.py" }, { "content": "import pandas as pd\nimport os\n\n\nimport plotly\nfrom plotly import graph_objs as go\nfrom lib.allgemein import generate_filename\nimport lib.plotlygraphen\nimport numpy as np\n\n\nsuffix_for_new_filename = '_shifted.html'\n#xaxislable = 'Raman Shift (cm<sup>-1</sup>)'\nxaxislable = 'rel. wavenumber [cm<sup>-1</sup>]'\n#yaxislable = 'Intensity (a. u.)'\nyaxislable = 'intensity [a. 
u.]'\n\n\ns1 = 200\ns2 = s1 + 1500\ns3 = s2 + 4000\ns4 = s3 + 1500\ns5 = s4 + 4000\ns6 = s5 + 1500\ns7 = s6 + 5000\ns8 = s7 + 1500\ns9 = s8 + 5000\ns10 = s9 + 1500\ns11 = s10 + 4000\ns12 = s11 + 1500\ns13 = s12 + 3500\ns14 = s13 + 1500\ns15 = s14 + 4500\ns16 = s15 + 1500\n\nshift = [s16, s15, s14, s13, s12, s11, s10, s9, s8, s7, s6, s5, s4, s3, s2, s1]\n# print(len (shift))\n\n# factor = 5000\n# shift = [\n# factor * 15 - 1000, factor * 14 - 0,\n# factor * 13 - 1000, factor * 12 - 0,\n# factor * 11 - 1000, factor * 10 - 0,\n# factor * 9 - 1000, factor * 8 - 0,\n# factor * 7 - 1000, factor * 6 - 0,\n# factor * 5 - 1000, factor * 4 - 0,\n# factor * 3 - 1000, factor * 2 - 0,\n# factor * 1 - 1000, factor * 0 - 0]\n\n\ndef plotly_xy_yFehler_data(x_values, y_values, errorx_values, errory_values, errorx_ausan = False, errory_ausan = False):\n colors = ['#2ca02c', '#000000', '#9467bd', '#000000', '#2ca02c', '#000000', '#9467bd', '#000000',\n '#2ca02c', '#000000', '#9467bd', '#000000', '#2ca02c', '#000000', '#9467bd', '#000000']\n\n lineform = lib.plotlygraphen.lineforms()\n names_numbers = lib.plotlygraphen.numbers()\n names_letters = lib.plotlygraphen.letters()\n print(plotly.__version__)\n if errorx_values is not None:\n errorx_ausan = True\n if errory_values is not None:\n errory_ausan = True\n\n # print(x_values)\n # x = x_values.values.tolist()\n # print(x)\n x_values = x\n\n y = y_values.values.tolist()\n for h in range(len(y_values.columns)):\n # print(h)\n y_values[y_values.columns[h]] = y_values[y_values.columns[h]] + shift[h]\n # print(y[0])\n # print(y)\n # y = y_values.values.tolist(),\n # y = list(y)\n # print(y)\n y2 = []\n # print(range(0, len(y)))\n #\n for i in range(0, len(y)):\n # print(i)\n y2.append(np.float64(y[i][0]))\n # print(y2)\n nrCol = [y2]\n# print(y)\n\n # print(y)\n # y = y_values.values.tolist(),\n # y = list(y[0])\n # y2 = []\n # for i in range(0, len(y)):\n # y2.append(np.float64((y[i][0])))\n # y = y2\n # print(len(y))\n # print(y)\n # z2 = []\n# z = z_values.transpose().values.tolist()\n# print(z_values.transpose())\n # for t in range(0, len(z_values.columns)):\n # z = z_values[z_values.columns[t]]\n # z2.append(z.values.tolist())\n # # print(z_values[z_values.columns[t]].values.tolist())\n # z = z2\n # print(len(z))\n# print(z)\n\n # print(y_values.iloc[:, 1])\n # for m in range(len(y_values.columns)):\n # print(m)\n # print(y_values[y_values.columns[m]])\n # print(isinstance(y_values[y_values.columns[m]][56], str))\n\n # print(isinstance(y_values, object))\n nrCol = []\n for l in y_values:\n # print(l)\n measu = y_values[l].values.tolist()\n # print(measu)\n nrCol.append(measu)\n # print(nrCol)\n\n names = []\n # for k in y_values:\n # nr = k.split('_')\n # n = nr[7]\n # # print(nr)\n # r = n.split('n')\n # names.append(r)\n\n traces = []\n for t in range(0, len(nrCol)):\n # print(t)\n trace = go.Scatter(\n x=x_values,\n y=nrCol[t],\n error_x=dict(\n type='data',\n array=errorx_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errorx_ausan\n ),\n error_y=dict(\n type='data',\n array=errory_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errory_ausan\n ),\n mode='lines',\n name=y_values.columns[t],\n # name=y_values.columns[t],\n line=dict(\n width=3,\n color=colors[t],\n # dash=lineform[t]\n # colorscale = Ramanspektren.lib.plotlygraphen.jet[t]\n # color='rgb(166, 166, 166)'\n\n )\n )\n # marker=dict(\n # sizemode='diameter',\n # sizeref=1, #relative Größe der Marker\n # sizemin=20,\n # size=10,\n # color='#000000',\n # # 
opacity=0.8,\n # line=dict(color='rgb(166, 166, 166)',\n # width=0)))\n\n traces.append(trace)\n traces.append(go.Scatter(\n x=[250, 250],\n y=[42900, 44900],\n error_x=dict(\n type='data',\n array=[0,0],\n thickness=2,\n width=5,\n color='#000000',\n visible=False),\n error_y=dict(\n type='data',\n array=[0, 0],\n thickness=2,\n width=5,\n color='#000000',\n visible=True),\n mode='lines',\n name=' ',\n line=dict(\n width=2,\n color='#000000',)))\n return traces\n\n\ndef plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick):\n layout = go.Layout(\n autosize=True,\n width=600,\n # height=760,\n height=930,\n margin=dict(l=100),\n legend=dict(x=1, y=1, # legend=dict(x=0.85, y=1,\n font=dict(family='Arial, sans-serif',\n size=20,\n color='#000000')),\n xaxis=dict(\n title='<b>' + xaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=24,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n # autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=x_range,\n # range=[0, 2.5],\n dtick=x_dtick\n ),\n yaxis=dict(\n title='<b>' + yaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=24,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#FFFFFF'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n # autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=y_range,\n rangemode='tozero',\n # range=[0, 105],\n dtick=y_dtick,\n\n ))\n return layout\n\n\ndef plotly_xy_yFehler(x_values, y_values, errorx=None, errory=None, dateiname=None, suffix_for_new_filename=None, x_range=None, y_range=None, x_dtick=None, y_dtick=None, xaxis_title='', yaxis_title='', x_lables=True, y_lables=True, z_lables=True):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_xy_yFehler_data(x_values, y_values, errorx, errory),\n layout=plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick))\n #plotly.offline.plot(fig, filename=nwfile) #, auto_open=False) #, image_filename=nwfile) #, image='png', image_width=1600, image_height=860)\n # plotly.offline.plot(fig, filename=nwfile, auto_open=True, image_filename=nwfile, image='svg', image_width=600, image_height=760)\n plotly.offline.plot(fig, filename=nwfile, auto_open=True, image_filename=nwfile, image='svg', image_width=600, image_height=930)\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('and lowest intensities_w9_o1_s_pdD.csv'):\n print(dateiname)\n with open(dateiname) as fd:\n df = pd.read_csv(fd, index_col=0, header=1, sep=';')\n # print(df)\n df.reset_index(level=0, inplace=True)\n # # print(df)\n # x = pd.DataFrame(df.iloc[0, 1:])\n\n x = df.iloc[1:, 0]\n # print(x) # Wellenlängenverschiebung\n y = pd.DataFrame(df.iloc[1:, 1:])\n # print(y) # Intensitäten\n\n plotly_xy_yFehler(x_values=x, y_values=y, x_range=[150,2000], y_range=[0,45000], dateiname=dateiname, suffix_for_new_filename=suffix_for_new_filename, xaxis_title=xaxislable, yaxis_title=yaxislable, x_lables=True, y_lables=True)\n", "id": "385882", "language": "Python", "matching_score": 7.468109607696533, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/graph Spectrae uebereinander for alternating analytes F1 and 
lowest.py" }, { "content": "import pandas as pd\nimport os\n\n\nimport plotly\nfrom plotly import graph_objs as go\nfrom lib.allgemein import generate_filename\nimport Ramanspektren.lib.plotlygraphen\nimport numpy as np\nimport Ramanspektren.lib.auswertung\nimport Ramanspektren.lib.analyte\n\n\n\nband_start = 1152\nband_end = 1215\n\n\nsuffix_for_new_filename = '_bar.html'\n\n\ndef plotly_barChart_data(x_values, y_values, errorx_values, errory_values, errorx_ausan = False, errory_ausan = False):\n colors = Ramanspektren.lib.plotlygraphen.jet()\n lineform = Ramanspektren.lib.plotlygraphen.lineforms()\n names_numbers = Ramanspektren.lib.plotlygraphen.numbers()\n names_letters = Ramanspektren.lib.plotlygraphen.letters()\n print(plotly.__version__)\n if errorx_values is not None:\n errorx_ausan = True\n if errory_values is not None:\n errory_ausan = True\n\n # print(isinstance(x_values, object))\n # print(y_values.ix[0])\n # print(x_values)\n\n # print(y_values.values.tolist())\n # nrCol = []\n # for l in y_values:\n # measu = y_values[l].values.tolist()\n # nrCol.append(measu)\n # print(nrCol)\n\n names = []\n # for k in y_values:\n # nr = k.split('_')\n # n = nr[7]\n # # print(nr)\n # r = n.split('n')\n # names.append(r)\n\n traces = []\n for t in range(0, 1):\n # print(t)\n trace = go.Bar(\n x=y_values.columns,\n y=y_values.ix[0],\n # error_x=dict(\n # type='data',\n # array=errorx_values,\n # # thickness=1,\n # # width=0,\n # color='#000000',\n # visible=errorx_ausan\n # ),\n # error_y=dict(\n # type='data',\n # array=errory_values,\n # # thickness=1,\n # # width=0,\n # color='#000000',\n # visible=errory_ausan\n # ),\n # mode='lines',\n # name=names_letters[t],\n # line=dict(\n # width='3',\n # # color=colors[t],\n # dash=lineform[t]\n # colorscale = Ramanspektren.lib.plotlygraphen.jet[t]\n # color='rgb(166, 166, 166)'\n\n )\n # )\n # marker=dict(\n # sizemode='diameter',\n # sizeref=1, #relative Größe der Marker\n # sizemin=20,\n # size=10,\n # color='#000000',\n # # opacity=0.8,\n # line=dict(color='rgb(166, 166, 166)',\n # width=0)))\n\n traces.append(trace)\n\n return traces\n\n\ndef plotly_barChart_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick):\n layout = go.Layout(\n autosize=True,\n width=600,\n height=430,\n margin=dict(l=100),\n\n # legend=dict(x=1, y=1, # legend=dict(x=0.85, y=1,\n # font=dict(family='Arial, sans-serif',\n # size=20,\n # color='#000000')),\n xaxis=dict(\n # title='<b>' + xaxis_title + '</b>',\n # titlefont=dict(family='Arial bold, sans-serif',\n # size=24,\n # color='#000000'),\n # showticklabels=True,\n tickangle=50,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#000000'),\n # showgrid=False,\n # showline=True,\n # linewidth=2,\n # zeroline=False,\n # autotick=True,\n # ticks='outside',\n # tick0=0,\n # ticklen=5,\n # tickwidth=1,\n # tickcolor='#FFFFFF',\n # range=x_range,\n # # range=[0, 2.5],\n # dtick=x_dtick\n ),\n yaxis=dict(\n title='<b>' + yaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=24,\n color='#000000'),\n # showticklabels=True,\n # tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=24,\n color='#000000'),\n # showgrid=False,\n # showline=True,\n # linewidth=2,\n # zeroline=False,\n # autotick=True,\n # ticks='outside',\n # tick0=0,\n # ticklen=5,\n # tickwidth=1,\n # tickcolor='#FFFFFF',\n # range=y_range,\n # # range=[0, 105],\n # dtick=y_dtick\n )\n )\n return layout\n\n\ndef plotly_barChart(x_values, y_values, errorx=None, errory=None, dateiname=None, 
suffix_for_new_filename=None, x_range=None, y_range=None, x_dtick=None, y_dtick=None, xaxis_title='', yaxis_title='', x_lables=True, y_lables=True, z_lables=True):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_barChart_data(x_values, y_values, errorx, errory),\n layout=plotly_barChart_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick))\n plotly.offline.plot(fig, filename=nwfile)#, auto_open=False) #, image_filename=nwfile) #, image='png', image_width=1600, image_height=860)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('Graph.csv'):\n print(dateiname)\n with open(dateiname) as fd:\n df = pd.read_csv(fd, index_col=0, header=1, sep=';')\n\n # print(df)\n # df.reset_index(level=0, inplace=True)\n # print(df)\n x = pd.DataFrame(df.iloc[0, 1:])\n # print(x) # Label\n # print(x.index) # Zeit\n y = pd.DataFrame(df.iloc[1:, 0])\n # print(y) # Wellenlängenverschiebung\n z = df.iloc[1:, 0:]\n print(z) # Intensitäten\n\n wn_with_highest_intensity = Ramanspektren.lib.auswertung.compute_wn_with_highest_intensity_labelbased(z, band_start,\n band_end)\n # print(wn_with_highest_intensity)\n highest_intensity = pd.DataFrame(\n Ramanspektren.lib.auswertung.grep_highest_intensity(z, wn_with_highest_intensity))\n # print(highest_intensity)\n\n plotly_barChart(x_values=x, y_values=highest_intensity, x_range=None, y_range=None, dateiname=dateiname, suffix_for_new_filename=suffix_for_new_filename, xaxis_title=' ', yaxis_title='intensity [a. u.]', x_lables=True, y_lables=True, z_lables=True)\n", "id": "3233786", "language": "Python", "matching_score": 3.8347017765045166, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/bar chart neu_blau.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein 2D-Graph mit Intensität gegen z-Position\n'''\n#written by <NAME>\n\n\nimport os\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom Ramanspektren.lib import analyte\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_positions\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\n\n\nsuffix_for_new_filename = '_graphPositionen2D.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_nach_positionen_2dscatter_layout(ind):\n layout = go.Layout(\n autosize=False,\n width=800,\n height=430,\n showlegend=True,\n legend=dict(x=0.85, y=1),\n yaxis=dict(title='<b>Intensity [a. 
u.]</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=14,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=14,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF'\n ),\n xaxis=dict(title='<b>z [µm]</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=14,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial bold, sans-serif',\n size=14,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=False,\n # ticks='outside',\n # tick0=0,\n # ticklen=5,\n # tickwidth=1,\n # tickcolor='#FFFFFF',\n dtick=20\n # range=[0, ind],\n # dtick=round(len(ind) / 10, -1)\n ))\n return layout\n\n\ndef plotly_nach_positionen_2dscatter_data(highest_intensity, positions):\n ind = positions.ix['z [µm]'].values.tolist()\n #print(ind)\n firstCol = highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\n\ndef plotly_positionen2D(highest_intensity, positions, dateiname):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_nach_positionen_2dscatter_data(highest_intensity, positions)\n fig = go.Figure(data=data, layout=plotly_nach_positionen_2dscatter_layout(ind))\n plotly.offline.plot(fig, filename=nwfile, image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n positions = get_positions(dateiname)\n plotly_positionen2D(highest_intensity, positions, dateiname)\n", "id": "1714284", "language": "Python", "matching_score": 2.998565912246704, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_nachPositionen2DanZentlang.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein DataFrame mit wellenzahlen in Zeilen und Frames in Spalten mit korregierter Baseline\n'''\n#written by <NAME>\n\n\nimport os\nfrom Ramanspektren.lib.xml_import import get_positions\nfrom Ramanspektren.lib import analyte\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.xml_import import get_intensities\n\n\nsuffix_for_new_filename = '_normalisiert.csv'\npunkte_baseline = analyte.kristallviolett()\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n positions = get_positions(dateiname)\n intensities = get_intensities(dateiname)\n # print(intensities)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n\n normalisiert = df_korregiert.apply(lambda x: x / df['Frame 100'] * 100, axis=0)\n\n normalisiert.to_csv(nwfile, sep=';')\n\n\n# df2 = df.apply(lambda x: x + df['eee'], axis=0)\n# print(df2)\n# df3 = 
df.apply(lambda x: x + df.ix[1], axis=1)\n# print(df3)\n\n# either the DataFrame’s index (axis=0) or the columns (axis=1)", "id": "3729051", "language": "Python", "matching_score": 2.2387940883636475, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to normalisiert.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei mit xyz-Mappting\noutput file: ein Graph mit Positionen eines Mappings xyz\n'''\n#written by <NAME>\n\n\nimport os\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib.xml_import import get_positions\nfrom lib import analyte\nfrom lib.xml_import import get_intensities\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.allgemein import generate_filename\nfrom lib.auswertung import compute_wn_with_highest_intensity\nfrom lib.auswertung import grep_highest_intensity\nfrom lib.plotlygraphen import viridis_plus_rot, plotly_nach_positionen_3dscatter_layout\nfrom lib.plotlygraphen import plotly_nach_positionen_3dscatter_data\n\n\nsuffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_nach_positionen(highest_intensity, positions, dateiname):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_nach_positionen_3dscatter_data(x_positions=positions.ix['x [µm]'].values.tolist(),\n y_positions=positions.ix['y [µm]'].values.tolist(),\n z_positions=positions.ix['z [µm]'].values.tolist(),\n highest_intensities=highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n ), layout=plotly_nach_positionen_3dscatter_layout(x_lables=True, y_lables=True, z_lables=True))\n plotly.offline.plot(fig, filename=nwfile, image_filename=nwfile, auto_open=False) #, image='png', image_width=1600, image_height=860)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('xyz.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n\n try:\n positions = get_positions(dateiname)\n plotly_nach_positionen(highest_intensity, positions, dateiname)\n except:\n print('no positions')\n", "id": "4184458", "language": "Python", "matching_score": 5.78206205368042, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_nachPositionen3D_xyzMapping.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei mit y-Mapping\noutput file: ein Graph mit Positionen eines Mappings (Auto-Zoon ist ausgeglichen)\n'''\n#written by <NAME>\n\n\nimport os\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nfrom lib import analyte\nfrom Ramanspektren.lib.xml_import import get_positions\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.plotlygraphen import viridis_plus_rot, plotly_nach_positionen_3dscatter_layout\nfrom Ramanspektren.lib.plotlygraphen import plotly_nach_positionen_3dscatter_data\n\n\nsuffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 
1605\nband_end = 1630\n\n\ndef plotly_nach_positionen(highest_intensity, positions, dateiname):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_nach_positionen_3dscatter_data(x_positions=positions.ix['x [µm]'].values.tolist(),\n y_positions=[0] * len(positions.ix['x [µm]'].values.tolist()),\n z_positions=[0] * len(positions.ix['x [µm]'].values.tolist()),\n highest_intensities=highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n ), layout=plotly_nach_positionen_3dscatter_layout(x_lables=True, y_lables=False, z_lables=False))\n plotly.offline.plot(fig, filename=nwfile, image_filename=nwfile) #, image='png', image_width=1600, image_height=860)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('ex.tvf') or dateiname.endswith('ex.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n\n try:\n positions = get_positions(dateiname)\n plotly_nach_positionen(highest_intensity, positions, dateiname)\n except:\n print('no positions')\n", "id": "10637397", "language": "Python", "matching_score": 5.841047286987305, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_nachPositionen3D_xMapping.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei mit y-Mapping\noutput file: output file: ein 3D-Graph mit Intensität gegen z-Position\n'''\n#written by <NAME>\n\n\n\nimport os\nimport plotly\nfrom Ramanspektren.lib import analyte\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.plotlygraphen import plotly_nach_positionen_3dscatter_layout\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_positions\nfrom Ramanspektren.lib.plotlygraphen import plotly_nach_positionen_3dscatter_data\n\n\nsuffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_nach_positionen(highest_intensity, positions, dateiname, suffix_for_new_filename):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_nach_positionen_3dscatter_data(x_positions=[0] * len(positions.ix['y [µm]'].values.tolist()),\n y_positions=[0] * len(positions.ix['y [µm]'].values.tolist()),\n z_positions=positions.ix['z [µm]'].values.tolist(),\n highest_intensities=highest_intensity.ix['highest intensity [a. 
u.]'].values.tolist()\n ), layout=plotly_nach_positionen_3dscatter_layout(x_lables=False, y_lables=False, z_lables=True))\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) #, image_filename=nwfile, image='png', image_width=1600, image_height=860)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('uche.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n try:\n positions = get_positions(dateiname)\n plotly_nach_positionen(highest_intensity, positions, dateiname, suffix_for_new_filename)\n except:\n print('no positions')\n", "id": "5055821", "language": "Python", "matching_score": 1.596229076385498, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_nachPositionen3D_zMapping.py" }, { "content": "'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written by <NAME>\n\n\nimport os\nfrom lib import analyte\nfrom lib.xml_import import get_intensities, get_times\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.auswertung import compute_wn_with_highest_intensity_labelbased\nfrom lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity_labelbased\nfrom Ramanspektren.lib.plotlygraphen import plotly_Spectrum_2dscatter_layout\nimport scipy.signal\nimport pandas as pd\nimport os\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nimport scipy.signal\nimport pandas as pd\nfrom lib.allgemein import generate_filename\nimport Ramanspektren.lib.xml_import\nimport Ramanspektren.lib.baseline_corr\n\n\n# suffix_for_new_filename = '_graphMapping.html'\n# punkte_baseline = analyte.kristallviolett_al_Raja()\n# band_start = punkte_baseline[0]\n# band_end = punkte_baseline[1]\n\n\ndef verarbeitungderdaten(dateinamei):\n intensities = get_intensities(dateinamei)\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n df_out = smoothed_intensities.apply(lambda x: x - x.min())\n df_a = pd.DataFrame(df_out['Intensity [a. u.]'])\n df_a[dateinamei] = df_a['Intensity [a. u.]']\n df_a = df_a.drop(labels=['Intensity [a. 
u.]'], axis=1)\n return df_a\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n output = verarbeitungderdaten(list_dateiname[i])\n # print(output)\n df_a = output\n # print(df_a)\n # wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_out, band_start, band_end)\n # highest_intensity = grep_highest_intensity(df_out, wn_with_highest_intensity)\n # df_a = highest_intensity\n # df_a = df_a.set_index([[list_dateiname[i]]])\n # df_a = df_a.transpose()\n # df_a = df_a.set_index([list(range(1, len(df_a.index) + 1))])\n # df_a = df_a.transpose()\n # print(df_a)\n # framenumber = compute_frame_with_lowest_intensity_labelbased(df_out, band_start, band_end)\n # print(framenumber)\n # df_a[list_dateiname[i] + ' Frame ' + str(framenumber)] = df_out['Frame ' + str(framenumber)]\n # print(df_a)\n# times = times.transpose()\n# times = times.set_index([list(range(1, len(times.index) + 1))])\n# times = times.transpose()\n# df_a = times.append(df_a)\n# # print(i)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n output = verarbeitungderdaten(list_dateiname[i])\n df_b = output\n # print(output)\n # df_a[list_dateiname[i]] = df_b.iloc[:,0]\n # print(df_a)\n # print(df_b.iloc[:,0])\n # print(df_a.index)\n df_b = df_b.set_index(df_a.index)\n # df_voltage = pd.DataFrame(df_b).rename(index=df_a.index)\n # print(df_b)\n df_a[list_dateiname[i]] = df_b.iloc[:,0]\nprint(df_a)\n\n# smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n# smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n# df_korregiert = baselinecorrection(smoothed_intensities, punkte_baseline)\n# wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_korregiert, band_start, band_end)\n# highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n# df_b = highest_intensity\n# df_b = df_b.set_index([[list_dateiname[i]]])\n# df_b = df_b.transpose()\n# df_b = df_b.set_index([list(range(1, len(df_b.index) + 1))])\n# df_b = df_b.transpose()\n# # # print(i)\n# df_a = df_a.append(df_b)\n#\n# df_a = df_a.transpose()\ndf_a.to_csv('alleSpektrenInEinem_w9_o1_s_pdD Brilliantblau.csv', sep=';')\n#df_a.to_csv(generate_filename(list_dateiname[i], 'spec_alt_F1_lowest_int_w9_o1_s_pdD.csv'), sep=';')\n\n#\n\n\n # print(intensities)\n\n#\n#\n# print(df_a)\n# df_a.to_csv('spectra alternating F1 and lowest intensities.csv', sep=';')\n#\n# # plotly_SpectrumMitNiedrigsterIntensitaet(intensities, dateiname)\n# # df_korregiert = baselinecorrection(intensities, punkte_baseline)\n# # wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n# # highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n# #\n# # try:\n# # positions = get_positions(dateiname)\n# # except:\n# # print('no positions')\n# # df_out.to_csv('alks.csv', sep=';')\n", "id": "2587398", "language": "Python", "matching_score": 6.579944610595703, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to all EinzelaufnahmeSpectra inOne.py" }, { "content": "'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written 
by <NAME>\n\n\nimport os\nfrom lib.xml_import import get_intensities, get_times\nfrom Ramanspektren.lib.xml_import import get_intensities\nimport scipy.signal\nimport pandas as pd\n\n\ndef verarbeitungderdaten(dateinamei):\n intensities = get_intensities(dateinamei)\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n df_out = smoothed_intensities.apply(lambda x: x - x.min())\n df_a = pd.DataFrame(df_out['Intensity [a. u.]'])\n df_a[dateinamei] = df_a['Intensity [a. u.]']\n df_a = df_a.drop(labels=['Intensity [a. u.]'], axis=1)\n return df_a\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n output = verarbeitungderdaten(list_dateiname[i])\n df_a = output\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n output = verarbeitungderdaten(list_dateiname[i])\n df_b = output\n df_b = df_b.set_index(df_a.index)\n df_a[list_dateiname[i]] = df_b.iloc[:,0]\n\ndf_a.to_csv('alleSpektrenInEinem_w9_o1_s_pdD para-Mercaptobenzoic acid.csv', sep=';')\n", "id": "9848643", "language": "Python", "matching_score": 3.9127554893493652, "max_stars_count": 0, "path": "Ramanspektren/tvf-TriVista to all EinzelaufnahmeSpectra inOne.py" }, { "content": "'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written by <NAME>\n\n\nimport os\nfrom lib import analyte\nfrom lib.xml_import import get_intensities, get_times\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.auswertung import compute_wn_with_highest_intensity_labelbased\nfrom lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity_labelbased\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity\nfrom Ramanspektren.lib.plotlygraphen import plotly_Spectrum_2dscatter_layout\nimport scipy.signal\nimport pandas as pd\n\n\n# suffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = analyte.kristallviolett_al_Raja()\nband_start = punkte_baseline[0]\nband_end = punkte_baseline[1]\n\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('_pdD.csv') or dateiname.endswith('_dD.CSV'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n intensities = pd.DataFrame(df.iloc[1:, 0:])\n times = pd.DataFrame(df.iloc[0, 0:]).transpose()\n # intensities = get_intensities(list_dateiname[i])\n # times = get_times(list_dateiname[i])\n # smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n # smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n # # print(smoothed_intensities)\n # #df_out = smoothed_intensities.apply(lambda x: x - x.min())\n # df_out = baselinecorrection(smoothed_intensities, punkte_baseline)\n\n 
# print(df_out)\n df_a = pd.DataFrame(intensities['Frame 1'])\n df_a[list_dateiname[i] + ' Frame 1'] = df_a['Frame 1']\n df_a = df_a.drop(labels=['Frame 1'], axis=1)\n #print(df_a)\n # wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_out, band_start, band_end)\n # highest_intensity = grep_highest_intensity(df_out, wn_with_highest_intensity)\n # df_a = highest_intensity\n # df_a = df_a.set_index([[list_dateiname[i]]])\n # df_a = df_a.transpose()\n # df_a = df_a.set_index([list(range(1, len(df_a.index) + 1))])\n # df_a = df_a.transpose()\n # print(df_a)\n # print(intensities)\n framenumber = compute_frame_with_lowest_intensity(intensities, band_start, band_end)\n # print(framenumber)\n df_a[list_dateiname[i] + ' Frame ' + str(framenumber)] = intensities['Frame ' + str(framenumber)]\n # print(df_a)\n# times = times.transpose()\n# times = times.set_index([list(range(1, len(times.index) + 1))])\n# times = times.transpose()\n# df_a = times.append(df_a)\n# # print(i)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n intensities = pd.DataFrame(df.iloc[1:, 0:])\n times = pd.DataFrame(df.iloc[0, 0:]).transpose()\n # intensities = get_intensities(list_dateiname[i])\n # smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n # smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n # # print(smoothed_intensities)\n # df_out = smoothed_intensities.apply(lambda x: x - x.min())\n df_b = pd.DataFrame(intensities['Frame 1'])\n df_a[list_dateiname[i] + ' Frame 1'] = df_b['Frame 1']\n # print(df_b)\n df_b = df_b.drop(labels=['Frame 1'], axis=1)\n # print(df_b)\n # wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_out, band_start, band_end)\n # highest_intensity = grep_highest_intensity(df_out, wn_with_highest_intensity)\n # df_a = highest_intensity\n # df_a = df_a.set_index([[list_dateiname[i]]])\n # df_a = df_a.transpose()\n # df_a = df_a.set_index([list(range(1, len(df_a.index) + 1))])\n # df_a = df_a.transpose()\n # print(df_a)\n framenumber = compute_frame_with_lowest_intensity(intensities, band_start, band_end)\n print(framenumber)\n df_a[list_dateiname[i] + ' Frame ' + str(framenumber)] = intensities['Frame ' + str(framenumber)]\n print(df_b)\n #df_a = df_a.append(df_b)\nprint(df_a)\n\n# smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n# smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n# df_korregiert = baselinecorrection(smoothed_intensities, punkte_baseline)\n# wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_korregiert, band_start, band_end)\n# highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n# df_b = highest_intensity\n# df_b = df_b.set_index([[list_dateiname[i]]])\n# df_b = df_b.transpose()\n# df_b = df_b.set_index([list(range(1, len(df_b.index) + 1))])\n# df_b = df_b.transpose()\n# # # print(i)\n# df_a = df_a.append(df_b)\n#\n# df_a = df_a.transpose()\ndf_a.to_csv('spectra alternating F1 and lowest intensities_w9_o1_s_pdDGraph.csv', sep=';')\n#\n#\n\n\n # print(intensities)\n\n#\n#\n# print(df_a)\n# df_a.to_csv('spectra alternating F1 and lowest intensities.csv', sep=';')\n#\n# # 
plotly_SpectrumMitNiedrigsterIntensitaet(intensities, dateiname)\n# # df_korregiert = baselinecorrection(intensities, punkte_baseline)\n# # wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n# # highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n# #\n# # try:\n# # positions = get_positions(dateiname)\n# # except:\n# # print('no positions')\n# # df_out.to_csv('alks.csv', sep=';')\n", "id": "4113084", "language": "Python", "matching_score": 7.785111904144287, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/csv to spectra F1 and lowest intensity inOne.py" }, { "content": "import os\nfrom lib.xml_import import get_intensities, get_times\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.auswertung import compute_wn_with_highest_intensity_labelbased\nfrom lib.auswertung import grep_highest_intensity\nfrom lib.xml_import import get_intensities\nfrom lib.allgemein import generate_filename\nfrom lib.auswertung import compute_frame_with_lowest_intensity_labelbased\nfrom lib.plotlygraphen import plotly_Spectrum_2dscatter_layout\nimport scipy.signal\nimport pandas as pd\nimport os\nfrom lib.xml_import import get_intensities\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.auswertung import compute_wn_with_highest_intensity_labelbased\nfrom lib.auswertung import grep_highest_intensity\nimport pandas as pd\n\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('.csv') or dateiname.endswith('.TVF'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n df = pd.read_csv(fd, sep=';', header=None,\n index_col=None) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n df_a = pd.DataFrame(df[0])\n print(df[df.columns[0]])\n for c in df.columns:\n # print(c)\n\n df_a[list_dateiname[i] + ' ' + str(df.columns[c])] = df[df.columns[c]]\n #df_a = df_a.drop(labels=['0'], axis=1)\n #df_a([list_dateiname[i] + df.columns[c]] = df.columns[c])\n # df_a = df_a.drop(labels=['Frame 1'], axis=1)\n # if i is not 0:\n # with open(list_dateiname[i]) as fd:\n # df = pd.read_csv(fd, sep=';', header=None,\n # index_col=None) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n # df_a = df\n # print(df)\n print(df_a)\n\n\n\n\n\"\"\"\n\n'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written by <NAME>\n\n\n\n\n\n# suffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = analyte.kristallviolett_al_Raja()\nband_start = punkte_baseline[0]\nband_end = punkte_baseline[1]\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n times = get_times(list_dateiname[i])\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n # print(smoothed_intensities)\n df_out = smoothed_intensities.apply(lambda x: x - x.min())\n df_a = pd.DataFrame(df_out['Frame 1'])\n df_a[list_dateiname[i] + ' Frame 1'] = df_a['Frame 1']\n df_a = df_a.drop(labels=['Frame 1'], 
axis=1)\n # wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_out, band_start, band_end)\n # highest_intensity = grep_highest_intensity(df_out, wn_with_highest_intensity)\n # df_a = highest_intensity\n # df_a = df_a.set_index([[list_dateiname[i]]])\n # df_a = df_a.transpose()\n # df_a = df_a.set_index([list(range(1, len(df_a.index) + 1))])\n # df_a = df_a.transpose()\n # print(df_a)\n framenumber = compute_frame_with_lowest_intensity_labelbased(df_out, band_start, band_end)\n print(framenumber)\n df_a[list_dateiname[i] + ' Frame ' + str(framenumber)] = df_out['Frame ' + str(framenumber)]\n print(df_a)\n# times = times.transpose()\n# times = times.set_index([list(range(1, len(times.index) + 1))])\n# times = times.transpose()\n# df_a = times.append(df_a)\n# # print(i)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n # print(smoothed_intensities)\n df_out = smoothed_intensities.apply(lambda x: x - x.min())\n df_b = pd.DataFrame(df_out['Frame 1'])\n df_a[list_dateiname[i] + ' Frame 1'] = df_b['Frame 1']\n df_b = df_b.drop(labels=['Frame 1'], axis=1)\n # wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_out, band_start, band_end)\n # highest_intensity = grep_highest_intensity(df_out, wn_with_highest_intensity)\n # df_a = highest_intensity\n # df_a = df_a.set_index([[list_dateiname[i]]])\n # df_a = df_a.transpose()\n # df_a = df_a.set_index([list(range(1, len(df_a.index) + 1))])\n # df_a = df_a.transpose()\n # print(df_a)\n framenumber = compute_frame_with_lowest_intensity_labelbased(df_out, band_start, band_end)\n print(framenumber)\n df_a[list_dateiname[i] + ' Frame ' + str(framenumber)] = df_out['Frame ' + str(framenumber)]\n print(df_b)\n #df_a = df_a.append(df_b)\nprint(df_a)\n\n# smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n# smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n# df_korregiert = baselinecorrection(smoothed_intensities, punkte_baseline)\n# wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_korregiert, band_start, band_end)\n# highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n# df_b = highest_intensity\n# df_b = df_b.set_index([[list_dateiname[i]]])\n# df_b = df_b.transpose()\n# df_b = df_b.set_index([list(range(1, len(df_b.index) + 1))])\n# df_b = df_b.transpose()\n# # # print(i)\n# df_a = df_a.append(df_b)\n#\n# df_a = df_a.transpose()\ndf_a.to_csv('spectra alternating F1 and lowest intensities_w9_o1_s_pdD.csv', sep=';')\n#df_a.to_csv(generate_filename(list_dateiname[i], 'spec_alt_F1_lowest_int_w9_o1_s_pdD.csv'), sep=';')\n\n#\n\n\n # print(intensities)\n\n#\n#\n# print(df_a)\n# df_a.to_csv('spectra alternating F1 and lowest intensities.csv', sep=';')\n#\n# # plotly_SpectrumMitNiedrigsterIntensitaet(intensities, dateiname)\n# # df_korregiert = baselinecorrection(intensities, punkte_baseline)\n# # wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n# # highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n# #\n# # try:\n# # positions = get_positions(dateiname)\n# # 
except:\n# # print('no positions')\n# # df_out.to_csv('alks.csv', sep=';')\n\n\n\"\"\"\n\n\n\n\"\"\"\n\n'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written by <NAME>\n\n\n\n\n\n# suffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = punkte_baseline[0]\nband_end = punkte_baseline[1]\n\n\n# for dateiname in os.listdir():\n# if dateiname.endswith('smoothed.csv') or dateiname.endswith('smoothed.CSV'):\n# print(dateiname)\n# with open(dateiname, 'r') as fd:\n# df = pd.read_csv(fd, sep=';', header=0, index_col=0) #, names=['time [s]', 'measured voltage [V]', 'leer'])\n# intensities = pd.DataFrame(df.iloc[1:,0:])\n# df_out = intensities.transform(lambda x: x - x.min())\n# df_out.to_csv(generate_filename(dateiname, '_drawnDown.csv'), sep=';')\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('_dD.csv') or dateiname.endswith('_drawnDown.csv'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n times = pd.DataFrame(df.iloc[0, 0:]).transpose()\n intensities = pd.DataFrame(df.iloc[1:, 0:])\n # print(intensities)\n wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(intensities, band_start, band_end)\n # print(wn_with_highest_intensity)\n highest_intensity = pd.DataFrame(grep_highest_intensity(intensities, wn_with_highest_intensity))\n df_a = highest_intensity\n df_a = df_a.set_index([[list_dateiname[i]]])\n df_a = df_a.transpose()\n df_a = df_a.set_index([list(range(1, len(df_a.index)+1))])\n df_a = df_a.transpose()\n times = times.transpose()\n times = times.set_index([list(range(1, len(times.index) + 1))])\n times = times.transpose()\n df_a = times.append(df_a)\n # print(df_a)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n intensities = pd.DataFrame(df.iloc[1:, 0:])\n wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(intensities, band_start, band_end)\n highest_intensity = pd.DataFrame(grep_highest_intensity(intensities, wn_with_highest_intensity))\n df_b = highest_intensity\n df_b = df_b.set_index([[list_dateiname[i]]])\n df_b = df_b.transpose()\n df_b = df_b.set_index([list(range(1, len(df_b.index) + 1))])\n df_b = df_b.transpose()\n # print(df_b)\n # # # print(i)\n df_a = df_a.append(df_b)\n#print(df_a)\ndf_a = df_a.transpose()\n#print(df_a)\nif list_dateiname[0].split('_').count('primitive') == 1:\n df_a.to_csv('allIndicatorBandsInOne_primitive_bscorr.csv', sep=';')\nelse:\n df_a.to_csv('allIndicatorBandsInOne.csv', sep=';')\n\n\n#plotly_zeitlVerlauf_normalisiert(all_highest_intensities)\n\n\n\"\"\"", "id": "6196824", "language": "Python", "matching_score": 7.838415622711182, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/takeAllSpectraInOneFile.py" }, { "content": "'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written by <NAME>\n\n\nimport os\nimport lib.analyte\nfrom lib.xml_import import get_intensities\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.auswertung import 
compute_wn_with_highest_intensity_labelbased\nfrom lib.auswertung import grep_highest_intensity\nimport pandas as pd\n\n\n# suffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = punkte_baseline[0]\nband_end = punkte_baseline[1]\n\n\n# for dateiname in os.listdir():\n# if dateiname.endswith('smoothed.csv') or dateiname.endswith('smoothed.CSV'):\n# print(dateiname)\n# with open(dateiname, 'r') as fd:\n# df = pd.read_csv(fd, sep=';', header=0, index_col=0) #, names=['time [s]', 'measured voltage [V]', 'leer'])\n# intensities = pd.DataFrame(df.iloc[1:,0:])\n# df_out = intensities.transform(lambda x: x - x.min())\n# df_out.to_csv(generate_filename(dateiname, '_drawnDown.csv'), sep=';')\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('_dD.csv') or dateiname.endswith('_drawnDown.csv'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n times = pd.DataFrame(df.iloc[0, 0:]).transpose()\n intensities = pd.DataFrame(df.iloc[1:, 0:])\n # print(intensities)\n wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(intensities, band_start, band_end)\n # print(wn_with_highest_intensity)\n highest_intensity = pd.DataFrame(grep_highest_intensity(intensities, wn_with_highest_intensity))\n df_a = highest_intensity\n df_a = df_a.set_index([[list_dateiname[i]]])\n df_a = df_a.transpose()\n df_a = df_a.set_index([list(range(1, len(df_a.index)+1))])\n df_a = df_a.transpose()\n times = times.transpose()\n times = times.set_index([list(range(1, len(times.index) + 1))])\n times = times.transpose()\n df_a = times.append(df_a)\n # print(df_a)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) # , names=['time [s]', 'measured voltage [V]', 'leer'])\n intensities = pd.DataFrame(df.iloc[1:, 0:])\n wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(intensities, band_start, band_end)\n highest_intensity = pd.DataFrame(grep_highest_intensity(intensities, wn_with_highest_intensity))\n df_b = highest_intensity\n df_b = df_b.set_index([[list_dateiname[i]]])\n df_b = df_b.transpose()\n df_b = df_b.set_index([list(range(1, len(df_b.index) + 1))])\n df_b = df_b.transpose()\n # print(df_b)\n # # # print(i)\n df_a = df_a.append(df_b)\n#print(df_a)\ndf_a = df_a.transpose()\n#print(df_a)\nif list_dateiname[0].split('_').count('primitive') == 1:\n df_a.to_csv('allIndicatorBandsInOne_primitive_bscorr.csv', sep=';')\nelse:\n df_a.to_csv('allIndicatorBandsInOne.csv', sep=';')\n\n\n#plotly_zeitlVerlauf_normalisiert(all_highest_intensities)\n", "id": "9906746", "language": "Python", "matching_score": 6.0360822677612305, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/takeIndicatorBandAndPutInOneFile.py" }, { "content": "'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written by <NAME>\n\n\nimport os\nfrom lib import analyte\nfrom lib.xml_import import get_intensities\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.auswertung import compute_wn_with_highest_intensity\nfrom lib.auswertung import grep_highest_intensity\n\n\n# suffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = 
lib.analyte.kristallviolett_al_Raja()\nband_start = 1152\nband_end = 1215\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n df_a = highest_intensity\n df_a = df_a.set_index([[list_dateiname[i]]])\n # print(i)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n df_b = highest_intensity\n df_b = df_b.set_index([[list_dateiname[i]]])\n # print(i)\n df_a = df_a.append(df_b)\n\ndf_a.to_csv('Zusammenfassung_Renata_grep.csv', sep=';')\n\n\n#plotly_zeitlVerlauf_normalisiert(all_highest_intensities)\n", "id": "4357735", "language": "Python", "matching_score": 3.8589882850646973, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to allRenataGrepIn1.py" }, { "content": "'''\nimput files: mehrere .tvf-TriVista-Dateien mit zeitl Verlauf\noutput file: eine Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\n'''\n#written by <NAME>\n\n\nimport os\nfrom lib import analyte\nfrom lib.xml_import import get_intensities, get_times\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.auswertung import compute_wn_with_highest_intensity_labelbased\nfrom lib.auswertung import grep_highest_intensity\nimport scipy.signal\nimport pandas as pd\n\n\n# suffix_for_new_filename = '_graphMapping.html'\npunkte_baseline = analyte.kristallviolett_al_Raja()\nband_start = punkte_baseline[0]\nband_end = punkte_baseline[1]\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n times = get_times(list_dateiname[i])\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n df_korregiert = baselinecorrection(smoothed_intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n df_a = highest_intensity\n df_a = df_a.set_index([[list_dateiname[i]]])\n df_a = df_a.transpose()\n df_a = df_a.set_index([list(range(1, len(df_a.index) + 1))])\n df_a = df_a.transpose()\n times = times.transpose()\n times = times.set_index([list(range(1, len(times.index) + 1))])\n times = times.transpose()\n df_a = times.append(df_a)\n # print(i)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n smoothed_intensities = 
scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n df_korregiert = baselinecorrection(smoothed_intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity_labelbased(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n df_b = highest_intensity\n df_b = df_b.set_index([[list_dateiname[i]]])\n df_b = df_b.transpose()\n df_b = df_b.set_index([list(range(1, len(df_b.index) + 1))])\n df_b = df_b.transpose()\n# # print(i)\n df_a = df_a.append(df_b)\n\ndf_a = df_a.transpose()\ndf_a.to_csv('smoothed_bscorrOneband_BandsInOne.csv', sep=';')\n", "id": "7875733", "language": "Python", "matching_score": 3.5218496322631836, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista smoothed und bscorr to allRenataGrepIn1.py" }, { "content": "import pandas as pd\nimport scipy.signal\nimport regex as re\n\n\ndef compute_wn_with_highest_intensity(df_korregiert, band_start, band_end):\n interval = df_korregiert.ix[band_start:band_end]\n wn_with_highest_intensity = interval.idxmax(axis=0)\n return wn_with_highest_intensity\n\ndef compute_wn_with_highest_intensity_labelbased(df, band_start, band_end):\n ind = df.index.values.tolist()\n k = []\n for i in ind:\n if re.match(str(band_start) + '\\.[0-9]+', str(i)):\n # print(i)\n break\n elif re.match(str(band_start+1) + '\\.[0-9]+', str(i)):\n # print(i)\n break\n for j in ind:\n if re.match(str(band_end) + '\\.[0-9]+', str(j)):\n # print(j)\n break\n elif re.match(str(band_end+1) + '\\.[0-9]+', str(j)):\n # print(j)\n break\n interval = df.loc[i:j]\n wn_with_highest_intensity = interval.idxmax(axis=0)\n return wn_with_highest_intensity\n\n\ndef grep_highest_intensity(df_korregiert, wn_with_highest_intensity):\n highest_intensity = []\n for i in range(0, len(wn_with_highest_intensity)):\n highest_intensity.append(df_korregiert[wn_with_highest_intensity.index[i]][wn_with_highest_intensity[i]])\n df_highest_intensity = pd.DataFrame(highest_intensity, index=wn_with_highest_intensity.index, columns=['highest intensity [a. 
u.]'])\n df_highest_intensity = df_highest_intensity.transpose()\n return df_highest_intensity\n\n\ndef compute_frame_with_highest_intensity(intensities, band_start, band_end):\n copy_intensities = intensities.copy()\n interval = copy_intensities.ix[band_start:band_end]\n band = interval.apply(max, axis=0)\n lowest = band.idxmax()\n dfn = lowest.split(' ')\n framenumber = int(dfn[1])\n return framenumber\n\n\ndef compute_frame_with_lowest_intensity(intensities, band_start, band_end):\n copy_intensities = intensities.copy()\n interval = copy_intensities.ix[band_start:band_end]\n band = interval.apply(max, axis=0)\n lowest = band.idxmin()\n dfn = lowest.split(' ')\n framenumber = int(dfn[1])\n return framenumber\n\n\ndef compute_frame_with_lowest_intensity_labelbased(intensities, band_start, band_end):\n ind = intensities.index.values.tolist()\n # print(ind)\n k = []\n for i in ind:\n if re.match(str(band_start) + '\\.[0-9]+', str(i)):\n # print(i)\n break\n for j in ind:\n if re.match(str(band_end) + '\\.[0-9]+', str(j)):\n # print(j)\n break\n interval = intensities.loc[i:j]\n band = interval.idxmax(axis=0)\n lowest = band.idxmin()\n dfn = lowest.split(' ')\n framenumber = int(dfn[1])\n return framenumber\n\n\ndef compute_frame_with_lowest_intensity_from_smoothed(smoothed):\n copy_smoothed = smoothed.copy()\n lowest = copy_smoothed.idxmin(axis=1)\n lowest = lowest.values.tolist()[0]\n dfn = lowest.split(' ')\n framenumber = int(dfn[1])\n return framenumber\n\n\ndef savitzkygolay_for_pandas(df, window_length=21, polyorder=3):\n smoothed = scipy.signal.savgol_filter(df.transpose(), window_length, polyorder, axis=0, mode='nearest') # https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.signal.savgol_filter.html\n smoothed = smoothed.transpose()\n smoothed = pd.DataFrame(smoothed, index=[df.index], columns=[df.columns])\n return smoothed\n\n\ndef savitzkygolay_for_malgucken(df, window_length=21, polyorder=3):\n smoothed = scipy.signal.savgol_filter(df.transpose(), window_length, polyorder, axis=0, mode='nearest') # https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.signal.savgol_filter.html\n return smoothed\n", "id": "3333057", "language": "Python", "matching_score": 0.9185231924057007, "max_stars_count": 0, "path": "Ramanspektren/lib/auswertung.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein Graph mit 3 Spektren: Frame1, Frame100 und den Frame mit der Minimalintensität\noutput file: Zeit bis Target sauber ist nach baseline correctur und smoothing via Sawitzky-Golay-Filter\n'''\n#written by <NAME>\n\nimport os\nfrom lib import analyte\nimport pandas as pd\nimport numpy as np\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity_from_smoothed\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_times\nfrom Ramanspektren.lib.auswertung import savitzkygolay_for_pandas\n\n\nsuffix_for_new_filename_5spektren_in1graph = '_graph5spektren.html'\nsuffix_for_new_filename_zeitlVerlauf = '_smooth_HWZ_graphzeitlVerlauf.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\nlist_dateiname = []\nfor dateiname in os.listdir():\n if dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.tvf') or 
dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.TVF'):\n print(dateiname)\n list_dateiname.append(dateiname)\nfor i in range(0, len(list_dateiname)):\n if i == 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n times = get_times(list_dateiname[i])\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n smoothed = savitzkygolay_for_pandas(highest_intensity, window_length=21, polyorder=3)\n framenumber = compute_frame_with_lowest_intensity_from_smoothed(smoothed)\n vonTangenteAus = smoothed - smoothed['Frame ' + str(framenumber)].values.tolist()[0]\n timeTillRegAfterDifferenzHWZ = []\n NoOfHWZ_list = []\n NoOfHWZ = 1 # wie viele Halbwertszeiten\n intNachHWZ = smoothed['Frame 200'] / (2 ** NoOfHWZ)\n SignalConsideredAway = vonTangenteAus.columns[\n vonTangenteAus.ix['highest intensity [a. u.]'] < (intNachHWZ.values.tolist()[0])]\n timeTillReg = times[SignalConsideredAway[0]] - times['Frame 200']\n timeTillReg = np.float64(timeTillReg)\n timeTillRegAfterDifferenzHWZ.append(timeTillReg)\n NoOfHWZ_list.append(NoOfHWZ)\n while len(SignalConsideredAway) > 1:\n NoOfHWZ = NoOfHWZ + 1 # wie viele Halbwertszeiten\n intNachHWZ = smoothed['Frame 200'] / (2 ** NoOfHWZ)\n SignalConsideredAway = vonTangenteAus.columns[\n vonTangenteAus.ix['highest intensity [a. u.]'] < (intNachHWZ.values.tolist()[0])]\n timeTillReg = times[SignalConsideredAway[0]] - times['Frame 200']\n timeTillReg = np.float64(timeTillReg)\n timeTillRegAfterDifferenzHWZ.append(timeTillReg)\n NoOfHWZ_list.append(NoOfHWZ)\n df_timeTillRegAfterDifferentHZW = pd.DataFrame(timeTillRegAfterDifferenzHWZ, index=[NoOfHWZ_list], columns=[list_dateiname[i]])\n df_timeTillRegAfterDifferentHZW = df_timeTillRegAfterDifferentHZW.transpose()\n df_a = df_timeTillRegAfterDifferentHZW\n df_a = df_a.set_index([[list_dateiname[i]]])\n # print(i)\n if i is not 0:\n with open(list_dateiname[i]) as fd:\n intensities = get_intensities(list_dateiname[i])\n times = get_times(list_dateiname[i])\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n smoothed = savitzkygolay_for_pandas(highest_intensity, window_length=21, polyorder=3)\n framenumber = compute_frame_with_lowest_intensity_from_smoothed(smoothed)\n vonTangenteAus = smoothed - smoothed['Frame ' + str(framenumber)].values.tolist()[0]\n timeTillRegAfterDifferenzHWZ = []\n NoOfHWZ_list = []\n NoOfHWZ = 1 # wie viele Halbwertszeiten\n intNachHWZ = smoothed['Frame 200'] / (2 ** NoOfHWZ)\n SignalConsideredAway = vonTangenteAus.columns[\n vonTangenteAus.ix['highest intensity [a. u.]'] < (intNachHWZ.values.tolist()[0])]\n timeTillReg = times[SignalConsideredAway[0]] - times['Frame 200']\n timeTillReg = np.float64(timeTillReg)\n timeTillRegAfterDifferenzHWZ.append(timeTillReg)\n NoOfHWZ_list.append(NoOfHWZ)\n while len(SignalConsideredAway) > 1:\n NoOfHWZ = NoOfHWZ + 1 # wie viele Halbwertszeiten\n intNachHWZ = smoothed['Frame 200'] / (2 ** NoOfHWZ)\n SignalConsideredAway = vonTangenteAus.columns[\n vonTangenteAus.ix['highest intensity [a. 
u.]'] < (intNachHWZ.values.tolist()[0])]\n timeTillReg = times[SignalConsideredAway[0]] - times['Frame 200']\n timeTillReg = np.float64(timeTillReg)\n timeTillRegAfterDifferenzHWZ.append(timeTillReg)\n NoOfHWZ_list.append(NoOfHWZ)\n df_timeTillRegAfterDifferentHZW = pd.DataFrame(timeTillRegAfterDifferenzHWZ, index=[NoOfHWZ_list], columns=[list_dateiname[i]])\n df_timeTillRegAfterDifferentHZW = df_timeTillRegAfterDifferentHZW.transpose()\n df_b = df_timeTillRegAfterDifferentHZW\n df_b = df_b.set_index([[list_dateiname[i]]])\n # print(i)\n df_a = df_a.append(df_b)\n\ndf_a.to_csv('Zusammenfassung_time_till_target_clean_HWZ.csv', sep=';')\n\n", "id": "4860969", "language": "Python", "matching_score": 9.328180313110352, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to time_till_target_clean_HWZAllIn1.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein Graph mit 3 Spektren: Frame1, Frame100 und den Frame mit der Minimalintensität\noutput file: Zeit bis Target sauber ist nach baseline correctur und smoothing via Sawitzky-Golay-Filter\n'''\n#written by <NAME>\n\nimport os\n\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib import analyte\nimport pandas as pd\nimport numpy as np\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity_from_smoothed\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_times\nfrom Ramanspektren.lib.auswertung import savitzkygolay_for_pandas\nfrom Ramanspektren.lib.plotlygraphen import plotly_Spectrum_2dscatter_layout\n\n\n\nsuffix_for_new_filename_5spektren_in1graph = '_graph5spektren.html'\nsuffix_for_new_filename_zeitlVerlauf = '_smooth_HWZ_graphzeitlVerlauf.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n #print(ind)\n firstCol = highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\ndef plotly_zeitlVerlauf(df, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename_zeitlVerlauf)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile) #, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\ndef plotly_zeiten3spektren_in1graph_2dscatter_layout():\n layout = go.Layout(\n autosize=False,\n width=800,\n height=430,\n showlegend=True,\n legend=dict(\n x=0.05, y=1,\n font=dict(family='Arial, sans-serif',\n size=16,\n color='#000000')),\n yaxis=dict(title='<b>Intensity [a. 
u.]</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF'\n ),\n xaxis=dict(title='<b>rel. Wavenumber [cm<sup>-1</sup>]</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial bold, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=False,\n ticks='outside',\n tick0=50,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=[50, 2000],\n dtick=200\n ))\n return layout\n\n\n\ndef plotly_zeiten5spektren_in1graph_2dscatter_data(intensities, framenumber):\n # print(intensities)\n ind = intensities.index.values.tolist()\n firstCol = intensities['Frame 1'].values.tolist()\n secondCol = intensities['Frame 20'].values.tolist()\n thirdCol = intensities['Frame 200'].values.tolist()\n fourthCol = intensities['Frame 300'].values.tolist()\n sixthCol = intensities['Frame ' + str(framenumber)].values.tolist()\n\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#660066\", width=3),\n name='Frame 1')\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n mode='lines',\n line=go.Line(color=\"#0000cd\", width=3),\n name='Frame 20 - flow on')\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n mode='lines',\n line=go.Line(color=\"#009933\", width=3),\n name='Frame 200 - At the start of regeration (voltage on)')\n trace4 = go.Scatter(\n x=ind,\n y=fourthCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Frame 300 - voltage off')\n trace6 = go.Scatter(\n x=ind,\n y=sixthCol,\n mode='lines',\n line=go.Line(color=\"#ff0000\", width=3),\n name='position of least intense signal')\n\n # name='Frame ' + str(framenumber))\n #print([trace1, trace2, trace3])\n return [trace1, trace2, trace3, trace4, trace6], ind\n\n\ndef plotly_zeiten5spektren_in1graph(intensities, dateiname, suffix_for_new_filename_3spektren_in1graph):\n framenumber = compute_frame_with_lowest_intensity_from_smoothed(smoothed)\n #print(framenumber)\n nwfile = generate_filename(dateiname, suffix_for_new_filename_3spektren_in1graph)\n data, ind = plotly_zeiten5spektren_in1graph_2dscatter_data(intensities, framenumber)\n layout = plotly_Spectrum_2dscatter_layout(ind, xaxis_title='rel. Wavenumber [cm<sup>-1</sup>]', yaxis_title='Intensity [a. 
u.]',\n range_nr=[50, 2000], dtick_nr=200, ausan=False, positionsangabe='', annotation_y='', graphwidth=800)\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.plot(fig, filename=nwfile) #, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n# for dateiname in os.listdir():\n# if dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200ProgStart.tvf') or dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200ProgStart.TVF'):\n# # print(dateiname)\n# times = get_times(dateiname)\n#\n# TimeVoltageOn = round(times['Frame 200']['time [s]'] + 100, 0)\n# FrameVoltageOn = times[times.columns[times.ix['time [s]'] > TimeVoltageOn - 1]].columns[0]\n# #print(FrameVoltageOn)\n# break\n# #print(FrameVoltageOn)\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.tvf') or dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n times = get_times(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n smoothed = savitzkygolay_for_pandas(highest_intensity, window_length=21, polyorder=3)\n framenumber = compute_frame_with_lowest_intensity_from_smoothed(smoothed)\n vonTangenteAus = smoothed - smoothed['Frame ' + str(framenumber)].values.tolist()[0]\n\n timeTillRegAfterDifferenzHWZ = []\n NoOfHWZ_list = []\n NoOfHWZ = 1 # wie viele Halbwertszeiten\n intNachHWZ = smoothed['Frame 200'] / (2**NoOfHWZ)\n SignalConsideredAway = vonTangenteAus.columns[vonTangenteAus.ix['highest intensity [a. u.]'] < (intNachHWZ.values.tolist()[0])]\n timeTillReg = times[SignalConsideredAway[0]] - times['Frame 200']\n\n timeTillReg = np.float64(timeTillReg)\n timeTillRegAfterDifferenzHWZ.append(timeTillReg)\n NoOfHWZ_list.append(NoOfHWZ)\n while len(SignalConsideredAway) > 1:\n NoOfHWZ = NoOfHWZ + 1 # wie viele Halbwertszeiten\n intNachHWZ = smoothed['Frame 200'] / (2 ** NoOfHWZ)\n SignalConsideredAway = vonTangenteAus.columns[vonTangenteAus.ix['highest intensity [a. u.]'] < (intNachHWZ.values.tolist()[0])]\n timeTillReg = times[SignalConsideredAway[0]] - times['Frame 200']\n timeTillReg = np.float64(timeTillReg)\n timeTillRegAfterDifferenzHWZ.append(timeTillReg)\n NoOfHWZ_list.append(NoOfHWZ)\n # print(timeTillRegAfterDifferenzHWZ)\n\n\n print(NoOfHWZ_list)\n print(timeTillRegAfterDifferenzHWZ)\n df_timeTillRegAfterDifferentHZW = pd.DataFrame(timeTillRegAfterDifferenzHWZ, index=[NoOfHWZ_list], columns=[dateiname])\n df_timeTillRegAfterDifferentHZW = df_timeTillRegAfterDifferentHZW.transpose()\n print(df_timeTillRegAfterDifferentHZW)\n\n\n # print(intNachHWZ)\n\n # print(smoothed)\n # print(smoothed['Frame ' + str(framenumber)].values.tolist()[0])\n # print(vonTangenteAus)\n\n #print(len(SignalConsideredAway))\n # print(SignalConsideredAway[0])\n # print(times[SignalConsideredAway[0]])\n # print(times['Frame 200'])\n\n\n\n # plotly_zeiten5spektren_in1graph(intensities, dateiname, suffix_for_new_filename_5spektren_in1graph)\n\n plotly_zeitlVerlauf(smoothed, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title='Time [s]', yaxis_title='Intensity [a. 
u.]') #zeitl Verlauf nach baseline correktur\n\n\n\n\n", "id": "881359", "language": "Python", "matching_score": 10.458830833435059, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to time_till_target_clean_HWZ.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein Graph mit 3 Spektren: Frame1, Frame100 und den Frame mit der Minimalintensität\noutput file: zeitlicher Verlauf der Frames nach baseline correctur\n'''\n#written by <NAME>\n\nimport regex as re\nfrom Ramanspektren.lib.allgemein import liste_in_floats_umwandeln\nimport os\nimport plotly\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib import analyte\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection, get_spectrum_values\nfrom Ramanspektren.lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_times\nimport scipy.signal\n\n\nsuffix_for_new_filename_zeitlVerlauf = '_mitundohnesmooth_graphzeitlVerlauf.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, smoothed, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n # print(highest_intensity, smoothed)\n firstCol = highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n secondCol = smoothed.ix['highest intensity [a. u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='unbearbeitet',\n showlegend=False)\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n mode='lines',\n line=go.Line(color=\"#ff0000\", width=3),\n name='geglättet',\n showlegend=False)\n\n data = [trace1, trace2]\n return data, ind\n\ndef plotly_zeitlVerlauf_vergl(df_korregiert, smoothed, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename_zeitlVerlauf)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df_korregiert, smoothed, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile, auto_open=True) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.tvf') or dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n\n times = get_times(dateiname)\n #\n # TimeVoltageOn = round(times['Frame 200']['time [s]'] + 100, 0)\n # FrameVoltageOn = times[times.columns[times.ix['time [s]'] > TimeVoltageOn - 1]].columns[0]\n # TimeVoltageOff = round(times['Frame 200']['time [s]'] + 200, 0)\n # FrameVoltageOff = times[times.columns[times.ix['time [s]'] > TimeVoltageOff - 1]].columns[0]\n #\n# plotly_zeiten6spektren_in1graph(intensities, dateiname, suffix_for_new_filename_6spektren_in1graph, FrameVoltageOn, FrameVoltageOff)\n 
df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n # print(highest_intensity.transpose())\n smoothed = scipy.signal.savgol_filter(highest_intensity.transpose(), window_length=21, polyorder=4, axis=0, mode='nearest')\n smoothed = smoothed.transpose()\n smoothed = pd.DataFrame(smoothed, index=['highest intensity [a. u.]'], columns=[df_korregiert.columns])\n\n plotly_zeitlVerlauf_vergl(highest_intensity, smoothed, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title='Time [s]', yaxis_title='Intensity [a. u.]') #zeitl Verlauf nach baseline correktur\n", "id": "11639716", "language": "Python", "matching_score": 9.680295944213867, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_zeitlVerlauf + Plotly_smooth_zeitlVerlauf.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein Graph mit 3 Spektren: Frame1, Frame100 und den Frame mit der Minimalintensität\noutput file: zeitlicher Verlauf der Frames nach baseline correctur\n'''\n#written by <NAME>\n\n\n\nfrom Ramanspektren.lib.allgemein import liste_in_floats_umwandeln\nimport os\nimport plotly\nimport pandas as pd\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\nfrom Ramanspektren.lib.xml_import import get_intensities\nimport scipy.signal\nfrom lib.allgemein import generate_filename\nimport Ramanspektren.lib.baseline_corr\nimport lib.analyte\n\n\nsuffix_for_new_filename_zeitlVerlauf = '_mitundohnesmooth.html'\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = punkte_baseline[0]\nband_end = punkte_baseline[1]\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, smoothed, zeiten):\n ind = highest_intensity.index.values.tolist()\n # print(highest_intensity, smoothed)\n firstCol = highest_intensity.values.tolist()\n secondCol = smoothed.values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='unbearbeitet',\n showlegend=False)\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n mode='lines',\n line=go.Line(color=\"#ff0000\", width=3),\n name='geglättet',\n showlegend=False)\n\n data = [trace1, trace2]\n return data, ind\n\ndef plotly_zeitlVerlauf_vergl(df_korregiert, smoothed, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename_zeitlVerlauf)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df_korregiert, smoothed, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile, auto_open=True) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('180503_0,05mM_MG_40erObjektiv_5%_9_rinsing_F20VoltageOn-4,01V_F50VoltageOff.tvf'):\n print(dateiname)\n intensities = Ramanspektren.lib.xml_import.get_intensities(dateiname)\n times = Ramanspektren.lib.xml_import.get_times(dateiname)\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n 
smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n\n # print(intensities)\n # print(smoothed_intensities)\n\n plotly_zeitlVerlauf_vergl(intensities.iloc[:,0], smoothed_intensities.iloc[:,0], times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title='', yaxis_title='Intensity [a. u.]') #zeitl Verlauf nach baseline correktur\n", "id": "9744486", "language": "Python", "matching_score": 6.170229911804199, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_Spectrum + Plotly_smooth_Spectrum.py" }, { "content": "'''\npunkte_baseline are the wavenumbers were the spectrum is taken and pulled down to the baseline.\nband_start: is the start of the interval where the script searches for the highest intensity. this highest intensity is than shown over time.\nband_end: is the end of that interval.\n'''\n\n\n'''\nimput file: .tvf-TriVista-File\noutput file: band intensity over time after baseline correction\n'''\n#written by <NAME>\n\n\nimport lib.analyte\n\n\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = 1152\nband_end = 1215\n\n\nimport scipy.signal\nimport os\nimport pandas as pd\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib.allgemein import generate_filename\nfrom lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\n\n\nsuffix_for_new_filename = '_graph.html'\n\n\ndef plotly_zeitlVerlaufAlle_2dscatter_data(highest_intensity):\n ind = highest_intensity.index.values.tolist()\n for i in highest_intensity:\n print(i)\n # nr = i.split(' ')\n # ind.append(nr[1])\n\n # voltage = []\n # for k in highest_intensity.index:\n # nr = k.split('_')\n # voltage.append(nr[6])\n\n nrCol = []\n for l in highest_intensity.index:\n measu = highest_intensity.ix[l].values.tolist()\n nrCol.append(measu)\n\n traces = []\n for i in highest_intensity:\n # for t in range(0, len(highest_intensity.index)):\n trace = go.Scatter(\n x=ind,\n y=highest_intensity[i],\n mode='lines',\n # line=go.Line(color=viridis_plus_rot_as_list()[t*2], width=3),\n name=i,\n showlegend=True)\n traces.append(trace)\n return traces, ind\n\n\ndef plotly_zeitlVerlaufAlle(df, dateiname, suffix_for_new_filename, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_zeitlVerlaufAlle_2dscatter_data(df)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title)) #, yrangestart=0, yrangestop=110))\n plotly.offline.plot(fig, filename=nwfile) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.csv') or dateiname.endswith('.CSV'):\n print(dateiname)\n with open(dateiname, 'r') as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) #, names=['time [s]', 'measured voltage [V]', 'leer'])\n # df1 = df.apply(pd.to_numeric, errors='raise')\n intensities = pd.DataFrame(df.iloc[1:,0:])\n times = pd.DataFrame(df.iloc[0, 0:]).transpose()\n smoothed_intensities = pd.DataFrame(intensities['Frame 1'], index=intensities.index) #, columns=[intensities.columns])\n # print(smoothed_intensities)\n for i in range(5, 15, 2): # window_length\n for j in range(1, 6): # polyorder\n #print(i, j)\n try:\n interim = scipy.signal.savgol_filter(intensities['Frame 1'], window_length=i, polyorder=j, axis=0, mode='nearest')\n smoothed_intensities['window=' + str(i) + ', order=' + str(j)] = pd.DataFrame(interim, 
index=intensities.index) #, columns=[intensities.columns])\n except:\n continue\n\n print(smoothed_intensities)\n\n plotly_zeitlVerlaufAlle(smoothed_intensities, dateiname, suffix_for_new_filename, xaxis_title='Frame', yaxis_title='Intensity [a. u.]')\n", "id": "5283862", "language": "Python", "matching_score": 7.076078414916992, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/alle smoothings.py" }, { "content": "'''\npunkte_baseline are the wavenumbers were the spectrum is taken and pulled down to the baseline.\nband_start: is the start of the interval where the script searches for the highest intensity. this highest intensity is than shown over time.\nband_end: is the end of that interval.\n'''\n\n\n'''\nimput file: .tvf-TriVista-File\noutput file: band intensity over time after baseline correction\n'''\n#written by <NAME>\n\n\nimport lib.analyte\n\n\n#suffix_for_new_filename = '_graphIntensityOverTime.csv'\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = 1152\nband_end = 1215\n\n\nimport os\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nimport scipy.signal\nimport pandas as pd\nfrom lib.allgemein import generate_filename\nfrom lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n #print(ind)\n firstCol = highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\n\ndef plotly_zeitlVerlauf_vergl(df_korregiert, smoothed, times, nwfile, xaxis_title, yaxis_title):\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df_korregiert, smoothed, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile) #, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('_justExport.csv') or dateiname.endswith('_justExport.CSV'):\n print(dateiname)\n with open(dateiname, 'r') as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) #, names=['time [s]', 'measured voltage [V]', 'leer'])\n # df1 = df.apply(pd.to_numeric, errors='raise')\n intensities = pd.DataFrame(df.iloc[1:,0:])\n print(intensities)\n times = pd.DataFrame(df.iloc[0, 0:]).transpose()\n\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n\n # df_smoothed_intensities = pd.DataFrame(data=smoothed_intensities.iloc[:, :], index=smoothed_intensities.index, columns=smoothed_intensities.columns, copy=True)\n # print(df_smoothed_intensities.columns)\n # df_times = pd.DataFrame(data=times.iloc[:, :], index=times.index, columns=times.columns, copy=True)\n # print(df_times)\n all = times.append(smoothed_intensities)\n\n # print('_'.join(dateiname.split('_')[0:-1]))\n all.to_csv(generate_filename('_'.join(dateiname.split('_')[0:-1]) + '.csv', '_window9_order1_smoothed.csv'), sep=';')\n\n # plotly_zeitlVerlauf_vergl(pd.DataFrame(intensities['Frame 5']), smoothed_intensities['Frame 5'], times,\n # generate_filename(dateiname, '_window_length=9, 
polyorder=1.html'),\n # xaxis_title='Time [s]',\n # yaxis_title='Intensity [a. u.]') # zeitl Verlauf nach baseline correktur\n", "id": "6924740", "language": "Python", "matching_score": 9.143461227416992, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/smoothning.py" }, { "content": "\n'''\npunkte_baseline are the wavenumbers were the spectrum is taken and pulled down to the baseline.\nband_start: is the start of the interval where the script searches for the highest intensity. this highest intensity is than shown over time.\nband_end: is the end of that interval.\n'''\n\n\n'''\nimput file: .tvf-TriVista-File\noutput file: band intensity over time after baseline correction\n'''\n#written by <NAME>\n\n\nimport lib.analyte\n\n\n#suffix_for_new_filename = '_graphIntensityOverTime.csv'\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = 1152\nband_end = 1215\n\n\nimport os\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nimport scipy.signal\nimport pandas as pd\nfrom lib.allgemein import generate_filename\nfrom lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n #print(ind)\n firstCol = highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\n\ndef plotly_zeitlVerlauf_vergl(df_korregiert, smoothed, times, nwfile, xaxis_title, yaxis_title):\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df_korregiert, smoothed, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile) #, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('_smoothed.csv') or dateiname.endswith('_smoothed.CSV'):\n print(dateiname)\n with open(dateiname, 'r') as fd:\n df = pd.read_csv(fd, sep=';', header=0, index_col=0) #, names=['time [s]', 'measured voltage [V]', 'leer'])\n intensities = pd.DataFrame(df.iloc[1:, 0:])\n times = pd.DataFrame(df.iloc[0, 0:]).transpose()\n df_out = intensities.apply(lambda x: x - x.min())\n all = times.append(df_out)\n all.to_csv(generate_filename(dateiname, '_drawnDown.csv'), sep=';')\n", "id": "9147543", "language": "Python", "matching_score": 7.343061923980713, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/baselinecorr simplified.py" }, { "content": "'''\npunkte_baseline are the wavenumbers were the spectrum is taken and pulled down to the baseline.\nband_start: is the start of the interval where the script searches for the highest intensity. 
this highest intensity is than shown over time.\nband_end: is the end of that interval.\n'''\n\n\n'''\nimput file: .tvf-TriVista-File\noutput file: band intensity over time after baseline correction\n'''\n#written by <NAME>\n\n\nfrom Ramanspektren.lib import analyte\n\n\nsuffix_for_new_filename = '_graphIntensityOverTime.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\nimport os\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.xml_import import get_times\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n #print(ind)\n firstCol = highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\n\ndef plotly_zeitlVerlauf(highest_intensity, times, dateiname):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(highest_intensity, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title='Time [s]', yaxis_title='Intensity [a. u.]'))\n plotly.offline.plot(fig, filename=nwfile) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n times = get_times(dateiname)\n plotly_zeitlVerlauf(highest_intensity, times, dateiname) #zeitl Verlauf nach baseline correktur\n\n", "id": "11631514", "language": "Python", "matching_score": 7.972527980804443, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_IntensityOverTime.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: zeitlicher Verlauf der Frames nach baseline correctur\n'''\n#written by <NAME>\n\n\nimport os\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib import analyte\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.xml_import import get_intensities\nfrom lib.xml_import import get_positions\nfrom lib.xml_import import get_times\nfrom lib.allgemein import generate_filename\nfrom lib.auswertung import compute_wn_with_highest_intensity\nfrom lib.auswertung import grep_highest_intensity\nfrom lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\n\n\nsuffix_for_new_filename = '_baselinecor_graphzeitlVerlauf.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n firstCol = 
highest_intensity.ix['highest intensity [a. u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\ndef plotly_zeitlVerlauf(df, times, dateiname, suffix_for_new_filename, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) #, image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n\n times = get_times(dateiname)\n plotly_zeitlVerlauf(highest_intensity, times, dateiname, suffix_for_new_filename, xaxis_title='Time [s]', yaxis_title='Intensity [a. u.]') #zeitl Verlauf nach baseline correktur\n\n", "id": "5411584", "language": "Python", "matching_score": 4.565344333648682, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_zeitlVerlauf.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein Graph mit 3 Spektren: Frame1, Frame100 und den Frame mit der Minimalintensität\noutput file: zeitlicher Verlauf der Frames nach baseline correctur\n'''\n#written by <NAME>\n\n\nimport os\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib import analyte\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection\nfrom Ramanspektren.lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_positions\nfrom Ramanspektren.lib.xml_import import get_times\n\n\nsuffix_for_new_filename_zeitlVerlauf = '_graphzeitlVerlauf.html'\nsuffix_for_new_filename_3spektren_in1graph = '_graph3spektren.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n #print(ind)\n firstCol = highest_intensity.ix['highest intensity [a. 
u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\ndef plotly_zeitlVerlauf(df, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename_zeitlVerlauf)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\ndef plotly_zeiten3spektren_in1graph_2dscatter_layout():\n layout = go.Layout(\n autosize=False,\n width=800,\n height=430,\n showlegend=True,\n legend=dict(\n x=0.05, y=1,\n font=dict(family='Arial, sans-serif',\n size=16,\n color='#000000')),\n yaxis=dict(title='<b>Intensity [a. u.]</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF'\n ),\n xaxis=dict(title='<b>rel. Wavenumber [cm<sup>-1</sup>]</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial bold, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=False,\n ticks='outside',\n tick0=50,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=[50, 2000],\n dtick=200\n ))\n return layout\n\ndef plotly_zeiten3spektren_in1graph_2dscatter_data(intensities, framenumber):\n # print(intensities)\n ind = intensities.index.values.tolist()\n firstCol = intensities['Frame 1'].values.tolist()\n secondCol = intensities['Frame 100'].values.tolist()\n thirdCol = intensities['Frame ' + str(framenumber)].values.tolist()\n\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Frame 1')\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='At the start of regeration')\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n mode='lines',\n line=go.Line(color=\"#ff0000\", width=3),\n name='position of least intense signal')\n\n # name='Frame ' + str(framenumber))\n #print([trace1, trace2, trace3])\n return [trace2, trace3]\n\ndef plotly_zeiten3spektren_in1graph(intensities, dateiname, suffix_for_new_filename_3spektren_in1graph):\n framenumber = compute_frame_with_lowest_intensity(intensities, band_start, band_end)\n #print(framenumber)\n nwfile = generate_filename(dateiname, suffix_for_new_filename_3spektren_in1graph)\n data = plotly_zeiten3spektren_in1graph_2dscatter_data(intensities, framenumber)\n layout = plotly_zeiten3spektren_in1graph_2dscatter_layout()\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.plot(fig, filename=nwfile, image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n plotly_zeiten3spektren_in1graph(intensities, dateiname, 
suffix_for_new_filename_3spektren_in1graph)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n\n try:\n positions = get_positions(dateiname)\n except:\n print('no positions')\n\n times = get_times(dateiname)\n plotly_zeitlVerlauf(highest_intensity, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title='Time [s]', yaxis_title='Intensity [a. u.]') #zeitl Verlauf nach baseline correktur\n", "id": "8197988", "language": "Python", "matching_score": 7.322207450866699, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_zeitlVerlauf + Plotly_3SpektrenIn1Graph.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei\noutput file: ein Graph mit 5 Spektren: Frame1, Frame 20 - flow on, Frame 200 - At the start of regeration (voltage on), Frame 300 - voltage off, position of least intense signal\noutput file: zeitlicher Verlauf der Frames nach baseline correctur\n'''\n#written by <NAME>\n\nimport regex as re\nimport decimal\nfrom Ramanspektren.lib.allgemein import liste_in_floats_umwandeln\nimport os\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom Ramanspektren.lib import analyte\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.auswertung import compute_wn_with_highest_intensity\nfrom Ramanspektren.lib.auswertung import compute_frame_with_lowest_intensity\nfrom Ramanspektren.lib.auswertung import grep_highest_intensity\nfrom Ramanspektren.lib.baseline_corr import baselinecorrection, get_spectrum_values\nfrom Ramanspektren.lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_positions\nfrom Ramanspektren.lib.xml_import import get_times\nfrom Ramanspektren.lib.plotlygraphen import plotly_Spectrum_2dscatter_layout\n\n\nsuffix_for_new_filename_zeitlVerlauf = '_graphzeitlVerlauf.html'\nsuffix_for_new_filename_5spektren_in1graph = '_graph5spektren.html'\npunkte_baseline = analyte.kristallviolett()\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity, zeiten):\n ind = zeiten.ix['time [s]'].values.tolist()\n print(ind)\n print(highest_intensity)\n firstCol = highest_intensity.ix['highest intensity [a. 
u.]'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='Verlauf',\n showlegend=False)\n data = [trace1]\n return data, ind\n\ndef plotly_zeitlVerlauf(df, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename_zeitlVerlauf)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df, times)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\ndef plotly_zeiten5spektren_in1graph_2dscatter_data(intensities, framenumber):\n # print(intensities)\n ind = intensities.index.values.tolist()\n firstCol = intensities['Frame 1'].values.tolist()\n secondCol = intensities['Frame 20'].values.tolist()\n thirdCol = intensities['Frame 200'].values.tolist()\n fourthCol = intensities['Frame 300'].values.tolist()\n fifthCol = intensities['Frame ' + str(framenumber)].values.tolist()\n\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#9400D3\", width=3),\n name='Frame 1')\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n mode='lines',\n line=go.Line(color=\"#0000FF\", width=3),\n name='flow on')\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name='voltage on')\n trace4 = go.Scatter(\n x=ind,\n y=fourthCol,\n mode='lines',\n line=go.Line(color=\"#FFFF00\", width=3),\n name='voltage off')\n trace5 = go.Scatter(\n x=ind,\n y=fifthCol,\n mode='lines',\n line=go.Line(color=\"#ff0000\", width=3),\n name='position of least intense signal')\n\n # name='Frame ' + str(framenumber))\n #print([trace1, trace2, trace3])\n return [trace1, trace2, trace3, trace4, trace5], ind\n\n\ndef plotly_zeiten5spektren_in1graph(intensities, dateiname, suffix_for_new_filename_3spektren_in1graph):\n framenumber = compute_frame_with_lowest_intensity(intensities, band_start, band_end)\n print(framenumber)\n nwfile = generate_filename(dateiname, suffix_for_new_filename_3spektren_in1graph)\n data, ind = plotly_zeiten5spektren_in1graph_2dscatter_data(intensities, framenumber)\n layout = plotly_Spectrum_2dscatter_layout(ind, xaxis_title='rel. Wavenumber [cm<sup>-1</sup>]', yaxis_title='Intensity [a. 
u.]',\n range_nr=[50, 2000], dtick_nr=200, ausan=False, positionsangabe='', annotation_y='', graphwidth=800)\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.tvf') or dateiname.endswith('beiF1Analyt_beiF20ElektrolythFlussAn_beiF200SpannungAn_beiF300SpannungAus.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n times = get_times(dateiname)\n\n plotly_zeiten5spektren_in1graph(intensities, dateiname, suffix_for_new_filename_5spektren_in1graph)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n wn_with_highest_intensity = compute_wn_with_highest_intensity(df_korregiert, band_start, band_end)\n highest_intensity = grep_highest_intensity(df_korregiert, wn_with_highest_intensity)\n\n plotly_zeitlVerlauf(highest_intensity, times, dateiname, suffix_for_new_filename_zeitlVerlauf, xaxis_title='Time [s]', yaxis_title='Intensity [a. u.]') #zeitl Verlauf nach baseline correktur\n", "id": "8951728", "language": "Python", "matching_score": 5.892521858215332, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to Plotly_zeitlVerlauf + Plotly_5SpektrenIn1GraphFSpektrenAb170918.py" }, { "content": "import os\nimport pandas as pd\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib.allgemein import generate_filename\nfrom lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\nfrom lib.plotlygraphen import viridis_plus_rot_as_list\nfrom lib.xml_import import get_times\n\n\n\nsuffix_for_new_filename = '_graph.html'\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity):\n ind = []\n for i in highest_intensity:\n nr = i.split(' ')\n ind.append(nr[1])\n\n voltage = []\n for k in highest_intensity.index:\n nr = k.split('_')\n voltage.append(nr[6])\n\n nrCol = []\n for l in highest_intensity.index:\n measu = highest_intensity.ix[l].values.tolist()\n nrCol.append(measu)\n\n traces = []\n for t in range(0, len(highest_intensity.index)):\n trace = go.Scatter(\n x=ind,\n y=nrCol[t],\n mode='lines',\n line=go.Line(color=viridis_plus_rot_as_list()[t*2], width=3),\n name=voltage[t],\n showlegend=True)\n traces.append(trace)\n return traces, ind\n\n\ndef plotly_zeitlVerlauf(df, dateiname, suffix_for_new_filename, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title, yrangestart=0, yrangestop=110))\n plotly.offline.plot(fig, filename=nwfile) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\n# for dateiname in os.listdir():\n# if dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200ProgStart.tvf') or dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200ProgStart.TVF'):\n# # print(dateiname)\n# times = get_times(dateiname)\n#\n# TimeVoltageOn = round(times['Frame 200']['time [s]'] + 100, 0)\n# FrameVoltageOn = times[times.columns[times.ix['time [s]'] > TimeVoltageOn - 1]].columns[0]\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('usammenfassung_Renata_grep.csv') or dateiname.endswith('usammenfassung_Renata_grep.CSV'):\n print(dateiname)\n with open(dateiname) as fd:\n df = pd.read_csv(fd, index_col=0, 
header=0, sep=';')\n df2 = df.apply(lambda x: x / df['Frame 182'] * 100, axis=0) # Normalisierung\n\n plotly_zeitlVerlauf(df2, dateiname, suffix_for_new_filename, xaxis_title='Frame', yaxis_title='Intensity [a. u.]')\n", "id": "11932295", "language": "Python", "matching_score": 5.86398983001709, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/allRenataGrepIn1 Normalisierung - Kopie mit Interation.py" }, { "content": "import os\nimport pandas as pd\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.plotlygraphen import plotly_zeitlVerlauf_2dscatter_layout\n\n\nsuffix_for_new_filename = '_graph.html'\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(highest_intensity):\n ind =[]\n for i in highest_intensity:\n #print(i)\n nr = i.split(' ')\n ind.append(nr[1])\n # print(ind)\n # ind = highest_intensity.ix.values.tolist()\n firstCol = highest_intensity.ix[0].values.tolist()\n secondCol = highest_intensity.ix[1].values.tolist()\n thirdCol = highest_intensity.ix[2].values.tolist()\n forthCol = highest_intensity.ix[3].values.tolist()\n # fifthCol = highest_intensity.ix[4].values.tolist()\n # sixthCol = highest_intensity.ix[5].values.tolist()\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n mode='lines',\n line=go.Line(color=\"#440154FF\", width=3),\n name=highest_intensity.index[0],\n showlegend=True)\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n mode='lines',\n line=go.Line(color=\"#404788FF\", width=3),\n name=highest_intensity.index[1],\n showlegend=True)\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n mode='lines',\n line=go.Line(color=\"#287D8EFF\", width=3),\n name=highest_intensity.index[2],\n showlegend=True)\n trace4 = go.Scatter(\n x=ind,\n y=forthCol,\n mode='lines',\n line=go.Line(color=\"#29AF7FFF\", width=3),\n name=highest_intensity.index[3],\n showlegend=True)\n # trace5 = go.Scatter(\n # x=ind,\n # y=fifthCol,\n # mode='lines',\n # line=go.Line(color=\"#95D840FF\", width=3),\n # name='Verlauf',\n # showlegend=False)\n data = [trace1, trace2, trace3, trace4]\n return data, ind\n\n\ndef plotly_zeitlVerlauf(df, dateiname, suffix_for_new_filename, xaxis_title, yaxis_title):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title, yaxis_title))\n plotly.offline.plot(fig, filename=nwfile) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('Zusammenfassung_Renata_grep.csv'):\n print(dateiname)\n with open(dateiname) as fd:\n df = pd.read_csv(fd, index_col=0, header=0, sep=';')\n # print(df)\n df2 = df.apply(lambda x: x / df['Frame 100'] * 100, axis=0)\n # print(df2)\n plotly_zeitlVerlauf(df2, dateiname, suffix_for_new_filename, xaxis_title='Frame', yaxis_title='Intensity [a. 
u.]')\n", "id": "5552425", "language": "Python", "matching_score": 2.1272311210632324, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/allRenataGrepIn1 Normalisierung.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei mit Einzelaufnahme/ Einzelspektrum\noutput file: Graph mit Spektrum: Spektrum des Frames\n'''\n#written by <NAME>\n\n\nimport os\n\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom Ramanspektren.lib.allgemein import generate_filename\nfrom Ramanspektren.lib.plotlygraphen import plotly_Spectrum_2dscatter_layout\nfrom Ramanspektren.lib.xml_import import get_intensities\nfrom Ramanspektren.lib.xml_import import get_intensities\n\nsuffix_for_new_filename = '_Accumulationen.html'\nband_start = 1605\nband_end = 1630\n\n\ndef plotly_Spectrum_1Spektrum_2dscatter_data(takeData, framenumber):\n ind = intensities.index.values.tolist()\n thirdCol = takeData.values.tolist() # trace1 = go.Scatter(\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name=framenumber)\n return [trace3], ind\n\ndef plotly_Spectrum_1Spectrum(dateiname, suffix_for_new_filename, takeData):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_Spectrum_1Spektrum_2dscatter_data(takeData, 'Intensity [a. u.]')\n layout = plotly_Spectrum_2dscatter_layout(ind, xaxis_title='rel. Wavenumber [cm<sup>-1</sup>]', yaxis_title='Intensity [a. u.]', range_nr=[50, 2000], dtick_nr=200)\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.plot(fig, filename=nwfile) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('Acc.tvf') or dateiname.endswith('Acc.TVF'):\n print(dateiname)\n\n try:\n intensities = get_intensities(dateiname)\n plotly_Spectrum_1Spectrum(dateiname, suffix_for_new_filename, takeData=intensities['Intensity [a. u.]'])\n\n except:\n intensities = get_intensities(dateiname)\n intensities['mean'] = intensities.mean(axis=1)\n plotly_Spectrum_1Spectrum(dateiname, suffix_for_new_filename, takeData=intensities['mean'])\n", "id": "4407597", "language": "Python", "matching_score": 6.0037407875061035, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVistaAccumulationen to Plotly_Spektrum.py" }, { "content": "'''\nimput file: .tvf-TriVista-Datei mit Einzelaufnahme/ Einzelspektrum\noutput file: Graph mit Spektrum: Spektrum des Frames\n'''\n#written by <NAME>\n\n\nimport os\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom lib.allgemein import generate_filename\nfrom lib.plotlygraphen import plotly_Spectrum_2dscatter_layout\nfrom lib.xml_import import get_intensities\n\n\nsuffix_for_new_filename = '_1Spectrum.html'\n\n\n\ndef plotly_Spectrum_1Spektrum_2dscatter_data(intensities, framenumber):\n ind = intensities.index.values.tolist()\n thirdCol = intensities[framenumber].values.tolist() # trace1 = go.Scatter(\n showlegend = False\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n mode='lines',\n line=go.Line(color=\"#000000\", width=3),\n name=framenumber\n )\n return [trace3], ind\n\ndef plotly_Spectrum_1Spectrum(intensities, dateiname, suffix_for_new_filename):\n indexes = intensities.index.values.tolist()\n # print(indexes[0])\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_Spectrum_1Spektrum_2dscatter_data(intensities, 'Intensity [a. u.]')\n layout = plotly_Spectrum_2dscatter_layout(ind, xaxis_title='rel. 
Wavenumber [cm<sup>-1</sup>]', yaxis_title='Intensity [a. u.]', range_nr=[indexes[0], indexes[-1]], dtick_nr=(indexes[0]-indexes[-1])/15)\n fig = go.Figure(data=data, layout=layout)\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) # , image='png', image_filename=nwfile, image_width=800, image_height=430)\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n\n\n try:\n intensities = get_intensities(dateiname)\n plotly_Spectrum_1Spectrum(intensities, dateiname, suffix_for_new_filename)\n except:\n print('does not work')\n", "id": "577375", "language": "Python", "matching_score": 0.7191550731658936, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVistaEinzelaufnahme to Plotly_Spektrum.py" }, { "content": "\n'''\npunkte_baseline are the wavenumbers were the spectrum is taken and pulled down to the baseline.\nband_start: is the start of the interval where the script searches for the highest intensity. this highest intensity is than shown over time.\nband_end: is the end of that interval.\n'''\n\n\n'''\nimput file: .tvf-TriVista-File\noutput file: band intensity over time after baseline correction\n'''\n#written by <NAME>\n\n\nimport lib.analyte\n\n\n\n#suffix_for_new_filename = '_graphIntensityOverTime.csv'\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = punkte_baseline[0]\nband_end = punkte_baseline[1]\n\n\nimport os\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nimport scipy.signal\nimport pandas as pd\nfrom lib.allgemein import generate_filename\nimport Ramanspektren.lib.xml_import\nimport Ramanspektren.lib.baseline_corr\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n intensities = Ramanspektren.lib.xml_import.get_intensities(dateiname)\n times = Ramanspektren.lib.xml_import.get_times(dateiname)\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0,\n mode='nearest')\n intensities = pd.DataFrame(smoothed_intensities, index=intensities.index,\n columns=intensities.columns)\n intensities = intensities.ix[150:2000]\n intensities = intensities.apply(lambda x: x - x.min())\n\n df_intensities = pd.DataFrame(data=intensities.iloc[:, :], index=intensities.index,\n columns=[intensities.columns],\n copy=True)\n df_intensities.iloc[:] = intensities.iloc[:]\n\n liste = dateiname.split('_')\n # liste.insert(6, '100%buffer')\n liste = '_'.join(liste)\n\n if times.empty is False:\n all = times.append(df_intensities)\n all = all.fillna(0)\n\n else:\n # df = pd.DataFrame([{generate_filename(liste, '_w9_o1_s_pdD.csv')}], columns=['Intensity [a. u.]'], index=['filename'])\n # print(df)\n # df = df.transpose()\n # print(df)\n # print(df['filename'])\n # print(df_intensities)\n # df_intensities = df_intensities.transpose()\n # print(df_intensities)\n # # df_intensities.insert(0, 'filename', df['filename'].values.tolist())\n # print(df_intensities)\n # df_intensities = df_intensities.transpose()\n # print(df_intensities)\n all = df_intensities.rename(columns={\"Intensity [a. 
u.]\": str(generate_filename(liste, '_w9_o1_s_pdD.csv'))})\n # all = df.append(df_intensities)\n # all = all.fillna(0)\n # print(all)\n # print(df.iloc[0][0])\n # print(isinstance(df.iloc[0][0], tuple))\n # print(df_intensities.iloc[0][0])\n # print(isinstance(df_intensities.iloc[0][0], object))\n # all = df.append(df_intensities, ignore_index=True)\n #\n# print(df_intensities)\n # print('aslkdfj')\n\n all.to_csv(generate_filename(liste, '_w9_o1_s_pdD.csv'), sep=';')\n", "id": "2832600", "language": "Python", "matching_score": 6.569371223449707, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista Einzelaufnahme to primitive baselinecorr.py" }, { "content": "\n'''\npunkte_baseline are the wavenumbers were the spectrum is taken and pulled down to the baseline.\nband_start: is the start of the interval where the script searches for the highest intensity. this highest intensity is than shown over time.\nband_end: is the end of that interval.\n'''\n\n\n'''\nimput file: .tvf-TriVista-File\noutput file: band intensity over time after baseline correction\n'''\n#written by <NAME>\n\n\n\nimport os\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nimport scipy.signal\nimport pandas as pd\nfrom lib.allgemein import generate_filename\nimport lib.xml_import\nimport lib.baseline_corr\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n intensities = lib.xml_import.get_intensities(dateiname)\n times = lib.xml_import.get_times(dateiname)\n\n smoothed_intensities = scipy.signal.savgol_filter(intensities, window_length=9, polyorder=1, axis=0, mode='nearest')\n smoothed_intensities = pd.DataFrame(smoothed_intensities, index=intensities.index, columns=intensities.columns)\n # smoothed_intensities = smoothed_intensities.ix[150:2000]\n\n smoothed_intensities = smoothed_intensities.apply(lambda x: x - x.min())\n\n df_intensities = pd.DataFrame(data=smoothed_intensities.iloc[:, :], index=intensities.index, columns=[intensities.columns],\n copy=True)\n df_intensities.iloc[:] = smoothed_intensities.iloc[:]\n all = times.append(df_intensities)\n all = all.fillna(0)\n\n liste = dateiname.split('_')\n # liste.insert(6, '100%buffer')\n liste = '_'.join(liste)\n\n all.to_csv(generate_filename(liste, '_w9_o1_s_pdD.csv'), sep=';')\n\n", "id": "11137696", "language": "Python", "matching_score": 4.813826084136963, "max_stars_count": 0, "path": "Ramanspektren/tvf-TriVista to primitive baselinecorr.py" }, { "content": "'''\npunkte_baseline are the wavenumbers were the spectrum is taken and pulled down to the baseline.\nband_start: is the start of the interval where the script searches for the highest intensity. 
this highest intensity is than shown over time.\nband_end: is the end of that interval.\n'''\n\n\n'''\nimput file: .tvf-TriVista-File\noutput file: band intensity over time after baseline correction\n'''\n#written by <NAME>\n\n\nimport lib.analyte\n\n\npunkte_baseline = lib.analyte.kristallviolett_al_Raja()\nband_start = 1152\nband_end = 1215\n\n\nimport os\nimport pandas as pd\nfrom lib.xml_import import get_intensities\nfrom lib.xml_import import get_times\nfrom lib.allgemein import generate_filename\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n print(dateiname)\n intensities = get_intensities(dateiname)\n times = get_times(dateiname)\n df_intensities = pd.DataFrame(data=intensities.iloc[:,:], index=intensities.index, columns=[intensities.columns], copy=True)\n df_intensities.iloc[:] = intensities.iloc[:]\n all = times.append(df_intensities)\n all = all.fillna(0)\n all.to_csv(generate_filename(dateiname, '_justExport.csv'), sep=';')\n\n\n", "id": "8965196", "language": "Python", "matching_score": 1.325348138809204, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to everythingInExcel.py" }, { "content": "import os\nfrom lib.xml_import import get_positions\nfrom lib import analyte\nfrom lib.allgemein import generate_filename\nfrom lib.baseline_corr import baselinecorrection\nfrom lib.xml_import import get_intensities\nfrom lib.auswertung import compute_highest_intensity_within_interval\nimport pandas as pd\nimport regex as re\nfrom lib.plotlygraphen import plotly_xy_yFehler\n\nsuffix_for_new_filename = '_means.csv'\n# punkte_baseline = analyte.vier_mercaptobenzoesaeure()\npunkte_baseline = analyte.PMBA_nach_Kevin()\n\n\n\ndef werte_einlesen(punkte_baseline):\n data = pd.DataFrame.from_dict({'intensity': [0], 'parameter': [0]})\n print(os.getcwd())\n #dirpath, dirnames, filenames = \\\n for dirpath, dirnames, filenames in os.walk(os.getcwd()):\n # print(dirpath, dirnames, filenames)\n if 'lib' in dirnames:\n dirnames.remove('lib')\n if not dirpath.endswith('\\lib'):\n # print(dirpath, dirnames) #, filenames)\n pfad = dirpath.split('\\\\')\n messungparameter = '_'.join([pfad[-2],pfad[-1]])\n # print(messungparameter)\n for dateiname in os.listdir(dirpath):\n if dateiname.endswith('.tvf') or dateiname.endswith('.TVF'):\n # print(dirpath, dateiname)\n # print('_'.join(dateiname.split(' ')))\n # dst = '_'.join(dateiname.split(' '))\n # os.rename(os.path.join(dirpath, dateiname), os.path.join(dirpath, dst))\n intensities = get_intensities(os.path.join(dirpath, dateiname))\n # print(intensities)\n df_korregiert = baselinecorrection(intensities, punkte_baseline)\n # print(df_korregiert)\n highest_intensity_within_interval = compute_highest_intensity_within_interval(df_korregiert, punkte_baseline)\n # print(highest_intensity_within_interval.__dict__)\n # print(highest_intensity_within_interval.values[0])\n data = data.append(pd.DataFrame.from_dict({'parameter': [messungparameter], 'intensity': [highest_intensity_within_interval.values[0]]}), sort=True)\n return data # eingelesene werte\n\n\n\n\ndef generate_df_with_mean_and_std(data):\n # print(data)\n listofparams = []\n for i in data['parameter']:\n if i is not 0:\n # print(i)\n if i not in listofparams:\n listofparams.append(i)\n # print(listofparams)\n werte = {}\n for j in listofparams:\n subsetDataFrame = data[data['parameter'] == j]\n # print(subsetDataFrame)\n # print(subsetDataFrame.iloc[0, 0])\n # print(subsetDataFrame.iloc[0, 1])\n # 
print(subsetDataFrame.iloc[1, 0])\n # print(subsetDataFrame.iloc[:, 0])\n # print(subsetDataFrame.iloc[:, 0].mean())\n # print(subsetDataFrame.iloc[:, 0].std())\n # print(j)\n jlist = j.split('_')\n # print(jlist[1], jlist[3], re.sub('s', '', re.sub(',', '.', jlist[-1])))\n dictkeys = str(jlist[1]) + ' ' + str(jlist[3]) + ' ' + str(re.sub('s', '', re.sub(',', '.', jlist[-1])))\n # print(dictkeys)\n # print(dictkeys.split(' '))\n if dictkeys.split(' ')[1] == 'time':\n dictkeys = dictkeys + ' s'\n elif dictkeys.split(' ')[1] == 'conc':\n ratio = dictkeys.split(' ').pop()\n dictkeys = dictkeys + ' ' + str(float(ratio) * 69) + ' %'\n # print(dictkeys)\n werte[dictkeys] = [dictkeys.split(' ')[-2], dictkeys.split(' ')[-1], subsetDataFrame.iloc[:, 0].mean(), subsetDataFrame.iloc[:, 0].std()]\n # print(werte)\n df1 = pd.DataFrame(werte, index=['x_value', 'x_value_dim', 'mean', 'std'])\n df1 = df1.transpose()\n # print(df1)\n return df1\n\n\ndef safe_stuff(data, nwfile):\n data.to_csv(nwfile, sep=';')\n\n\ndef make3_df_from_one(nwfile):\n for datei in os.listdir():\n if datei.endswith('.cv') or datei.endswith(nwfile):\n # print(datei)\n df2 = pd.read_csv(datei, sep=';', index_col=0)\n # print(df2)\n dictwithdf = {'hno3time': df2[df2.index.str.startswith('HNO3 time')],\n 'hno3conc': df2[df2.index.str.startswith('HNO3 conc')],\n 'nh3time': df2[df2.index.str.startswith('NH4OH time')]}\n # print(dictwithdf)\n # print(hno3conc)\n # print(hno3time)\n # print(nh3time)\n return dictwithdf\n\n\ndef generate_graph(dictdf):\n for i in dictdf: # if i == 'nh3time':\n # print(i)\n # print(dictdf[i])\n # print(type(i))\n # print(dictdf[i].__dict__)\n # print(dictdf['hno3time'].__dict__)\n dictdf['hno3time'].xaxis_title = 'time [s]'\n dictdf['hno3time'].yaxis_title = 'intensity [a. u.]'\n dictdf['hno3time'].x_dtick = 15\n dictdf['hno3conc'].xaxis_title = 'concentration [%]'\n dictdf['hno3conc'].yaxis_title = 'intensity [a. u.]'\n dictdf['hno3conc'].x_dtick = 15\n dictdf['nh3time'].xaxis_title = 'time [s]'\n dictdf['nh3time'].yaxis_title = 'intensity [a. u.]'\n dictdf['nh3time'].x_dtick = 15\n\n # print(dictdf['hno3time'].__dict__)\n\n plotly_xy_yFehler(dictdf[i]['x_value'], dictdf[i]['mean'],\n errory=dictdf[i]['std'],\n # x_range=[0,dictdf[i]['x_value'].max()],\n x_dtick=dictdf['nh3time'].x_dtick,\n dateiname=str(i + '.html'), suffix_for_new_filename=suffix_for_new_filename,\n xaxis_title=dictdf[i].xaxis_title, yaxis_title=dictdf[i].yaxis_title)\n\n\n\ndata = werte_einlesen(punkte_baseline)\nprint(data)\nfilename = 'optimization'\nnwfile = generate_filename(filename, suffix_for_new_filename)\nsafe_stuff(data, nwfile)\ndf = generate_df_with_mean_and_std(data)\n# print(nwfile)\n# safe_stuff(df, nwfile)\n # print(df)\n # print(os.getcwd())\n\n# print(dictdf)\n\n# dictdf = make3_df_from_one(nwfile)\n# generate_graph(dictdf)\n", "id": "1203499", "language": "Python", "matching_score": 3.5992023944854736, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/tvf-TriVista to graph for target optimization.py" }, { "content": "import os\n# from plotly import graph_objs as go\nimport pandas as pd\nfrom lib.plotlygraphen import plotly_xy_yFehler\n\n\nsuffix_for_new_filename = '_xyGraph_yFehler.html'\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.csv') or dateiname.endswith('.CSV'):\n print(dateiname)\n with open(dateiname) as fd:\n\n\n df = pd.read_csv(fd, index_col=0, header=0, sep=';')\n print(df)\n\n x = df.index.values.tolist()\n y = df['intensity [a. u.]'].values.tolist()\n error_y = df['error [a. 
u.]'].values.tolist()\n\n # plotly_xy_yFehler(x_values=x, y_values=y, errory=error_y, dateiname=dateiname, suffix_for_new_filename=suffix_for_new_filename, xaxis_title='time [s]', yaxis_title='intensity [a. u.]')\n\n", "id": "8235489", "language": "Python", "matching_score": 0.20750924944877625, "max_stars_count": 0, "path": "Graphen/xy_yFehler.py" }, { "content": "#from Cython.Includes.numpy import __init__ as np\nimport numpy as np\nimport pandas as pd\n\n\ndef liste_in_floats_umwandeln(input):\n ft = []\n for i in input:\n k = np.float64(i)\n ft.append(k)\n return ft\n\ndef liste_in_string_umwandeln(input):\n ft = []\n for i in input:\n k = np.str(i)\n ft.append(k)\n return ft\n\ndef add_value_to_listelements(input, value):\n ft = []\n for i in input:\n k = i + value\n ft.append(k)\n return ft\n\ndef generate_filename(dateiname, suffix_for_new_filename):\n if '.' in dateiname:\n name = dateiname.split('.')\n del name[-1]\n separator = \".\"\n nwname = separator.join(name)\n nwfile = nwname + suffix_for_new_filename\n else:\n nwfile = dateiname + suffix_for_new_filename\n return nwfile\n\n\ndef innerFunktionOf_leave_every_other_datapoint_except_range(df, rangestart, rangeend):\n df2 = df.iloc[:rangestart]\n df3 = df.iloc[rangestart:rangeend]\n df4 = df.iloc[rangeend:]\n # print(df2)\n for index, row in df2.iterrows():\n if index == 1:\n df_a = pd.DataFrame(row)\n df_a = df_a.transpose()\n else:\n if index % 2 == 0:\n df_b = row\n df_a = df_a.append(df_b)\n df_a = df_a.append(df3)\n for index, row in df4.iterrows():\n if index == df4.index[0]:\n df_c = pd.DataFrame(row)\n df_c = df_c.transpose()\n else:\n if index % 2 == 0:\n df_b = row\n df_c = df_c.append(df_b)\n df_c = df_a.append(df_c)\n return df_c\n\n\ndef leave_every_other_datapoint_except_range(df, rangestart, rangeend):\n try:\n df_c = innerFunktionOf_leave_every_other_datapoint_except_range(df, rangestart, rangeend)\n except:\n df = df.set_index([list(range(1, len(df.index) + 1))])\n df_c = innerFunktionOf_leave_every_other_datapoint_except_range(df, rangestart, rangeend)\n return df_c\n\n\n\n# '_graphPositionen2D.html'\n# '_graphMapping.html'\n# '_graph3spektren.html'\n# '_neuesBSCorr.csv'\n# '_graphzeitlVerlaufForAll.html'\n# '_RegenerationVsVoltage.html'\n# '_graphzeitlVerlauf.html'\n# '_graphMappingIn2D.html'\n# '_SpectrumMitNiedrigsterIntensitaet.html'\n# '_SpectrumMitHoesterIntensitaet.html'\n# '_neuesRenataGrep.csv'\n# '_1Spectrum.html'\n", "id": "3037939", "language": "Python", "matching_score": 8.759172439575195, "max_stars_count": 0, "path": "Ramanspektren/lib/allgemein.py" }, { "content": "#from Cython.Includes.numpy import __init__ as np\nimport numpy as np\n\n\ndef liste_in_floats_umwandeln(input):\n ft = []\n for i in input:\n k = np.float64(i)\n ft.append(k)\n return ft\n\ndef liste_in_string_umwandeln(input):\n ft = []\n for i in input:\n k = np.str(i)\n ft.append(k)\n return ft\n\ndef generate_filename(dateiname, suffix_for_new_filename):\n name = dateiname.split('.')\n del name[-1]\n separator = \".\"\n nwname = separator.join(name)\n nwfile = nwname + suffix_for_new_filename\n return nwfile\n\n# '_graphPositionen2D.html'\n# '_graphMapping.html'\n# '_graph3spektren.html'\n# '_neuesBSCorr.csv'\n# '_graphzeitlVerlaufForAll.html'\n# '_RegenerationVsVoltage.html'\n# '_graphzeitlVerlauf.html'\n# '_graphMappingIn2D.html'\n# '_SpectrumMitNiedrigsterIntensitaet.html'\n# '_SpectrumMitHoesterIntensitaet.html'\n# '_neuesRenataGrep.csv'\n# '_1Spectrum.html'\n", "id": "11947146", "language": "Python", "matching_score": 
1.7866902351379395, "max_stars_count": 0, "path": "Graphen/lib/allgemein.py" }, { "content": "import os\nimport pandas as pd\nfrom Ramanspektren.lib.allgemein import liste_in_floats_umwandeln\nfrom Ramanspektren.lib.xml_import import get_times\nfrom Ramanspektren.lib.plotlygraphen import plotly_y_dependent_of_x\n\n\nsuffix_for_new_filename = '_RegenerationVsVoltage.html'\n\n\n'''\nimput file: eine .csv-Datei Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\noutput file: schlichter Graph mit 2D-Scatterplot in Plotly\n\n'''\n\nfor dateiname in os.listdir():\n if dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200ProgStart.tvf') or dateiname.endswith('_beiF1Analyt_beiF20ElektrolythFlussAn_beiF200ProgStart.TVF'):\n # print(dateiname)\n times = get_times(dateiname)\n\n TimeVoltageOn = round(times['Frame 200']['time [s]'] + 100, 0)\n FrameVoltageOn = times[times.columns[times.ix['time [s]'] > TimeVoltageOn - 1]].columns[0]\n# break\n\nfor dateiname in os.listdir():\n if dateiname.endswith('usammenfassung_Renata_grep.csv') or dateiname.endswith('usammenfassung_Renata_grep.CSV'):\n print(dateiname)\n with open(dateiname) as fd:\n df = pd.read_csv(fd, index_col=0, header=0, sep=';')\n df2 = df.apply(lambda x: x / df[FrameVoltageOn] * 100, axis=0) # Normalisierung\n\n voltage = []\n for k in df2.index:\n nr = k.split('_')\n nr = nr[6]\n nr = nr[:-1]\n nr = nr.replace(',', '.')\n voltage.append(nr)\n x_values = voltage\n x_values = liste_in_floats_umwandeln(x_values)\n # print(x_values)\n\n interval = df2.ix[:, FrameVoltageOn:]\n y_values = 100 - interval.min(axis=1)\n # print(y_values)\n print(y_values)\n df2['signal decrease [%]'] = y_values\n print(df2)\n df2.to_csv('Zusammenfassung_Renata_grep_mitSignalDecrease.csv', sep=';')\n\n plotly_y_dependent_of_x(x_values, y_values, dateiname, suffix_for_new_filename,\n x_range=None, y_range=[0, 105],\n x_dtick=2.5, y_dtick=10,\n xaxis_title='U [V]', yaxis_title='signal decrease [%]')\n", "id": "2353477", "language": "Python", "matching_score": 5.320008754730225, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/allRenataGrepIn1 to Plotly_amount of Regeneration_depends_on_voltage for Plotly_6SpektrenIn1Graph..py" }, { "content": "import os\nimport pandas as pd\nfrom Ramanspektren.lib.allgemein import liste_in_floats_umwandeln\nfrom Ramanspektren.lib.xml_import import get_times\nfrom Ramanspektren.lib.plotlygraphen import plotly_y_dependent_of_x\nfrom Ramanspektren.lib.auswertung import savitzkygolay_for_pandas\nfrom Ramanspektren.lib.auswertung import savitzkygolay_for_malgucken\n\n\nsuffix_for_new_filename = '_RegenerationVsVoltage.html'\n\n\n'''\nimput file: eine .csv-Datei Datei mit zeitl Verlauf in einer Tabelle nach baseline korrektur\noutput file: schlichter Graph mit 2D-Scatterplot in Plotly\n\n'''\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('usammenfassung_Renata_grep_und_Stromzeug.csv') or dateiname.endswith('usammenfassung_Renata_grep_und_Stromzeug.CSV'):\n print(dateiname)\n with open(dateiname) as fd:\n df = pd.read_csv(fd, index_col=0, header=0, sep=';')\n\n savgol = df.ix[:, 'Frame 1':'Frame 400'].apply(lambda x: savitzkygolay_for_malgucken(x, window_length=21, polyorder=4), axis=1) # Normalisierung\n\n normal_savgol = savgol.apply(lambda x: x / savgol['Frame 200'] * 100, axis=0)\n\n x_values = df['in chip resistance [Ohm]']\n # x_values = liste_in_floats_umwandeln(x_values)\n print(x_values)\n\n interval = normal_savgol.ix[:, 'Frame 200':]\n y_values = 100 - interval.min(axis=1)\n 
print(y_values)\n\n plotly_y_dependent_of_x(x_values, y_values, dateiname, suffix_for_new_filename='_RegVsResistance.html',\n x_range=None, y_range=[0, 105],\n x_dtick=5000, y_dtick=10,\n xaxis_title='resistance [Ohm]', yaxis_title='signal decrease [%]')\n", "id": "4250875", "language": "Python", "matching_score": 1.318771481513977, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/allRenataGrepIn1 to Plotly_amount of Regeneration_depends_on_resistance.py" }, { "content": "'''\nimput file: .csv-Datei aus Osziloskop\n'''\n\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport decimal\nimport plotly\nimport plotly.graph_objs as go # import Scatter, Layout\nfrom Ramanspektren.lib.allgemein import generate_filename\nimport scipy.signal\n\n\n\nused_resistor = 1000 # in [Ohm]\nsuffix_for_new_filename = '_Smooth_SpannunngBzwStromGgZeit.html'\n\n\ndef get_voltage(df):\n nr = df.split('_')\n # print(nr)\n nr = nr[2]\n # nr = nr.split('.')\n # nr = nr[0]\n nr = nr[:-1]\n # print(nr)\n nr = nr.replace(',', '.')\n voltage = float(nr)\n # print(voltage)\n return voltage\n\n\ndef plotly_zeitlVerlauf_2dscatter_data(df):\n ind = df.index.values.tolist()\n #print(ind, ind[-1], len(ind) )\n # print(df)\n # firstCol = df['measured voltage [V] smoothed'].values.tolist()\n secondCol = df['current [µA]'].values.tolist()\n # thirdCol = df['in chip voltage [V]'].values.tolist()\n # fourthCol = df['measured voltage [V]'].values.tolist()\n\n # secondCol = list(map(abs, (df['Ch. A Current (uA)'] - df['Ch. B Current (uA)'])))\n # thirdCol = (df['Ch. A Voltage (V)'] - df['Ch. B Voltage (V)']) / ((df['Ch. A Current (uA)'] - df['Ch. B Current (uA)'])/1000000).values.tolist()\n\n # wds = (df['Ch. A Voltage (V)'] - df['Ch. B Voltage (V)']) / (df['Ch. A Current (uA)'] - df['Ch. B Current (uA)'])\n\n\n # firstCol = df['Ch. A Voltage (V)'].values.tolist()\n # secondCol = df['Ch. A Current (uA)'].values.tolist()\n # thirdCol = df['Ch. B Voltage (V)'].values.tolist()\n # forthCol = df['Ch. B Current (uA)'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n # trace1 = go.Scatter(\n # x=ind,\n # y=firstCol,\n # yaxis='y2',\n # mode='lines',\n # line=go.Line(color=\"#0000FF\", width=1),\n # name='measured voltage [V] smoothed',\n # showlegend=True)\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n # yaxis='y1',\n mode='lines',\n line=go.Line(color=\"rgb(47,110,115)\", width=3),\n name='current [uA]',\n showlegend=True)\n # trace3 = go.Scatter(\n # x=ind,\n # y=thirdCol,\n # yaxis='y2',\n # mode='lines',\n # line=go.Line(color=\"#FF0000\", width=3),\n # name='in chip voltage [V]',\n # showlegend=True)\n # trace4 = go.Scatter(\n # x=ind,\n # y=fourthCol,\n # yaxis='y2',\n # mode='lines',\n # line=go.Line(color=\"#0099FF\", width=1),\n # name='measured voltage [V]',\n # showlegend=True)\n # trace4 = go.Scatter(\n # x=ind,\n # y=forthCol,\n # mode='lines',\n # line=go.Line(color=\"#FF6666\", width=3),\n # name='Ch. 
B Current (uA)',\n # showlegend=True)\n data = [trace2] #, trace1] # , trace3, trace4]\n return data, ind\n\ndef plotly_zeitlVerlauf_2dscatter_layout(\n ind, xaxis_title, yaxis_title, yaxis2_title, yrangestart=None, yrangestop=None, y2rangestart=None, y2rangestop=None, graphwidth=800):\n layout = go.Layout(\n autosize=False,\n width=graphwidth,\n height=430,\n showlegend=True,\n legend=dict(x=1.2, y=1),\n xaxis=dict(\n title='<b>' + xaxis_title + '</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial bold, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n # range=[0, ind[-1]],\n range=None,\n dtick=round((ind[-1]-ind[0]) / 10, -1)\n ),\n yaxis=dict(\n title='<b>' + yaxis_title + '</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=[yrangestart, yrangestop]\n ),\n # yaxis2=dict(\n # title='<b>' + yaxis2_title + '</b>',\n # overlaying='y',\n # side='right',\n # titlefont=dict(family='Arial, sans-serif',\n # size=20,\n # color='#000000'),\n # showticklabels=True,\n # tickangle=0,\n # tickfont=dict(family='Arial, sans-serif',\n # size=20,\n # color='#000000'),\n # showgrid=False,\n # showline=True,\n # linewidth=2,\n # zeroline=False,\n # autotick=True,\n # ticks='outside',\n # tick0=0,\n # ticklen=5,\n # tickwidth=1,\n # tickcolor='#FFFFFF',\n # range=[y2rangestart, y2rangestop]\n # ),\n )\n return layout\n\n\ndef plotly_zeitlVerlauf(df, dateiname):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n data, ind = plotly_zeitlVerlauf_2dscatter_data(df)\n fig = go.Figure(data=data, layout=plotly_zeitlVerlauf_2dscatter_layout(ind, xaxis_title='Time [s]', yaxis_title='Current (uA)', yaxis2_title='voltage (V)', yrangestart=None, yrangestop=None, graphwidth=900))\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) #, image='png', image_filename=nwfile) #, image_width=800, image_height=430)\n\n\ndef split_in_sections(data_to_cut, cutting_points, applyed_voltage):\n copy_data_to_cut = data_to_cut.copy()\n for i in range(0, len(cutting_points) - 1):\n if i == 0:\n points = [cutting_points[i], cutting_points[i + 1]]\n kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n a = bearbeitet.ix[cutting_points[i]:cutting_points[i + 1]]\n else:\n points = [cutting_points[i], cutting_points[i + 1]]\n kurvenabschnitt = copy_data_to_cut.ix[points[0]:points[1]]\n bearbeitet = bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage)\n b = bearbeitet.ix[cutting_points[i] + 1:cutting_points[i + 1]]\n a = a.append(b)\n ndata_to_cut = copy_data_to_cut - copy_data_to_cut + a\n ndata_to_cut = ndata_to_cut.fillna(0)\n return ndata_to_cut\n\n\ndef bearbeitung(kurvenabschnitt, cutting_points, i, applyed_voltage): # , [spectrum_values[i]: spectrum_values[i + 1]]):\n copy_kurvenabschnitt = kurvenabschnitt.copy()\n dataset = copy_kurvenabschnitt.ix[cutting_points[i] : cutting_points[i + 1]]\n dataset = applyed_voltage - abs(dataset)\n return 
dataset\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.csv') or dateiname.endswith('.CSV'):\n print(dateiname)\n with open(dateiname, 'r') as fd:\n try:\n applyed_voltage = get_voltage(dateiname)\n print(applyed_voltage)\n df = pd.read_csv(fd, sep=',', header=0, index_col=0, skiprows=16, names=['time [s]', 'measured voltage [V]', 'leer'])\n print(df)\n del df['leer']\n print(df1)\n df1 = df.replace('Null', np.nan)\n df1.dropna(axis=0, how='all', inplace=True)\n df2 = df1.apply(pd.to_numeric, errors='raise')\n # print(df2)\n df2['measured voltage [V] smoothed'] = scipy.signal.savgol_filter(df2, window_length=21, polyorder=1, axis=0, mode='nearest') # ggf auch polyorder=2 oder 3\n # print(df2)\n df2['current [A]'] = df2['measured voltage [V] smoothed']/used_resistor # voltage is the measured voltage\n df2['current [mA]'] = df2['current [A]'] * 1000\n df2['current [µA]'] = df2['current [mA]'] * 1000\n\n df2['in chip voltage [V]'] = split_in_sections(df2['measured voltage [V] smoothed'], [34, 145], applyed_voltage)\n interval = df2['in chip voltage [V]'].ix[0:50]\n maxi_voltage_in_chip = interval.max(axis=0)\n print(str(maxi_voltage_in_chip) + ' V')\n\n interval = df2['current [µA]'].ix[0:50]\n maxi_current_in_chip = interval.max(axis=0)\n print(str(maxi_current_in_chip) + ' µA')\n\n resistance_in_chip = maxi_voltage_in_chip * 1000000 / maxi_current_in_chip # resistance [Ohm]\n print(str(resistance_in_chip) + ' Ohm')\n\n # print(df2) # voltage is the voltage in the chip\n\n # wds.to_csv(dateiname + '_Widerstände.csv', sep=';', header=0)\n #\n # plotly_zeitlVerlauf(df2, dateiname)\n\n # for i in df2:\n # print(df2[i])\n #\n # df2[str(i) + ' smoothed'] = scipy.signal.savgol_filter(\n # df2[str(i)], window_length=21, polyorder=3, axis=0, mode='nearest')\n\n plotly_zeitlVerlauf(df2, dateiname)\n except:\n print('blubbi')", "id": "523983", "language": "Python", "matching_score": 7.657501220703125, "max_stars_count": 0, "path": "Oszilloskop/Oszi-Data to Plotly_smooth_SpannunngBzwStromGgZeit.py" }, { "content": "'''\nimput file: .trc-Datei aus LabSmith-Netzteil\n'''\n\n\nimport os\nimport pandas as pd\nimport regex as re\nimport numpy as np\nfrom scipy import polyfit,polyval,stats\nimport re\nfrom six import StringIO\nfrom decimal import *\nimport plotly.plotly as py\nimport plotly.graph_objs as go #import Scatter, Layout\nimport plotly\nimport plotly.offline as offline\n\n\ndef plotly_generate_filename_zeitlVerlauf(dateiname):\n name = dateiname.split('.')\n name.remove('trc')\n separator = \".\"\n nwname = separator.join(name)\n nwfile = nwname + '_graphzeitlVerlauf.html'\n return nwfile\n\ndef plotly_nach_zeiten_2dscatter_data(df):\n ind = df.index.values.tolist()\n #print(ind, ind[-1], len(ind) )\n firstCol = df['Ch. A Voltage (V)'].values.tolist()\n secondCol = df['Ch. A Current (uA)'].values.tolist()\n thirdCol = df['Ch. B Voltage (V)'].values.tolist()\n forthCol = df['Ch. B Current (uA)'].values.tolist()\n #print(firstCol)\n # for i in range(0, len(ind)):\n # ind[i] = i + 1\n trace1 = go.Scatter(\n x=ind,\n y=firstCol,\n yaxis='y2',\n mode='lines',\n line=go.Line(color=\"#0000FF\", width=3),\n name='Ch. A Voltage (V)',\n showlegend=True)\n trace2 = go.Scatter(\n x=ind,\n y=secondCol,\n mode='lines',\n line=go.Line(color=\"#FF0000\", width=3),\n name='Ch. A Current (uA)',\n showlegend=True)\n trace3 = go.Scatter(\n x=ind,\n y=thirdCol,\n yaxis='y2',\n mode='lines',\n line=go.Line(color=\"#0099FF\", width=3),\n name='Ch. 
B Voltage (V)',\n showlegend=True)\n trace4 = go.Scatter(\n x=ind,\n y=forthCol,\n mode='lines',\n line=go.Line(color=\"#FF6666\", width=3),\n name='Ch. B Current (uA)',\n showlegend=True)\n data = [trace1, trace2, trace3, trace4]\n return data, ind\n\ndef plotly_nach_zeiten_2dscatter_layout(ind):\n layout = go.Layout(\n autosize=False,\n #width=800,\n width=900,\n height=430,\n showlegend=True,\n legend=dict(x=1.2, y=1),\n yaxis=dict(title='<b>Current (uA)</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF'\n ),\n yaxis2=dict(title='<b>Voltage (V)</b>',\n overlaying='y',\n side='right',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=True,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF'\n ),\n xaxis=dict(title='<b>Time [s]</b>',\n titlefont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial bold, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n autotick=False,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=[0, ind[-1]],\n dtick=round(ind[-1] / 10, -1)\n ))\n return layout\n\n\ndef plotly_zeitlVerlauf(df, dateiname):\n nwfile = plotly_generate_filename_zeitlVerlauf(dateiname)\n data, ind = plotly_nach_zeiten_2dscatter_data(df)\n fig = go.Figure(data=data, layout=plotly_nach_zeiten_2dscatter_layout(ind))\n plotly.offline.plot(fig, filename=nwfile, auto_open=False) #, image='png', image_filename=nwfile) #, image_width=800, image_height=430)\n\n\n\n\n\n\nfor dateiname in os.listdir():\n if dateiname.endswith('.trc') or dateiname.endswith('.TRC'):\n print(dateiname)\n with open(dateiname, 'r') as fd:\n try:\n df = pd.read_csv(fd, sep='\\t', header=0, index_col=0)\n #print(df)\n plotly_zeitlVerlauf(df, dateiname)\n except:\n print('alles doof')", "id": "5472632", "language": "Python", "matching_score": 2.5021586418151855, "max_stars_count": 0, "path": "LabSmith-Stromlogger/DataImport.py" }, { "content": "# Get this figure: fig = py.get_figure(\"https://plot.ly/~IPython.Demo/3684/\")\n# Get this figure's data: data = py.get_figure(\"https://plot.ly/~IPython.Demo/3684/\").get_data()\n# Add data to this figure: py.plot(Data([Scatter(x=[1, 2], y=[2, 3])]), filename =\"Iris Data\", fileopt=\"extend\")\n# Get y data of first trace: y1 = py.get_figure(\"https://plot.ly/~IPython.Demo/3684/\").get_data()[0][\"y\"]\n\n# Get figure documentation: https://plot.ly/python/get-requests/\n# Add data documentation: https://plot.ly/python/file-options/\n\n# If you're using unicode in your file, you may need to specify the encoding.\n# You can reproduce this figure in Python with the following code!\n\n# Learn about API authentication here: https://plot.ly/python/getting-started\n# Find your api_key here: https://plot.ly/settings/api\n\nimport plotly.plotly as py\nimport plotly\nfrom plotly import graph_objs as go\nfrom plotly.graph_objs import *\n\n\ntrace1 = {\n \"x\": [1.4, 1.4, 1.3, 1.5, 1.4, 
1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n \"y\": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"942dd5\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace2 = {\n \"x\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n \"y\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"c29bea\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace3 = {\n \"x\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n \"y\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"3d6526\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace4 = {\n \"x\": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n \"y\": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"c4ce1f\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace5 = {\n \"x\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 
4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n \"y\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"13bcee\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace6 = {\n \"x\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n \"y\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"532124\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace7 = {\n \"x\": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n \"y\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"3adf2c\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y2\"\n}\ntrace8 = {\n \"x\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n \"y\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"a91497\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y2\"\n}\ntrace9 = {\n \"x\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n \"y\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 
2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"1f9d0d\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y2\"\n}\ntrace10 = {\n \"x\": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n \"y\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"e1876d\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y3\"\n}\ntrace11 = {\n \"x\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n \"y\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"f8fc72\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y3\"\n}\ntrace12 = {\n \"x\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n \"y\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"bfbfa3\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y3\"\n}\ntrace13 = {\n \"x\": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n \"y\": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n # \"marker\": {\n # 
\"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"f2eabb\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace14 = {\n \"x\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n \"y\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"8f253d\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace15 = {\n \"x\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n \"y\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"c063f6\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace16 = {\n \"x\": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n \"y\": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"d48e3a\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace17 = {\n \"x\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n \"y\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # 
\"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"9c1f01\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace18 = {\n \"x\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n \"y\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"ec780b\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y\"\n}\ntrace19 = {\n \"x\": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n \"y\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"8ec916\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y2\"\n}\ntrace20 = {\n \"x\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n \"y\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"cf0bba\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y2\"\n}\ntrace21 = {\n \"x\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n \"y\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"e00102\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y2\"\n}\ntrace22 = {\n \"x\": [0.2, 0.2, 0.2, 0.2, 
0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n \"y\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"4e091a\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y3\"\n}\ntrace23 = {\n \"x\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n \"y\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"aad08e\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y3\"\n}\ntrace24 = {\n \"x\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n \"y\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"a45457\",\n \"xaxis\": \"x\",\n \"yaxis\": \"y3\"\n}\ntrace25 = {\n \"x\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n \"y\": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"431578\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y\"\n}\ntrace26 = {\n \"x\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 
2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n \"y\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"b68390\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y\"\n}\ntrace27 = {\n \"x\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n \"y\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"008418\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y\"\n}\ntrace28 = {\n \"x\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n \"y\": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"4332db\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y\"\n}\ntrace29 = {\n \"x\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n \"y\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"bd5eb9\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y\"\n}\ntrace30 = {\n \"x\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n \"y\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 
1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"a0f680\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y\"\n}\ntrace31 = {\n \"x\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n \"y\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"7b50ba\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y2\"\n}\ntrace32 = {\n \"x\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n \"y\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"851d88\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y2\"\n}\ntrace33 = {\n \"x\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n \"y\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"f59103\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y2\"\n}\ntrace34 = {\n \"x\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n \"y\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # 
\"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"026e6c\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y3\"\n}\ntrace35 = {\n \"x\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n \"y\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"6c4ca4\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y3\"\n}\ntrace36 = {\n \"x\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n \"y\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"7f86ee\",\n \"xaxis\": \"x2\",\n \"yaxis\": \"y3\"\n}\ntrace37 = {\n \"x\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n \"y\": [1.4, 1.4, 1.3, 1.5, 1.4, 1.7, 1.4, 1.5, 1.4, 1.5, 1.5, 1.6, 1.4, 1.1, 1.2, 1.5, 1.3, 1.4, 1.7, 1.5, 1.7, 1.5, 1, 1.7, 1.9, 1.6, 1.6, 1.5, 1.4, 1.6, 1.6, 1.5, 1.5, 1.4, 1.5, 1.2, 1.3, 1.4, 1.3, 1.5, 1.3, 1.3, 1.3, 1.6, 1.9, 1.4, 1.6, 1.4, 1.5, 1.4],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"52f36d\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y\"\n}\ntrace38 = {\n \"x\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n \"y\": [4.7, 4.5, 4.9, 4, 4.6, 4.5, 4.7, 3.3, 4.6, 3.9, 3.5, 4.2, 4, 4.7, 3.6, 4.4, 4.5, 4.1, 4.5, 3.9, 4.8, 4, 4.9, 4.7, 4.3, 4.4, 4.8, 5, 4.5, 3.5, 3.8, 3.7, 3.9, 5.1, 4.5, 4.5, 4.7, 4.4, 4.1, 4, 4.4, 4.6, 4, 3.3, 4.2, 4.2, 4.2, 4.3, 3, 4.1],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"3331d0\",\n 
\"xaxis\": \"x3\",\n \"yaxis\": \"y\"\n}\ntrace39 = {\n \"x\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n \"y\": [6, 5.1, 5.9, 5.6, 5.8, 6.6, 4.5, 6.3, 5.8, 6.1, 5.1, 5.3, 5.5, 5, 5.1, 5.3, 5.5, 6.7, 6.9, 5, 5.7, 4.9, 6.7, 4.9, 5.7, 6, 4.8, 4.9, 5.6, 5.8, 6.1, 6.4, 5.6, 5.1, 5.6, 6.1, 5.6, 5.5, 4.8, 5.4, 5.6, 5.1, 5.1, 5.9, 5.7, 5.2, 5, 5.2, 5.4, 5.1],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"57bbb9\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y\"\n}\ntrace40 = {\n \"x\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n \"y\": [0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3, 0.2, 0.2, 0.1, 0.2, 0.2, 0.1, 0.1, 0.2, 0.4, 0.4, 0.3, 0.3, 0.3, 0.2, 0.4, 0.2, 0.5, 0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.4, 0.1, 0.2, 0.2, 0.2, 0.2, 0.1, 0.2, 0.2, 0.3, 0.3, 0.2, 0.6, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"797f1c\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y\"\n}\ntrace41 = {\n \"x\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n \"y\": [1.4, 1.5, 1.5, 1.3, 1.5, 1.3, 1.6, 1, 1.3, 1.4, 1, 1.5, 1, 1.4, 1.3, 1.4, 1.5, 1, 1.5, 1.1, 1.8, 1.3, 1.5, 1.2, 1.3, 1.4, 1.4, 1.7, 1.5, 1, 1.1, 1, 1.2, 1.6, 1.5, 1.6, 1.5, 1.3, 1.3, 1.3, 1.2, 1.4, 1.2, 1, 1.3, 1.2, 1.3, 1.3, 1.1, 1.3],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"b47d46\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y\"\n}\ntrace42 = {\n \"x\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n \"y\": [2.5, 1.9, 2.1, 1.8, 2.2, 2.1, 1.7, 1.8, 1.8, 2.5, 2, 1.9, 2.1, 2, 2.4, 2.3, 1.8, 2.2, 2.3, 1.5, 2.3, 2, 2, 1.8, 2.1, 1.8, 1.8, 1.8, 2.1, 1.6, 1.9, 2, 2.2, 1.5, 1.4, 2.3, 2.4, 1.8, 1.8, 2.1, 2.4, 2.3, 1.9, 2.3, 2.5, 2.3, 1.9, 2, 2.3, 1.8],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"9fb3d9\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y\"\n}\ntrace43 = {\n \"x\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 
5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n \"y\": [3.5, 3, 3.2, 3.1, 3.6, 3.9, 3.4, 3.4, 2.9, 3.1, 3.7, 3.4, 3, 3, 4, 4.4, 3.9, 3.5, 3.8, 3.8, 3.4, 3.7, 3.6, 3.3, 3.4, 3, 3.4, 3.5, 3.4, 3.2, 3.1, 3.4, 4.1, 4.2, 3.1, 3.2, 3.5, 3.6, 3, 3.4, 3.5, 2.3, 3.2, 3.5, 3.8, 3, 3.8, 3.2, 3.7, 3.3],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"e54613\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y2\"\n}\ntrace44 = {\n \"x\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n \"y\": [3.2, 3.2, 3.1, 2.3, 2.8, 2.8, 3.3, 2.4, 2.9, 2.7, 2, 3, 2.2, 2.9, 2.9, 3.1, 3, 2.7, 2.2, 2.5, 3.2, 2.8, 2.5, 2.8, 2.9, 3, 2.8, 3, 2.9, 2.6, 2.4, 2.4, 2.7, 2.7, 3, 3.4, 3.1, 2.3, 3, 2.5, 2.6, 3, 2.6, 2.3, 2.7, 3, 2.9, 2.9, 2.5, 2.8],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"450700\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y2\"\n}\ntrace45 = {\n \"x\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n \"y\": [3.3, 2.7, 3, 2.9, 3, 3, 2.5, 2.9, 2.5, 3.6, 3.2, 2.7, 3, 2.5, 2.8, 3.2, 3, 3.8, 2.6, 2.2, 3.2, 2.8, 2.8, 2.7, 3.3, 3.2, 2.8, 3, 2.8, 3, 2.8, 3.8, 2.8, 2.8, 2.6, 3, 3.4, 3.1, 3, 3.1, 3.1, 3.1, 2.7, 3.2, 3.3, 3, 2.5, 3, 3.4, 3],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"edc09c\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y2\"\n}\ntrace46 = {\n \"x\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n \"y\": [5.1, 4.9, 4.7, 4.6, 5, 5.4, 4.6, 5, 4.4, 4.9, 5.4, 4.8, 4.8, 4.3, 5.8, 5.7, 5.4, 5.1, 5.7, 5.1, 5.4, 5.1, 4.6, 5.1, 4.8, 5, 5, 5.2, 5.2, 4.7, 4.8, 5.4, 5.2, 5.5, 4.9, 5, 5.5, 4.9, 4.4, 5.1, 5, 4.5, 4.4, 5, 5.1, 4.8, 5.1, 4.6, 5.3, 5],\n # \"marker\": {\n # \"color\": \"rgb(31, 119, 180)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"setosa\",\n # \"type\": \"scatter\",\n # \"uid\": \"0fdd0b\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y3\"\n}\ntrace47 = {\n \"x\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n \"y\": [7, 6.4, 6.9, 5.5, 6.5, 5.7, 6.3, 4.9, 6.6, 
5.2, 5, 5.9, 6, 6.1, 5.6, 6.7, 5.6, 5.8, 6.2, 5.6, 5.9, 6.1, 6.3, 6.1, 6.4, 6.6, 6.8, 6.7, 6, 5.7, 5.5, 5.5, 5.8, 6, 5.4, 6, 6.7, 6.3, 5.6, 5.5, 5.5, 6.1, 5.8, 5, 5.6, 5.7, 5.7, 6.2, 5.1, 5.7],\n # \"marker\": {\n # \"color\": \"rgb(255, 127, 14)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"versicolor\",\n # \"type\": \"scatter\",\n # \"uid\": \"cd5693\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y3\"\n}\ntrace48 = {\n \"x\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n \"y\": [6.3, 5.8, 7.1, 6.3, 6.5, 7.6, 4.9, 7.3, 6.7, 7.2, 6.5, 6.4, 6.8, 5.7, 5.8, 6.4, 6.5, 7.7, 7.7, 6, 6.9, 5.6, 7.7, 6.3, 6.7, 7.2, 6.2, 6.1, 6.4, 7.2, 7.4, 7.9, 6.4, 6.3, 6.1, 7.7, 6.3, 6.4, 6, 6.9, 6.7, 6.9, 5.8, 6.8, 6.7, 6.7, 6.3, 6.5, 6.2, 5.9],\n # \"marker\": {\n # \"color\": \"rgb(44, 160, 44)\",\n # \"line\": {\n # \"color\": \"rgb(255, 255, 255)\",\n # \"width\": 0.5\n # },\n # \"opacity\": 0.74,\n # \"size\": 8\n # },\n # \"mode\": \"markers\",\n # \"name\": \"virginica\",\n # \"type\": \"scatter\",\n # \"uid\": \"6e728b\",\n \"xaxis\": \"x3\",\n \"yaxis\": \"y3\"\n}\ndata = Data([trace1, trace2, trace3, trace4, trace5, trace6, trace7, trace8, trace9, trace10, trace11, trace12, trace13, trace14, trace15, trace16, trace17, trace18, trace19, trace20, trace21, trace22, trace23, trace24, trace25, trace26, trace27, trace28, trace29, trace30, trace31, trace32, trace33, trace34, trace35, trace36, trace37, trace38, trace39, trace40, trace41, trace42, trace43, trace44, trace45, trace46, trace47, trace48])\nlayout = {\n \"autosize\": False,\n \"bargap\": 0.2,\n \"bargroupgap\": 0,\n \"barmode\": \"group\",\n \"boxgap\": 0.3,\n \"boxgroupgap\": 0.3,\n \"boxmode\": \"overlay\",\n \"dragmode\": \"zoom\",\n \"font\": {\n \"color\": \"#444\",\n \"family\": \"'Open sans', verdana, arial, sans-serif\",\n \"size\": 12\n },\n \"height\": 568,\n \"hidesources\": False,\n \"hovermode\": \"x\",\n \"legend\": {\n \"x\": 1.02,\n \"y\": 1.30133928571,\n \"bgcolor\": \"#fff\",\n \"bordercolor\": \"#444\",\n \"borderwidth\": 0,\n \"font\": {\n \"color\": \"\",\n \"family\": \"\",\n \"size\": 0\n },\n \"traceorder\": \"normal\",\n \"xanchor\": \"left\",\n \"yanchor\": \"top\"\n },\n \"margin\": {\n \"r\": 80,\n \"t\": 100,\n \"autoexpand\": True,\n \"b\": 20,\n \"l\": 20,\n \"pad\": 0\n },\n \"paper_bgcolor\": \"#fff\",\n \"plot_bgcolor\": \"rgb(255, 255, 255)\",\n \"separators\": \".,\",\n \"showlegend\": False,\n \"title\": \"Iris flower data set\",\n \"titlefont\": {\n \"color\": \"rgb(67,67,67)\",\n \"family\": \"\",\n \"size\": 20\n },\n \"width\": 800,\n \"xaxis\": {\n \"anchor\": \"y\",\n \"autorange\": True,\n \"autotick\": True,\n \"domain\": [0.26, 0.48],\n \"dtick\": 2,\n \"exponentformat\": \"B\",\n \"gridcolor\": \"#eee\",\n \"gridwidth\": 1,\n \"linecolor\": \"#444\",\n \"linewidth\": 1,\n \"mirror\": False,\n \"nticks\": 0,\n \"overlaying\": False,\n \"position\": 0,\n \"range\": [-0.571539657854, 7.57153965785],\n \"rangemode\": \"normal\",\n \"showexponent\": \"all\",\n \"showgrid\": True,\n \"showline\": False,\n \"showticklabels\": True,\n \"tick0\": 0,\n \"tickangle\": \"auto\",\n \"tickcolor\": \"#444\",\n \"tickfont\": {\n \"color\": \"rgb(102,102,102)\",\n \"family\": \"\",\n \"size\": 0\n 
},\n \"ticklen\": 5,\n \"ticks\": \"\",\n \"tickwidth\": 1,\n \"title\": \"petal width\",\n \"titlefont\": {\n \"color\": \"rgb(67, 67, 67)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"type\": \"linear\",\n \"zeroline\": False,\n \"zerolinecolor\": \"#444\",\n \"zerolinewidth\": 1\n },\n \"xaxis2\": {\n \"anchor\": \"y\",\n \"autorange\": True,\n \"autotick\": True,\n \"domain\": [0.52, 0.74],\n \"dtick\": 1,\n \"exponentformat\": \"B\",\n \"gridcolor\": \"#eee\",\n \"gridwidth\": 1,\n \"linecolor\": \"#444\",\n \"linewidth\": 1,\n \"mirror\": False,\n \"nticks\": 0,\n \"overlaying\": False,\n \"position\": 0,\n \"range\": [1.76298600311, 4.63701399689],\n \"rangemode\": \"normal\",\n \"showexponent\": \"all\",\n \"showgrid\": True,\n \"showline\": False,\n \"showticklabels\": True,\n \"tick0\": 0,\n \"tickangle\": \"auto\",\n \"tickcolor\": \"#444\",\n \"tickfont\": {\n \"color\": \"rgb(102,102,102)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"ticklen\": 5,\n \"ticks\": \"\",\n \"tickwidth\": 1,\n \"title\": \"sepal width\",\n \"titlefont\": {\n \"color\": \"rgb(67, 67, 67)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"type\": \"linear\",\n \"zeroline\": False,\n \"zerolinecolor\": \"#444\",\n \"zerolinewidth\": 1\n },\n \"xaxis3\": {\n \"anchor\": \"y\",\n \"autorange\": True,\n \"autotick\": True,\n \"domain\": [0.78, 1],\n \"dtick\": 1,\n \"exponentformat\": \"B\",\n \"gridcolor\": \"#eee\",\n \"gridwidth\": 1,\n \"linecolor\": \"#444\",\n \"linewidth\": 1,\n \"mirror\": False,\n \"nticks\": 0,\n \"overlaying\": False,\n \"position\": 0,\n \"range\": [3.94447900467, 8.25552099533],\n \"rangemode\": \"normal\",\n \"showexponent\": \"all\",\n \"showgrid\": True,\n \"showline\": False,\n \"showticklabels\": True,\n \"tick0\": 0,\n \"tickangle\": \"auto\",\n \"tickcolor\": \"#444\",\n \"tickfont\": {\n \"color\": \"rgb(102,102,102)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"ticklen\": 5,\n \"ticks\": \"\",\n \"tickwidth\": 1,\n \"title\": \"sepal length\",\n \"titlefont\": {\n \"color\": \"rgb(67, 67, 67)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"type\": \"linear\",\n \"zeroline\": False,\n \"zerolinecolor\": \"#444\",\n \"zerolinewidth\": 1\n },\n \"yaxis\": {\n \"anchor\": \"x\",\n \"autorange\": True,\n \"autotick\": True,\n \"domain\": [0.26, 0.48],\n \"dtick\": 2,\n \"exponentformat\": \"B\",\n \"gridcolor\": \"#eee\",\n \"gridwidth\": 1,\n \"linecolor\": \"#444\",\n \"linewidth\": 1,\n \"mirror\": False,\n \"nticks\": 0,\n \"overlaying\": False,\n \"position\": 0,\n \"range\": [-0.757775970726, 7.75777597073],\n \"rangemode\": \"normal\",\n \"showexponent\": \"all\",\n \"showgrid\": True,\n \"showline\": False,\n \"showticklabels\": True,\n \"tick0\": 0,\n \"tickangle\": \"auto\",\n \"tickcolor\": \"#444\",\n \"tickfont\": {\n \"color\": \"rgb(102,102,102)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"ticklen\": 5,\n \"ticks\": \"\",\n \"tickwidth\": 1,\n \"title\": \"petal width\",\n \"titlefont\": {\n \"color\": \"rgb(67, 67, 67)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"type\": \"linear\",\n \"zeroline\": False,\n \"zerolinecolor\": \"#444\",\n \"zerolinewidth\": 1\n },\n \"yaxis2\": {\n \"anchor\": \"x\",\n \"autorange\": True,\n \"autotick\": True,\n \"domain\": [0.52, 0.74],\n \"dtick\": 1,\n \"exponentformat\": \"B\",\n \"gridcolor\": \"#eee\",\n \"gridwidth\": 1,\n \"linecolor\": \"#444\",\n \"linewidth\": 1,\n \"mirror\": False,\n \"nticks\": 0,\n \"overlaying\": False,\n \"position\": 0,\n \"range\": [1.69725553974, 4.70274446026],\n \"rangemode\": 
\"normal\",\n \"showexponent\": \"all\",\n \"showgrid\": True,\n \"showline\": False,\n \"showticklabels\": True,\n \"tick0\": 0,\n \"tickangle\": \"auto\",\n \"tickcolor\": \"#444\",\n \"tickfont\": {\n \"color\": \"rgb(102,102,102)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"ticklen\": 5,\n \"ticks\": \"\",\n \"tickwidth\": 1,\n \"title\": \"sepal width\",\n \"titlefont\": {\n \"color\": \"rgb(67, 67, 67)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"type\": \"linear\",\n \"zeroline\": False,\n \"zerolinecolor\": \"#444\",\n \"zerolinewidth\": 1\n },\n \"yaxis3\": {\n \"anchor\": \"x\",\n # \"autorange\": True,\n # \"autotick\": True,\n \"domain\": [0.78, 1],\n \"dtick\": 1,\n \"exponentformat\": \"B\",\n \"gridcolor\": \"#eee\",\n \"gridwidth\": 1,\n \"linecolor\": \"#444\",\n \"linewidth\": 1,\n \"mirror\": False,\n \"nticks\": 0,\n \"overlaying\": False,\n \"position\": 0,\n \"range\": [3.84588330962, 8.35411669038],\n \"rangemode\": \"normal\",\n \"showexponent\": \"all\",\n \"showgrid\": True,\n \"showline\": False,\n \"showticklabels\": True,\n \"tick0\": 0,\n \"tickangle\": \"auto\",\n \"tickcolor\": \"#444\",\n \"tickfont\": {\n \"color\": \"rgb(102,102,102)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"ticklen\": 5,\n \"ticks\": \"\",\n \"tickwidth\": 1,\n \"title\": \"sepal length\",\n \"titlefont\": {\n \"color\": \"rgb(67, 67, 67)\",\n \"family\": \"\",\n \"size\": 0\n },\n \"type\": \"linear\",\n \"zeroline\": False,\n \"zerolinecolor\": \"#444\",\n \"zerolinewidth\": 1\n }\n}\nfig = Figure(data=data, layout=layout)\n\n\n# for dateiname in os.listdir():\n# if dateiname.endswith('_normalized.csv'):\n# print(dateiname)\n# with open(dateiname) as fd:\n# df = pd.read_csv(fd, index_col=0, header=1, sep=';')\n# # print(df)\n# df = Ramanspektren.lib.allgemein.leave_every_other_datapoint_except_range(df, 18, 21)\n# x = df.iloc[:, 0]\n# # print(x)\n# y = pd.DataFrame(df.iloc[:, 1:])\n# # print(y)\n# plotly_xy_yFehler(x_values=x, y_values=y, x_range=[0,50], y_range=[0,150], dateiname=dateiname, suffix_for_new_filename=suffix_for_new_filename, xaxis_title='time [s]', yaxis_title='norm. intensity [a. u.]', x_lables=True, y_lables=True, z_lables=True)\n\nplotly.offline.plot(fig)", "id": "9605765", "language": "Python", "matching_score": 3.321256637573242, "max_stars_count": 0, "path": "Ramanspektren/erstesPaper/graph zeitl Verlauf nebeneinander exampledata.py" }, { "content": "import plotly\nfrom plotly import graph_objs as go\nfrom lib.allgemein import generate_filename\n\n\ndef plotly_xy_yFehler_data(x_values, y_values, errorx_values, errory_values, errorx_ausan = False, errory_ausan = False):\n print(plotly.__version__)\n if errorx_values is not None:\n errorx_ausan = True\n if errory_values is not None:\n errory_ausan = True\n #print(y_values)\n trace = go.Scatter(\n x=x_values,\n y=y_values,\n error_x=dict(\n type='data',\n array=errorx_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errorx_ausan\n ),\n error_y=dict(\n type='data',\n array=errory_values,\n # thickness=1,\n # width=0,\n color='#000000',\n visible=errory_ausan\n ),\n mode='markers',\n marker=dict(\n sizemode='diameter',\n sizeref=1, #relative Größe der Marker\n sizemin=20,\n size=10,\n # color='#000000',\n # opacity=0.8,\n line=dict(color='rgb(166, 166, 166)',\n width=0)))\n data = [trace]\n return data\n\n\ndef plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick):\n layout = go.Layout(\n autosize=True,\n separators='. 
',\n width=650,\n height=430,\n margin=dict(l=100),\n xaxis=dict(\n title='<b>' + xaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=False,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=x_range,\n # range=[0, 2.5],\n dtick=x_dtick,\n ),\n yaxis=dict(\n title='<b>' + yaxis_title + '</b>',\n titlefont=dict(family='Arial bold, sans-serif',\n size=20,\n color='#000000'),\n showticklabels=True,\n # tickangle=0,\n tickfont=dict(family='Arial, sans-serif',\n size=20,\n color='#000000'),\n # separatethousands=' ',\n # exponentformat='power',\n showgrid=False,\n showline=True,\n linewidth=2,\n zeroline=False,\n autotick=False,\n ticks='outside',\n tick0=0,\n ticklen=5,\n tickwidth=1,\n tickcolor='#FFFFFF',\n range=y_range,\n # range=[0, 105],\n dtick=y_dtick,\n ))\n return layout\n\n\ndef plotly_xy_yFehler(x_values, y_values, errorx=None, errory=None, dateiname=None, suffix_for_new_filename=None, x_range=None, y_range=None, x_dtick=None, y_dtick=None, xaxis_title='', yaxis_title=''):\n nwfile = generate_filename(dateiname, suffix_for_new_filename)\n fig = dict(data=plotly_xy_yFehler_data(x_values, y_values, errorx, errory),\n layout=plotly_xy_yFehler_layout(xaxis_title, yaxis_title, x_range, y_range, x_dtick, y_dtick))\n plotly.offline.plot(fig, filename=nwfile) #, image_filename=nwfile) #, image='png', image_width=1600, image_height=860)\n", "id": "9938084", "language": "Python", "matching_score": 3.275385618209839, "max_stars_count": 0, "path": "Ramanspektren/lib/plotlygraphen.py" } ]
5.305874
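A minimal usage sketch for the plotly_xy_yFehler helper defined in Ramanspektren/lib/plotlygraphen.py above, mirroring the commented-out processing loop in the example-data script: the import path, the file name 'example_normalized.csv' and the suffix '_timeseries' are assumptions, and only keyword arguments present in the visible signature are used.

import pandas as pd
# Import path is an assumption; it presumes the Ramanspektren package is importable from the working directory.
from Ramanspektren.lib.plotlygraphen import plotly_xy_yFehler

# Hypothetical normalized-spectra file, read the same way as in the commented-out loop above.
df = pd.read_csv('example_normalized.csv', index_col=0, header=1, sep=';')
x = df.iloc[:, 0]                 # first column: time axis
y = pd.DataFrame(df.iloc[:, 1:])  # remaining columns: normalized intensities

# Scatter plot with optional error bars (none passed here); the helper writes an offline
# plotly HTML file whose name is built by generate_filename from 'dateiname' and the suffix.
plotly_xy_yFehler(x_values=x, y_values=y,
                  x_range=[0, 50], y_range=[0, 150],
                  dateiname='example_normalized.csv',
                  suffix_for_new_filename='_timeseries',
                  xaxis_title='time [s]', yaxis_title='norm. intensity [a. u.]')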
elmame
[ { "content": "from setuptools import find_packages, setup\nimport pathlib\n\n# The directory containing this file\nHERE = pathlib.Path(__file__).parent\n\n# The text of the README file\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name='scseirx',\n version='1.3.0',\n packages=find_packages(where='src'),\n package_dir={\"\": \"src\"},\n package_data={'scseirx':['data/nursing_home/*.bz2', 'data/school/*.bz2', 'img/*.png']},\n description='A simulation tool to explore the spread of COVID-19 in small communities such as nursing homes or schools via agent-based modeling (ABM) and the impact of prevention measures.',\n long_description=README,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/JanaLasser/agent_based_COVID_SEIRX\",\n author='<NAME>',\n author_email='<EMAIL>',\n license='MIT',\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\"],\n install_requires=[\n 'numpy>=1.19.2',\n 'scipy>=1.6.1',\n 'matplotlib==3.3.4',\n 'networkx>=2.5',\n 'mesa>=0.8.8.1',\n 'pandas>=1.2.3'],\n setup_requires=['pytest-runner'],\n tests_require=['pytest==6.2.2'],\n test_suite='tests'\n)\n", "id": "2792318", "language": "Python", "matching_score": 0.6188921928405762, "max_stars_count": 0, "path": "setup.py" }, { "content": "def check_test_type(var, tests):\n\tif var != None:\n\t\tassert type(var) == str, 'not a string'\n\t\tassert var in tests.keys(), 'unknown test type {}'.format(var)\n\treturn var\n\n\n\nclass Testing():\n\tdef __init__(self, model, diagnostic_test_type, \n\t\tpreventive_screening_test_type, follow_up_testing_interval,\n\t\tscreening_intervals, liberating_testing, \n\t\tK1_contact_types, verbosity):\n\n\t\tself.follow_up_testing_interval = follow_up_testing_interval\n\t\tself.screening_intervals = screening_intervals\n\t\tself.liberating_testing = liberating_testing\n\t\tself.model = model\n\t\tself.verbosity = verbosity\n\t\tself.K1_contact_types = K1_contact_types\n\n\t\t# mean parameters for exposure and infection duration to base\n\t\t# estimates for test detection thresholds on\n\t\texposure_duration = 4\n\t\tinfection_duration = 11\n\n\t\tself.tests = {\n\t\t'same_day_antigen':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.1':\n\t {\n\t 'sensitivity':0.1,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.2':\n\t {\n\t 'sensitivity':0.2,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.3':\n\t {\n\t 'sensitivity':0.3,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.4':\n\t {\n\t 'sensitivity':0.4,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.5':\n\t {\n\t 'sensitivity':0.5,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.6':\n\t {\n\t 'sensitivity':0.6,\n\t 'specificity':1,\n\t 
'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.7':\n\t {\n\t 'sensitivity':0.7,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.8':\n\t {\n\t 'sensitivity':0.8,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t 'same_day_antigen0.9':\n\t {\n\t 'sensitivity':0.9,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':0\n\t },\n\t\t'one_day_antigen':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':1\n\t },\n\t\t'two_day_antigen':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration + 2,\n\t 'time_testable':exposure_duration + 6,\n\t 'time_until_test_result':2\n\t },\n\t 'same_day_PCR':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':4,\n\t 'time_testable':infection_duration,\n\t 'time_until_test_result':0\n\t },\n\t 'one_day_PCR':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':4,\n\t 'time_testable':infection_duration,\n\t 'time_until_test_result':1\n\t },\n\t 'two_day_PCR':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':4,\n\t 'time_testable':infection_duration,\n\t 'time_until_test_result':2\n\t },\n\t 'same_day_LAMP':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration,\n\t 'time_testable':infection_duration,\n\t 'time_until_test_result':0\n\t },\n\t 'one_day_LAMP':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration,\n\t 'time_testable':infection_duration,\n\t 'time_until_test_result':1\n\t },\n\t 'two_day_LAMP':\n\t {\n\t 'sensitivity':1,\n\t 'specificity':1,\n\t 'time_until_testable':exposure_duration,\n\t 'time_testable':infection_duration,\n\t 'time_until_test_result':2\n\t }\n\t }\n\n\t\tself.diagnostic_test_type = check_test_type(diagnostic_test_type, self.tests)\n\t\tself.preventive_screening_test_type = check_test_type(preventive_screening_test_type, self.tests)\n\t\t#self.sensitivity = self.tests[self.test_type]['sensitivity']\n\t\t#self.specificity = self.tests[self.test_type]['specificity']\n\t\t#self.time_until_testable = self.tests[self.test_type]['time_until_testable']\n\t\t#self.time_testable = self.tests[self.test_type]['time_testable']\n\t\t#self.time_until_test_result = self.tests[self.test_type]['time_until_test_result']\n\n\n\n\n", "id": "9837133", "language": "Python", "matching_score": 2.3756818771362305, "max_stars_count": 0, "path": "src/scseirx/testing_strategy.py" }, { "content": "import numpy as np\nimport networkx as nx\nfrom math import gamma\nfrom scipy.optimize import root_scalar\n\nfrom mesa import Model\nfrom mesa.time import RandomActivation, SimultaneousActivation\nfrom mesa.datacollection import DataCollector\n\nfrom scseirx.testing_strategy import Testing\n\n## data collection functions ##\ndef get_N_diagnostic_tests(model):\n return model.number_of_diagnostic_tests\n\n\ndef get_N_preventive_screening_tests(model):\n return model.number_of_preventive_screening_tests\n\n\ndef get_infection_state(agent):\n if agent.exposed == 
True: return 'exposed'\n elif agent.infectious == True: return 'infectious'\n elif agent.recovered == True: return 'recovered'\n else: return 'susceptible'\n\n\ndef get_quarantine_state(agent):\n if agent.quarantined == True: return True\n else: return False\n\n\ndef get_undetected_infections(model):\n return model.undetected_infections\n\n\ndef get_predetected_infections(model):\n return model.predetected_infections\n\n\ndef get_pending_test_infections(model):\n return model.pending_test_infections\n\n\ndef get_diagnostic_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['student']\n\n\ndef get_diagnostic_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]['teacher']\n\n\ndef get_diagnostic_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.diagnostic_test_type]\\\n ['family_member']\n\n\ndef get_preventive_test_detected_infections_student(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]\\\n ['student']\n\n\ndef get_preventive_test_detected_infections_teacher(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]\\\n ['teacher']\n\n\ndef get_preventive_test_detected_infections_family_member(model):\n return model.positive_tests[model.Testing.preventive_screening_test_type]\\\n ['family_member']\n\n\n# parameter sanity check functions\n\n\ndef check_positive(var):\n\tassert var >= 0, 'negative number'\n\treturn var\n\n\ndef check_bool(var):\n\tassert type(var) == bool, 'not a bool'\n\treturn var\n\n\ndef check_positive_int(var):\n if var == None:\n return var\n assert type(var) == int, 'not an integer'\n assert var >= 0, 'negative number'\n return var\n\n\ndef check_contact_type_dict(var):\n\tassert type(var) == dict, 'not a dictionary'\n\tassert set(var.keys()).issubset({'very_far', 'far', 'intermediate', 'close'}), \\\n\t\t'does not contain the correct contact types (has to be very_far, far, intermediate or close)'\n\tassert all((isinstance(i, int) or isinstance(i, float)) for i in var.values()), \\\n\t\t'contact type weights are not numeric'\n\n\treturn var\n\n\ndef check_K1_contact_types(var):\n for area in var:\n assert area in ['very_far', 'far', 'intermediate',\n 'close'], 'K1 contact type not recognised'\n return var\n\n\ndef check_testing(var):\n assert var in ['diagnostic', 'background', 'preventive', False], \\\n 'unknown testing mode: {}'.format(var)\n\n return var\n\n\n\ndef check_probability(var):\n\tassert (type(var) == float) or (var == 0) or (var == 1), \\\n\t\t '{} not a float'.format(var)\n\tassert var >= 0, 'probability negative'\n\tassert var <= 1, 'probability larger than 1'\n\treturn var\n\n\ndef check_graph(var):\n assert type(var) in [nx.Graph, nx.MultiGraph], 'not a networkx graph'\n assert len(var.nodes) > 0, 'graph has no nodes'\n assert len(var.edges) > 0, 'graph has no edges'\n areas = [e[2]['contact_type'] for e in var.edges(data=True)]\n areas = set(areas)\n for a in areas:\n assert a in {'very_far', 'far', 'intermediate',\n 'close'}, 'contact type {} not recognised'.format(a)\n return var\n\n\ndef check_index_case(var, agent_types):\n\tallowed_strings = agent_types[:]\n\tallowed_strings.extend(['continuous'])\n\tassert var in allowed_strings, 'unknown index case mode'\n\treturn var\n\n\ndef check_discount(var):\n if var['slope'] != None:\n assert var['slope'] <= 0, 'slope needs to be <= 0 or None'\n assert np.abs(var['slope']) <= 1, 'absolute 
value of slope needs to be <= 1'\n assert var['intercept'], 'intercept needs to be positive'\n assert var['intercept'], 'intercept needs to be <= 1'\n return var\n\n\ndef get_weibull_shape(k, mu, var):\n '''\n Calculates the shape parameter of a Weibull distribution, given its mean\n mu and its variance var\n '''\n return var / mu**2 - gamma(1 + 2/k) / gamma(1+1/k)**2 + 1\n\n\n\ndef get_weibull_scale(mu, k):\n '''\n Calculates the scale parameter of a Weibull distribution, given its mean\n mu and its shape parameter k\n '''\n return mu / gamma(1 + 1/k)\n\n\ndef weibull_two_param(shape, scale):\n '''\n A two-parameter Weibull distribution, based on numpy ramdon's single \n parameter distribution. We use this distribution in the simulation to draw\n random epidemiological parameters for agents from the given distribution\n See https://numpy.org/doc/stable/reference/random/generated/numpy.random.weibull.html\n '''\n return scale * np.random.weibull(shape)\n\n\nclass SEIRX(Model):\n '''\n A model with a number of different agents that reproduces\n the SEIRX dynamics of pandemic spread in a facility. Note:\n all times are set to correspond to days\n\n G: networkx undirected graph, interaction graph between agents. Edges have\n to have edge the edge attribute 'contact_type' specifying the closeness of \n contacts, which can be ['very far', 'far', 'intermediate' and 'close']. \n Nodes have to have the node attribute 'type' which specifies the agent type\n of the given node (for example 'student' or 'teacher' in a school scenario).\n In addition, nodes can have the attribute 'unit', which assigns them to a\n unit in space (for example a 'class' in a school scenario).\n\n verbosity: integer in [0, 1, 2], controls text output to std out to track\n simulation progress and transmission dynamics. Default = 0.\n\n testing, default = 'diagnostic' \n 'diagnostic': only diagnostic tests for symptomatic agents\n 'background': adds background screens of all agents after a positive \n diagnostic test\n 'preventive': adds preventive screens of agent groups in time \n intervals specified separately for each agent group in\n the variable 'screening_interval' \n\n infection_duration, default = 11 NOTE: includes the time an agent is exposed \n but not yet infectious at the beginning of an infection\n positive integer: mean or median of the infection duration in days\n list of two floats: mean and standard deviation of a distribution \n specifying the infection duration in days. These \n numbers will be used to construct a Weibull \n distribution from which the infection duration will \n be drawn for every agent individually\n\n exposure_duration, default = 4. Sets the time from transmission to becoming \n infectious\n positive integer: mean or median of the exposure duration in days\n list of two floats: mean and standard deviation of a distribution \n specifying the exposure duration in days. These \n numbers will be used to construct a Weibull \n distributoin from which the exposure duration will \n be drawn for every agent individually.\n\n time_until_symptoms, default = 6. Sets the time from transmission to \n (potentially) developing symptoms. Symptom probability has to be set for\n each agent group individually using the parameter 'symptom_probability'\n positive integer: mean or median of the time until symptoms in days\n list of two floats: mean and standard deviation of a distribution \n specifying the time until symptoms in days. 
These \n numbers will be used to construct a Weibull \n distribution from which the time until symptoms will\n be drawn for every agent individually.\n\n quarantine_duration, default = 14. Positive integer, sets the time a \n positively tested agent is quarantined in days\n\n infection_risk_contact_type_weights: dictionary of the form\n {'very_far':float, 'far':float, 'intermediate':float, 'close':float}\n that sets transmission risk multipliers for different contact types of\n agents specified in the contact network G. Default: {'very_far': 0.1,\n 'far': 0.5, 'intermediate': 1, 'close': 3}\n\n subclinical_modifier: default = 1.0. Float, modifies the infectiousness of \n asymptomatic cases. Example: if subclinical_modifier = 0.5, the \n infectiousness of an asymptomatic case will be reduced to 50%.\n\n K1_contact_types: list of strings from ['very_far', 'far', 'intermediate',\n 'close']. Definition of contact types for which agents are considered \n \"K1 contact persons\" if they had contact to a positively tested person wtith \n a specified contact intensity. Default = ['close'].\n\n diagnostic_test_type, default = 'one_day_PCR'. String, specifies the test \n technology and test result turnover time used for diagnostic testing. For \n example 'same_day_antigen' or 'two_day_PCR'. See module \"Testing\" for \n different implemented testing techologies.\n\n preventive_screening_test_type:, default = 'one_day_PCR', String, specifies \n the test technology and test result turnover time used for preventive \n sreening. For example 'same_day_antigen' or 'two_day_PCR'. See module \n \"Testing\" for different implemented testing techologies.\n\n follow_up_testing_interval, default = None. Positive integer, sets the time \n a follow-up screen (background screen) is initiated after an initial screen \n triggered by a positive test result. Only applies if the testing strategy is\n 'background' or preventive.\n\n liberating_testing, default = False. Boolean, flag that specifies, whether \n or not an agent is released from quarantine after returning a negative test \n result.\n\n\tindex_case, default = 'employee' (nursing home scenario) or 'teacher' \n (school scenario). Specifies how infections are introduced into the facility.\n agent_type: If an agent type (for example 'student' or 'teacher' in \n the school scenario) is specified, a single randomly\n chosen agent from this agent group will become the index\n case and no further index cases will be introduced into\n the scenario.\n 'continuous': In this case, agents have a continuous risk to become \n index cases in every simulation step. The risk has to\n be specified for every agent group individually, using\n the 'index_probability' parameter. If only a single\n agent group has a non-zero index probability, then only\n agents from this group can become index cases.\n\n\n\tagent_types: dictionary of the structure\n\t\t{\n\t\tagent type:\n\t\t\t{\n\t\t\tscreening interval : integer, number of days between each preventive\n\t\t\tscreen in this agent group\n\n\t\t\tindex probability : float in the range [0, 1], sets the probability\n\t\t\tto become an index case in each time step\n\n\t\t\tmask : bool\n whether or not the agent type is wearing a mask\n\t\t\t}\n\t\t}\n\n\tThe dictionary's keys are the names of the agent types which have to\n\tcorrespond to the node attributes in the contact graph. 
The screening\n\tinterval sets the time-delay between preventive screens of this agent group,\n\tthe index probability sets the probability of a member of this agent group\n\tbecoming an index case in every time step\n\n seed: positive integer, fixes the seed of the simulation to enable\n repeatable simulation runs. If seed = None, the simulation will be \n initialized at random.\n '''\n\n def __init__(self, G, \n verbosity = 0, \n base_transmission_risk = 0.05,\n testing='diagnostic',\n exposure_duration = [5.0, 1.9],\n time_until_symptoms = [6.4, 0.8],\n infection_duration = [10.91, 3.95], \n quarantine_duration = 10, \n subclinical_modifier = 0.6,\n infection_risk_contact_type_weights = {\n 'very_far': 0.1,\n 'far': 0.25,\n 'intermediate': 0.5,\n 'close': 1},\n K1_contact_types = ['close'],\n diagnostic_test_type = 'one_day_PCR',\n preventive_screening_test_type = 'same_day_antigen',\n follow_up_testing_interval = None,\n liberating_testing = False,\n index_case = 'teacher', \n agent_types = {\n 'teacher': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False},\n 'student': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False},\n 'family_member':{'screening_interval': None,\n 'index_probability': 0,\n 'mask':False}},\n age_transmission_risk_discount = \\\n {'slope':-0.02,\n 'intercept':1},\n age_symptom_discount = \\\n {'slope':-0.02545,\n 'intercept':0.854545},\n mask_filter_efficiency = {'exhale':0, 'inhale':0},\n transmission_risk_ventilation_modifier = 0,\n seed = None):\n\n # mesa models already implement fixed seeds through their own random\n # number generations. Sadly, we need to use the Weibull distribution\n # here, which is not implemented in mesa's random number generation\n # module. Therefore, we need to initialize the numpy random number\n # generator with the given seed as well\n if seed != None:\n np.random.seed(seed)\n\n # sets the (daily) transmission risk for a household contact without\n # any precautions. Target infection ratios are taken from literature\n # and the value of the base_transmission_risk is calibrated such that\n # the simulation produces the correct infection ratios in a household\n # setting with the given distributions for epidemiological parameters\n # of agents\n self.base_transmission_risk = base_transmission_risk\n \t# sets the level of detail of text output to stdout (0 = no output)\n self.verbosity = check_positive_int(verbosity)\n # flag to turn off the testing & tracing strategy\n self.testing = check_testing(testing)\n self.running = True # needed for the batch runner implemented by mesa\n # set the interaction mode to simultaneous activation\n self.schedule = SimultaneousActivation(self)\n\n\n # internal step counter used to launch screening tests\n self.Nstep = 0\n\n # since we may have weekday-specific contact networks, we need\n # to keep track of the day of the week. 
Since the index case\n # per default is introduced at step 0 in index case mode, we\n # need to offset the starting weekday by a random number of weekdays\n # to prevent artifacts from always starting on the same day of the week\n\n self.weekday_offset = self.random.randint(1, 8)\n self.weekday = self.Nstep + self.weekday_offset\n\n ## epidemiological parameters: can be either a single integer or the\n # mean and standard deviation of a distribution\n self.epi_params = {}\n # counter to track the number of pathological parameter combinations\n # that had to be re-rolled (only here for debugging and control reasons)\n self.param_rerolls = 0\n\n for param, param_name in zip([exposure_duration, time_until_symptoms,\n infection_duration],['exposure_duration', 'time_until_symptoms',\n 'infection_duration']):\n\n if isinstance(param, int):\n self.epi_params[param_name] = check_positive_int(param)\n\n elif isinstance(param, list) and len(param) == 2:\n\n mu = check_positive(param[0])\n var = check_positive(param[1]**2)\n shape = root_scalar(get_weibull_shape, args=(mu, var),\n method='toms748', bracket=[0.2, 500]).root\n scale = get_weibull_scale(mu, shape)\n\n self.epi_params[param_name] = [shape, scale] \n else:\n print('{} format not recognized, should be either a single '+\\\n 'int or a tuple of two positive numbers'.format(param_name)) \n\n\n # duration of quarantine\n self.quarantine_duration = check_positive_int(quarantine_duration)\n\n self.infection_risk_area_weights = check_contact_type_dict(\n infection_risk_contact_type_weights)\n\n # modifier for infectiosness for asymptomatic cases\n self.subclinical_modifier = check_positive(subclinical_modifier)\n # modifiers for the infection risk, depending on contact type\n self.infection_risk_contact_type_weights = infection_risk_contact_type_weights\n\n # discounts for age-dependent transmission and reception risks and\n # symptom probabilities\n self.age_transmission_risk_discount = \\\n check_discount(age_transmission_risk_discount)\n self.age_symptom_discount = \\\n check_discount(age_symptom_discount)\n\n self.mask_filter_efficiency = mask_filter_efficiency\n self.transmission_risk_ventilation_modifier = \\\n transmission_risk_ventilation_modifier\n\n ## agents and their interactions\n # interaction graph of agents\n self.G = check_graph(G)\n # add weights as edge attributes so they can be visualised easily\n if type(self.G) == nx.MultiGraph:\n for (u, v, key, contact_type) in self.G.edges(keys=True, \n data='contact_type'):\n self.G[u][v][key]['weight'] = \\\n self.infection_risk_contact_type_weights[contact_type]\n else:\n for e in G.edges(data=True):\n G[e[0]][e[1]]['weight'] = self.infection_risk_contact_type_weights\\\n \t[G[e[0]][e[1]]['contact_type']]\n\n # extract the different agent types from the contact graph\n self.agent_types = list(agent_types.keys())\n # dictionary of available agent classes with agent types and classes\n self.agent_classes = {}\n if 'resident' in agent_types:\n from scseirx.agent_resident import resident\n self.agent_classes['resident'] = resident\n if 'employee' in agent_types:\n from scseirx.agent_employee import employee\n self.agent_classes['employee'] = employee\n if 'student' in agent_types:\n from scseirx.agent_student import student\n self.agent_classes['student'] = student\n if 'teacher' in agent_types:\n from scseirx.agent_teacher import teacher\n self.agent_classes['teacher'] = teacher\n if 'family_member' in agent_types:\n from scseirx.agent_family_member import family_member\n 
self.agent_classes['family_member'] = family_member\n\n ## set agent characteristics for all agent groups\n # list of agent characteristics\n params = ['screening_interval','index_probability', 'mask',\n 'voluntary_testing_rate']\n\n # default values that are used in case a characteristic is not specified\n # for an agent group\n defaults = {'screening_interval':None,\n 'index_probability':0,\n 'mask':False,\n 'voluntary_testing_rate':1}\n\n # sanity checks that are applied to parameters passed to the class\n # constructor to make sure they conform to model expectations\n check_funcs = [check_positive_int, check_probability, check_bool,\n check_probability]\n\n # member dicts that store the parameter values for each agent group\n self.screening_intervals = {}\n self.index_probabilities = {}\n self.masks = {}\n self.voluntary_testing_rates = {}\n\n param_dicts = [self.screening_intervals, self.index_probabilities, \n self.masks, self.voluntary_testing_rates]\n\n # iterate over all possible agent parameters and agent groups: set the\n # respective value to the value passed through the constructor or to \n # the default value if no value has been passed\n for param,param_dict,check_func in zip(params,param_dicts,check_funcs):\n for at in self.agent_types:\n try:\n param_dict.update({at:check_func(agent_types[at][param])})\n except KeyError:\n param_dict.update({at:defaults[param]})\n\n # pass all parameters relevant for the testing strategy to the testing\n # class. NOTE: this separation is not a strictly necessary design \n # decision but I like to keep the parameters related to testing and \n # tracing in a separate place\n self.Testing = Testing(self, diagnostic_test_type,\n preventive_screening_test_type,\n check_positive_int(follow_up_testing_interval),\n self.screening_intervals,\n check_bool(liberating_testing),\n check_K1_contact_types(K1_contact_types),\n verbosity)\n\n\n # specifies either continuous probability for index cases in agent\n # groups based on the 'index_probability' for each agent group, or a\n # single (randomly chosen) index case in the passed agent group\n self.index_case = check_index_case(index_case, self.agent_types)\n\n self.num_agents = {}\n\n ## add agents\n # extract the agent nodes from the graph and add them to the scheduler\n for agent_type in self.agent_types:\n IDs = [x for x,y in G.nodes(data=True) if y['type'] == agent_type]\n self.num_agents.update({agent_type:len(IDs)})\n\n # get the agent locations (units) from the graph node attributes\n units = [self.G.nodes[ID]['unit'] for ID in IDs]\n for ID, unit in zip(IDs, units):\n\n tmp_epi_params = {}\n # for each of the three epidemiological parameters, check if\n # the parameter is an integer (if yes, pass it directly to the\n # agent constructor), or if it is specified by the shape and \n # scale parameters of a Weibull distribution. 
In the latter \n # case, draw a new number for every agent from the distribution\n # NOTE: parameters drawn from the distribution are rounded to\n # the nearest integer\n while True:\n for param_name, param in self.epi_params.items():\n if isinstance(param, int):\n tmp_epi_params[param_name] = param\n\n else:\n tmp_epi_params[param_name] = \\\n round(weibull_two_param(param[0], param[1]))\n\n if tmp_epi_params['exposure_duration'] > 0 and \\\n tmp_epi_params['time_until_symptoms'] >= \\\n tmp_epi_params['exposure_duration'] and\\\n tmp_epi_params['infection_duration'] > \\\n tmp_epi_params['exposure_duration']:\n break\n else:\n self.param_rerolls += 1\n if verbosity > 1:\n print('pathological epi-param case found!')\n print(tmp_epi_params)\n\n # check if the agent participates in voluntary testing\n p = self.voluntary_testing_rates[agent_type]\n voluntary_testing = np.random.choice([True, False],\n p=[p, 1-p])\n\n a = self.agent_classes[agent_type](ID, unit, self, \n tmp_epi_params['exposure_duration'], \n tmp_epi_params['time_until_symptoms'], \n tmp_epi_params['infection_duration'], \n voluntary_testing,\n verbosity)\n self.schedule.add(a)\n\n\t\t# infect the first agent in single index case mode\n if self.index_case != 'continuous':\n infection_targets = [\n a for a in self.schedule.agents if a.type == index_case]\n # pick a random agent to infect in the selected agent group\n target = self.random.randint(0, len(infection_targets) - 1)\n infection_targets[target].exposed = True\n if self.verbosity > 0:\n print('{} exposed: {}'.format(index_case,\n infection_targets[target].ID))\n \n\n # list of agents that were tested positive this turn\n self.newly_positive_agents = []\n # flag that indicates if there were new positive tests this turn\n self.new_positive_tests = False\n # dictionary of flags that indicate whether a given agent group has\n # been creened this turn\n self.screened_agents= {\n 'reactive':{agent_type: False for agent_type in self.agent_types},\n 'follow_up':{agent_type: False for agent_type in self.agent_types},\n 'preventive':{agent_type: False for agent_type in self.agent_types}}\n\n\n # dictionary of counters that count the days since a given agent group\n # was screened. Initialized differently for different index case modes\n if (self.index_case == 'continuous') or \\\n \t (not np.any(list(self.Testing.screening_intervals.values()))):\n \tself.days_since_last_agent_screen = {agent_type: 0 for agent_type in\n \tself.agent_types}\n # NOTE: if we initialize these variables with 0 in the case of a single\n # index case, we introduce a bias since in 'single index case mode' the\n # first index case will always become exposed in step 0. 
To realize\n # random states of the preventive sceening procedure with respect to the\n # incidence of the index case, we have to randomly pick the days since\n # the last screen for the agent group from which the index case is\n else:\n \tself.days_since_last_agent_screen = {}\n \tfor agent_type in self.agent_types:\n \t\tif self.Testing.screening_intervals[agent_type] != None:\n \t\t\tself.days_since_last_agent_screen.update({\n \t\t\t\tagent_type: self.random.choice(range(0,\n \t\t\t\t self.Testing.screening_intervals[agent_type] + 1))})\n \t\telse:\n \t\t\tself.days_since_last_agent_screen.update({agent_type: 0})\n\n # dictionary of flags that indicates whether a follow-up screen for a\n # given agent group is scheduled\n self.scheduled_follow_up_screen = {agent_type: False for agent_type in\n \tself.agent_types}\n\n # counters\n self.number_of_diagnostic_tests = 0\n self.number_of_preventive_screening_tests = 0\n self.positive_tests = {self.Testing.preventive_screening_test_type:\n {agent_type:0 for agent_type in self.agent_types},\n self.Testing.diagnostic_test_type:\n {agent_type:0 for agent_type in self.agent_types}}\n\n self.undetected_infections = 0\n self.predetected_infections = 0\n self.pending_test_infections = 0\n self.quarantine_counters = {agent_type:0 for agent_type in agent_types.keys()}\n self.false_negative = 0\n\n # data collectors to save population counts and agent states every\n # time step\n self.datacollector = DataCollector(\n model_reporters=\n \t{\n \t'N_diagnostic_tests':get_N_diagnostic_tests,\n 'N_preventive_screening_tests':get_N_preventive_screening_tests,\n 'diagnostic_test_detected_infections_student':\\\n get_diagnostic_test_detected_infections_student,\n 'diagnostic_test_detected_infections_teacher':\\\n get_diagnostic_test_detected_infections_teacher,\n 'diagnostic_test_detected_infections_family_member':\\\n get_diagnostic_test_detected_infections_family_member,\n 'preventive_test_detected_infections_student':\\\n get_preventive_test_detected_infections_student,\n 'preventive_test_detected_infections_teacher':\\\n get_preventive_test_detected_infections_teacher,\n 'preventive_test_detected_infections_family_member':\\\n get_preventive_test_detected_infections_family_member,\n 'undetected_infections':get_undetected_infections,\n 'predetected_infections':get_predetected_infections,\n 'pending_test_infections':get_pending_test_infections\n },\n\n agent_reporters=\n \t{\n \t'infection_state': get_infection_state,\n 'quarantine_state': get_quarantine_state\n })\n\n\n ## transmission risk modifiers\n def get_transmission_risk_contact_type_modifier(self, source, target):\n # construct the edge key as combination between agent IDs and weekday\n n1 = source.ID\n n2 = target.ID\n tmp = [n1, n2]\n tmp.sort()\n n1, n2 = tmp\n key = n1 + n2 + 'd{}'.format(self.weekday)\n contact_weight = self.G.get_edge_data(n1, n2, key)['weight']\n\n # the link weight is a multiplicative modifier of the link strength.\n # contacts of type \"close\" have, by definition, a weight of 1. Contacts\n # of type intermediate, far or very far have a weight < 1 and therefore\n # are less likely to transmit an infection. For example, if the contact\n # type far has a weight of 0.2, a contact of type far has only a 20% \n # chance of transmitting an infection, when compared to a contact of\n # type close. 
To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of success)\n # by the modifications introduced by preventive measures. These\n # modifications are formulated in terms of \"probability of failure\", or\n # \"q\". A low contact weight has a high probability of failure, therefore\n # we return q = 1 - contact_weight here.\n q1 = 1 - contact_weight\n\n return q1\n\n\n def get_transmission_risk_age_modifier_transmission(self, source):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = source.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n\n # The age weight can be interpreted as multiplicative factor that \n # reduces the chance for transmission with decreasing age. The slope\n # of the age_transmission_discount function is the decrease (in % of \n # the transmission risk for an 18 year old or above) of transmission\n # risk with every year a person is younger than 18 (the intercept is\n # 1 by definition). \n # To calculate the probability of success p in the Bernoulli\n # trial, we need to reduce the base risk (or base probability of success)\n # by the modifications introduced by preventive measures. These\n # modifications are formulated in terms of \"probability of failure\", or\n # \"q\". A low age weight has a high probability of failure, therefore\n # we return q = 1 - age_weight here.\n q2 = 1 - age_weight\n else:\n q2 = 0\n\n return q2\n\n\n def get_transmission_risk_age_modifier_reception(self, target):\n '''linear function such that at age 18 the risk is that of an adult (=1).\n The slope of the line needs to be calibrated.\n '''\n age = target.age\n max_age = 18\n if age <= max_age:\n age_weight = self.age_transmission_risk_discount['slope'] * \\\n np.abs(age - max_age) + self.age_transmission_risk_discount['intercept']\n # see description in get_transmission_risk_age_modifier_transmission\n q3 = 1 - age_weight\n else:\n q3 = 0\n\n return q3\n\n\n # infectiousness is constant and high until symptom onset and then\n # decreases monotonically until agents are not infectious anymore \n # at the end of the infection_duration \n def get_transmission_risk_progression_modifier(self, source):\n if source.days_since_exposure < source.exposure_duration:\n progression_weight = 0\n elif source.days_since_exposure <= source.time_until_symptoms:\n progression_weight = 1\n elif source.days_since_exposure > source.time_until_symptoms and \\\n source.days_since_exposure <= source.infection_duration:\n # we add 1 in the denominator, such that the source is also \n # (slightly) infectious on the last day of the infection_duration\n progression_weight = \\\n (source.days_since_exposure - source.time_until_symptoms) / \\\n (source.infection_duration - source.time_until_symptoms + 1)\n else:\n progression_weight = 0\n # see description in get_transmission_risk_age_modifier_transmission\n q4 = 1 - progression_weight\n \n return q4\n\n def get_transmission_risk_subclinical_modifier(self, source):\n if source.symptomatic_course == False:\n subclinical_weight = self.subclinical_modifier\n else:\n subclinical_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q5 = 1 - subclinical_weight\n return q5\n\n def get_transmission_risk_exhale_modifier(self, source):\n if source.mask:\n 
exhale_weight = self.mask_filter_efficiency['exhale']\n else:\n exhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q6 = 1 - exhale_weight\n return q6\n\n\n def get_transmission_risk_inhale_modifier(self, target):\n if target.mask:\n inhale_weight = self.mask_filter_efficiency['inhale']\n else:\n inhale_weight = 1\n # see description in get_transmission_risk_age_modifier_transmission\n q7 = 1 - inhale_weight\n return q7\n\n\n def get_transmission_risk_ventilation_modifier(self):\n ventilation_weight = self.transmission_risk_ventilation_modifier\n # see description in get_transmission_risk_age_modifier_transmission\n q8 = 1 - ventilation_weight\n return q8\n\n\n def test_agent(self, a, test_type):\n a.tested = True\n a.pending_test = test_type\n if test_type == self.Testing.diagnostic_test_type:\n self.number_of_diagnostic_tests += 1\n else:\n self.number_of_preventive_screening_tests += 1\n\n if a.exposed:\n # tests that happen in the period of time in which the agent is\n # exposed but not yet infectious\n if a.days_since_exposure >= self.Testing.tests[test_type]['time_until_testable']:\n if self.verbosity > 1:\n print('{} {} sent positive sample (even though not infectious yet)'\n .format(a.type, a.ID))\n a.sample = 'positive'\n self.predetected_infections += 1\n self.positive_tests[test_type][a.type] += 1\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n elif a.infectious:\n # tests that happen in the period of time in which the agent is\n # infectious and the infection is detectable by a given test\n if a.days_since_exposure >= self.Testing.tests[test_type]['time_until_testable'] and \\\n a.days_since_exposure <= self.Testing.tests[test_type]['time_testable']:\n if self.verbosity > 1:\n print('{} {} sent positive sample'.format(a.type, a.ID))\n a.sample = 'positive'\n self.positive_tests[test_type][a.type] += 1\n\n # track the undetected infections to assess how important they are\n # for infection spread\n else:\n if self.verbosity > 1:\n print('{} {} sent negative sample (even though infectious)'\n .format(a.type, a.ID))\n a.sample = 'negative'\n self.undetected_infections += 1\n\n else:\n if self.verbosity > 1: print('{} {} sent negative sample'\n .format(a.type, a.ID))\n a.sample = 'negative'\n\n # for same-day testing, immediately act on the results of the test\n if a.days_since_tested >= self.Testing.tests[test_type]['time_until_test_result']:\n a.act_on_test_result()\n\n def screen_agents(self, agent_group, test_type, screen_type):\n # only test agents that have not been tested already in this simulation\n # step and that are not already known positive cases\n\n if self.verbosity > 0: \n print('initiating {} {} screen'\\\n .format(screen_type, agent_group))\n\n untested_agents = [a for a in self.schedule.agents if\n (a.tested == False and a.known_positive == False\n and a.type == agent_group)]\n\n if len(untested_agents) > 0:\n self.screened_agents[screen_type][agent_group] = True\n self.days_since_last_agent_screen[agent_group] = 0\n\n # only test agents if they participate in voluntary testing\n if screen_type == 'preventive':\n for a in untested_agents:\n if a.voluntary_testing:\n self.test_agent(a, test_type)\n else:\n if self.verbosity > 1:\n print('not testing {} {}, not participating in voluntary testing'\\\n .format(agent_group, a.ID))\n else:\n for a in untested_agents:\n self.test_agent(a, test_type)\n\n if self.verbosity > 0:\n print()\n else:\n if 
self.verbosity > 0:\n print('no agents tested because all agents have already been tested')\n\n # the type of the test used in the pending test result is stored in the\n # variable pending_test\n\n def collect_test_results(self):\n agents_with_test_results = [a for a in self.schedule.agents if\n (a.pending_test and\n a.days_since_tested >= self.Testing.tests[a.pending_test]['time_until_test_result'])]\n\n return agents_with_test_results\n\n def trace_contacts(self, a):\n if a.quarantined == False:\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n if self.verbosity > 0:\n print('qurantined {} {}'.format(a.type, a.ID))\n\n # find all agents that share edges with the agent\n # that are classified as K1 contact types in the testing\n # strategy\n K1_contacts = [e[1] for e in self.G.edges(a.ID, data=True) if\n e[2]['contact_type'] in self.Testing.K1_contact_types]\n K1_contacts = [a for a in self.schedule.agents if a.ID in K1_contacts]\n\n for K1_contact in K1_contacts:\n if self.verbosity > 0:\n print('quarantined {} {} (K1 contact of {} {})'\n .format(K1_contact.type, K1_contact.ID, a.type, a.ID))\n K1_contact.quarantined = True\n K1_contact.quarantine_start = self.Nstep\n\n def test_symptomatic_agents(self):\n # find symptomatic agents that have not been tested yet and are not\n # in quarantine and test them\n newly_symptomatic_agents = np.asarray([a for a in self.schedule.agents\n if (a.symptoms == True and a.tested == False and a.quarantined == False)])\n\n for a in newly_symptomatic_agents:\n # all symptomatic agents are quarantined by default\n if self.verbosity > 0:\n print('quarantined: {} {}'.format(a.type, a.ID))\n a.quarantined = True\n a.quarantine_start = self.Nstep\n\n self.test_agent(a, self.Testing.diagnostic_test_type)\n\n def quarantine_contacts(self):\n # trace and quarantine contacts of newly positive agents\n if len(self.newly_positive_agents) > 0:\n if self.verbosity > 0: print('new positive test(s) from {}'\n .format([a.ID for a in self.newly_positive_agents]))\n\n # send all K1 contacts of positive agents into quarantine\n for a in self.newly_positive_agents:\n self.trace_contacts(a)\n\n # indicate that a screen should happen because there are new\n # positive test results\n self.new_positive_tests = True\n self.newly_positive_agents = []\n\n else:\n self.new_positive_tests = False\n\n\n def step(self):\n self.weekday = (self.Nstep + self.weekday_offset) % 7 + 1\n # if the connection graph is time-resloved, set the graph that is\n # used to determine connections in this step to the sub-graph corres-\n # ponding to the current day of the week\n if self.dynamic_connections:\n self.G = self.weekday_connections[self.weekday]\n\n if self.verbosity > 0:\n print('weekday {}'.format(self.weekday))\n\n if self.testing:\n for agent_type in self.agent_types:\n for screen_type in ['reactive', 'follow_up', 'preventive']:\n self.screened_agents[screen_type][agent_type] = False\n\n if self.verbosity > 0: \n print('* testing and tracing *')\n \n self.test_symptomatic_agents()\n \n\n # collect and act on new test results\n agents_with_test_results = self.collect_test_results()\n for a in agents_with_test_results:\n a.act_on_test_result()\n \n self.quarantine_contacts()\n\n # screening:\n # a screen should take place if\n # (a) there are new positive test results\n # (b) as a follow-up screen for a screen that was initiated because\n # of new positive cases\n # (c) if there is a preventive screening policy and it is time for\n # a preventive screen in a given agent group\n\n # 
(a)\n if (self.testing == 'background' or self.testing == 'preventive')\\\n and self.new_positive_tests == True:\n for agent_type in self.screening_agents:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'reactive')\n self.scheduled_follow_up_screen[agent_type] = True\n\n # (b)\n elif (self.testing == 'background' or self.testing == 'preventive') and \\\n self.Testing.follow_up_testing_interval != None and \\\n sum(list(self.scheduled_follow_up_screen.values())) > 0:\n for agent_type in self.screening_agents:\n if self.scheduled_follow_up_screen[agent_type] and\\\n self.days_since_last_agent_screen[agent_type] >=\\\n self.Testing.follow_up_testing_interval:\n self.screen_agents(\n agent_type, self.Testing.diagnostic_test_type, 'follow_up')\n else:\n if self.verbosity > 0: \n print('not initiating {} follow-up screen (last screen too close)'\\\n .format(agent_type))\n\n # (c) \n elif self.testing == 'preventive' and \\\n np.any(list(self.Testing.screening_intervals.values())):\n\n for agent_type in self.screening_agents:\n interval = self.Testing.screening_intervals[agent_type]\n assert interval in [7, 3, 2, None], \\\n 'testing interval {} for agent type {} not supported!'\\\n .format(interval, agent_type)\n\n # (c.1) testing every 7 days = testing on Mondays\n if interval == 7 and self.weekday == 1:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.2) testing every 3 days = testing on Mo & Turs\n elif interval == 3 and self.weekday in [1, 4]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # (c.3) testing every 2 days = testing on Mo, Wed & Fri\n elif interval == 2 and self.weekday in [1, 3, 5]:\n self.screen_agents(agent_type,\n self.Testing.preventive_screening_test_type,\\\n 'preventive')\n # No interval specified = no testing, even if testing \n # mode == preventive\n elif interval == None:\n pass\n else:\n if self.verbosity > 0:\n print('not initiating {} preventive screen (wrong weekday)'\\\n .format(agent_type))\n else:\n # do nothing\n pass\n\n for agent_type in self.agent_types:\n if not (self.screened_agents['reactive'][agent_type] or \\\n self.screened_agents['follow_up'][agent_type] or \\\n self.screened_agents['preventive'][agent_type]):\n self.days_since_last_agent_screen[agent_type] += 1\n\n\n if self.verbosity > 0: print('* agent interaction *')\n self.datacollector.collect(self)\n self.schedule.step()\n self.Nstep += 1\n\n", "id": "1480726", "language": "Python", "matching_score": 8.41002368927002, "max_stars_count": 0, "path": "src/scseirx/model_SEIRX.py" }, { "content": "import numpy as np\nimport networkx as nx\nfrom mesa import Model\nfrom mesa.time import RandomActivation, SimultaneousActivation\nfrom mesa.datacollection import DataCollector\n\nfrom scseirx.model_SEIRX import *\n\n\n## data collection functions ##\ndef count_E_resident(model):\n E = np.asarray(\n [a.exposed for a in model.schedule.agents if a.type == 'resident']).sum()\n return E\n\n\ndef count_I_resident(model):\n I = np.asarray(\n [a.infectious for a in model.schedule.agents if a.type == 'resident']).sum()\n return I\n\n\ndef count_I_symptomatic_resident(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'resident'and a.symptomatic_course)]).sum()\n return I\n\n\ndef count_I_asymptomatic_resident(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'resident'and a.symptomatic_course == False)]).sum()\n 
return I\n\n\ndef count_R_resident(model):\n R = np.asarray(\n [a.recovered for a in model.schedule.agents if a.type == 'resident']).sum()\n return R\n\n\ndef count_X_resident(model):\n X = np.asarray(\n [a.quarantined for a in model.schedule.agents if a.type == 'resident']).sum()\n return X\n\n\ndef count_E_employee(model):\n E = np.asarray(\n [a.exposed for a in model.schedule.agents if a.type == 'employee']).sum()\n return E\n\n\ndef count_I_employee(model):\n I = np.asarray(\n [a.infectious for a in model.schedule.agents if a.type == 'employee']).sum()\n return I\n\n\ndef count_I_symptomatic_employee(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'employee'and a.symptomatic_course)]).sum()\n return I\n\n\ndef count_I_asymptomatic_employee(model):\n I = np.asarray([a.infectious for a in model.schedule.agents if\n (a.type == 'employee'and a.symptomatic_course == False)]).sum()\n return I\n\n\ndef count_R_employee(model):\n R = np.asarray(\n [a.recovered for a in model.schedule.agents if a.type == 'employee']).sum()\n return R\n\n\ndef count_X_employee(model):\n X = np.asarray(\n [a.quarantined for a in model.schedule.agents if a.type == 'employee']).sum()\n return X\n\n\ndef check_reactive_resident_screen(model):\n return model.screened_agents['reactive']['resident']\n\n\ndef check_follow_up_resident_screen(model):\n return model.screened_agents['follow_up']['resident']\n\n\ndef check_preventive_resident_screen(model):\n return model.screened_agents['preventive']['resident']\n\n\ndef check_reactive_employee_screen(model):\n return model.screened_agents['reactive']['employee']\n\n\ndef check_follow_up_employee_screen(model):\n return model.screened_agents['follow_up']['employee']\n\n\ndef check_preventive_employee_screen(model):\n return model.screened_agents['preventive']['employee']\n\ndata_collection_functions = \\\n {\n 'resident':\n {\n 'E':count_E_resident,\n 'I':count_I_resident,\n 'I_asymptomatic':count_I_asymptomatic_resident,\n 'I_symptomatic':count_I_symptomatic_resident,\n 'R':count_R_resident,\n 'X':count_X_resident\n },\n 'employee':\n {\n 'E':count_E_employee,\n 'I':count_I_employee,\n 'I_asymptomatic':count_I_asymptomatic_employee,\n 'I_symptomatic':count_I_symptomatic_employee,\n 'R':count_R_employee,\n 'X':count_X_employee\n }\n }\n\n\n\n\nclass SEIRX_nursing_home(SEIRX):\n \n\n def __init__(self, G, \n verbosity = 0, \n base_transmission_risk = 0.05,\n testing='diagnostic',\n exposure_duration = [5.0, 1.9],\n time_until_symptoms = [6.4, 0.8],\n infection_duration = [10.91, 3.95], \n quarantine_duration = 10, \n subclinical_modifier = 0.6,\n infection_risk_contact_type_weights = {\n 'very_far': 0.1,\n 'far': 0.25,\n 'intermediate': 0.5,\n 'close': 1},\n K1_contact_types = ['close'],\n diagnostic_test_type = 'one_day_PCR',\n preventive_screening_test_type = 'same_day_antigen',\n follow_up_testing_interval = None,\n liberating_testing = False,\n index_case = 'teacher', \n agent_types = {\n 'employee': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False},\n 'resident': {'screening_interval': None,\n 'index_probability': 0,\n 'mask':False}},\n age_transmission_risk_discount = \\\n {'slope':-0.02,\n 'intercept':1},\n age_symptom_discount = \\\n {'slope':-0.02545,\n 'intercept':0.854545},\n mask_filter_efficiency = {'exhale':0, 'inhale':0},\n transmission_risk_ventilation_modifier = 0,\n seed = None):\n\n\n super().__init__(G, \n verbosity = verbosity,\n base_transmission_risk = base_transmission_risk,\n testing = 
testing,\n exposure_duration = exposure_duration,\n time_until_symptoms = time_until_symptoms,\n infection_duration = infection_duration, \n quarantine_duration = quarantine_duration, \n subclinical_modifier = subclinical_modifier,\n infection_risk_contact_type_weights = \\\n infection_risk_contact_type_weights,\n K1_contact_types = K1_contact_types,\n diagnostic_test_type = diagnostic_test_type,\n preventive_screening_test_type = \\\n preventive_screening_test_type,\n follow_up_testing_interval = follow_up_testing_interval,\n liberating_testing = liberating_testing,\n index_case = index_case, \n agent_types = agent_types,\n age_transmission_risk_discount = \\\n age_transmission_risk_discount,\n age_symptom_discount = age_symptom_discount,\n mask_filter_efficiency = mask_filter_efficiency,\n transmission_risk_ventilation_modifier = \\\n transmission_risk_ventilation_modifier,\n seed = seed)\n\n\n # agent types that are included in preventive, background & follow-up\n # screens\n self.screening_agents = ['employee', 'resident']\n\n # define, whether or not a multigraph that defines separate connections\n # for every day of the week is used\n self.dynamic_connections = False\n\n \n # data collectors to save population counts and agent states every\n # time step\n model_reporters = {}\n for agent_type in self.agent_types:\n for state in ['E','I','I_asymptomatic','I_symptomatic','R','X']:\n model_reporters.update({'{}_{}'.format(state, agent_type):\\\n data_collection_functions[agent_type][state]})\n\n model_reporters.update(\\\n {\n 'screen_residents_reactive':check_reactive_resident_screen,\n 'screen_residents_follow_up':check_follow_up_resident_screen,\n 'screen_residents_preventive':check_preventive_resident_screen,\n 'screen_employees_reactive':check_reactive_employee_screen,\n 'screen_employees_follow_up':check_follow_up_employee_screen,\n 'screen_employees_preventive':check_preventive_employee_screen,\n 'N_diagnostic_tests':get_N_diagnostic_tests,\n 'N_preventive_screening_tests':get_N_preventive_screening_tests,\n 'undetected_infections':get_undetected_infections,\n 'predetected_infections':get_predetected_infections,\n 'pending_test_infections':get_pending_test_infections\n })\n\n agent_reporters =\\\n {\n 'infection_state':get_infection_state,\n 'quarantine_state':get_quarantine_state\n }\n\n self.datacollector = DataCollector(\n model_reporters = model_reporters,\n agent_reporters = agent_reporters)\n\n def calculate_transmission_probability(self, source, target, base_risk):\n \"\"\"\n Calculates the risk of transmitting an infection between a source agent\n and a target agent given the model's and agent's properties and the base\n transmission risk.\n\n Transmission is an independent Bernoulli trial with a probability of\n success p. The probability of transmission without any modifications\n by for example masks or ventilation is given by the base_risk, which\n is calibrated in the model. 
The probability is modified by contact type\n q1 (also calibrated in the model), infection progression q2\n (from literature), reduction of the viral load due to a sublclinical\n course of the disease q3 (from literature), reduction of exhaled viral\n load of the source by mask wearing q4 (from literature), reduction of\n inhaled viral load by the target q5 (from literature), and ventilation \n of the rooms q6 (from literature).\n\n Parameters\n ----------\n source : agent_SEIRX\n Source agent that transmits the infection to the target.\n target: agent_SEIRX\n Target agent that (potentially) receives the infection from the \n source.\n base_risk : float\n Probability p of infection transmission without any modifications\n through prevention measures.\n\n Returns\n -------\n p : float\n Modified transmission risk.\n \"\"\"\n n1 = source.ID\n n2 = target.ID\n link_type = self.G.get_edge_data(n1, n2)['link_type']\n\n q1 = self.get_transmission_risk_contact_type_modifier(source, target)\n q2 = self.get_transmission_risk_progression_modifier(source)\n q3 = self.get_transmission_risk_subclinical_modifier(source)\n\n # contact types where masks and ventilation are irrelevant\n if link_type in ['resident_resident_room', 'resident_resident_table']:\n p = 1 - (1 - base_risk * (1- q1) * (1 - q2) * (1 - q3))\n\n # contact types were masks and ventilation are relevant\n elif link_type in ['resident_resident_quarters',\n 'employee_resident_care',\n 'employee_employee_short']:\n\n q4 = self.get_transmission_risk_exhale_modifier(source)\n q5 = self.get_transmission_risk_inhale_modifier(target)\n q6 = self.get_transmission_risk_ventilation_modifier()\n\n p = 1 - (1 - base_risk * (1- q1) * (1 - q2) * (1 - q3) * \\\n (1 - q4) * (1 - q5) * (1 - q6))\n\n else:\n print('unknown link type: {}'.format(link_type))\n p = None\n return p", "id": "6054549", "language": "Python", "matching_score": 2.4141018390655518, "max_stars_count": 0, "path": "src/scseirx/model_nursing_home.py" }, { "content": "from mesa import Agent\n\n\nclass agent_SEIRX(Agent):\n '''\n An agent with an infection status. NOTe: this agent is not\n functional on it's own, as it does not implement a step()\n function. Therefore, every agent class that inherits from this\n generic agent class needs to implement their own step() function\n '''\n\n def __init__(self, unique_id, unit, model,\n exposure_duration, time_until_symptoms, infection_duration,\n voluntary_testing, verbosity):\n super().__init__(unique_id, model)\n self.verbose = verbosity\n self.ID = unique_id\n self.unit = unit\n self.voluntary_testing = voluntary_testing\n\n ## epidemiological parameters drawn from distributions\n # NOTE: all durations are inclusive, i.e. 
comparison are \"<=\" and \">=\"\n # number of days agents stay infectuous\n\n # days after transmission until agent becomes infectuous\n self.exposure_duration = exposure_duration\n # days after becoming infectuous until showing symptoms\n self.time_until_symptoms = time_until_symptoms\n # number of days agents stay infectuous\n self.infection_duration = infection_duration\n\n\n ## agent-group wide parameters that are stored in the model class\n self.index_probability = self.model.index_probabilities[self.type]\n self.symptom_probability = self.model.age_symptom_discount['intercept']\n self.mask = self.model.masks[self.type]\n\n ## infection states\n self.exposed = False\n self.infectious = False\n self.symptomatic_course = False\n self.symptoms = False\n self.recovered = False\n self.tested = False\n self.pending_test = False\n self.known_positive = False\n self.quarantined = False\n\n # sample given for test\n self.sample = None\n\n # staging states\n self.contact_to_infected = False\n\n # counters\n self.days_since_exposure = 0\n self.days_quarantined = 0\n self.days_since_tested = 0\n self.transmissions = 0\n self.transmission_targets = {}\n\n\n\n ### generic helper functions that are inherited by other agent classes\n\n def get_contacts(self, agent_group):\n contacts = [a for a in self.model.schedule.agents if\n (a.type == agent_group and self.model.G.has_edge(self.ID, a.ID))]\n return contacts\n\n\n def introduce_external_infection(self):\n if (self.infectious == False) and (self.exposed == False) and\\\n (self.recovered == False):\n index_transmission = self.model.random.random()\n if index_transmission <= self.index_probability:\n self.contact_to_infected = True\n if self.verbose > 0:\n print('{} {} is index case'.format(\n self.type, self.unique_id))\n\n\n def transmit_infection(self, contacts):\n # the basic transmission risk is that between two members of the \n # same household and has been calibrated to reproduce empirical \n # household secondary attack rates.\n base_risk = self.model.base_transmission_risk\n\n for target in contacts:\n if (target.exposed == False) and (target.infectious == False) and \\\n (target.recovered == False) and (target.contact_to_infected == False):\n\n # determine if a transmission occurrs\n p = self.model.calculate_transmission_probability(\\\n self, target, base_risk)\n transmission = self.model.random.random()\n\n if self.verbose > 1:\n print('target: {} {}, p: {}'\\\n .format(target.type, target.ID, p))\n\n if transmission < p:\n target.contact_to_infected = True\n self.transmissions += 1\n\n # track the state of the agent pertaining to testing at the\n # moment of transmission to count how many transmissions\n # occur in which states\n if self.tested and self.pending_test and \\\n self.sample == 'positive':\n self.model.pending_test_infections += 1\n\n self.transmission_targets.update({target.ID:self.model.Nstep})\n\n if self.verbose > 0:\n print('transmission: {} {} -> {} {} (p: {})'\n .format(self.type, self.unique_id, \\\n target.type, target.unique_id, p))\n\n\n def act_on_test_result(self):\n '''\n Function that gets called by the infection dynamics model class if a\n test result for an agent is returned. The function sets agent states\n according to the result of the test (positive or negative). Adds agents\n with positive tests to the newly_positive_agents list that will be\n used to trace and quarantine close (K1) contacts of these agents. 
Resets\n the days_since_tested counter and the sample as well as the \n pending_test flag\n '''\n\n # the type of the test used in the test for which the result is pending\n # is stored in the pending_test variable\n test_type = self.pending_test\n\n if self.sample == 'positive':\n\n # true positive\n if self.model.Testing.tests[test_type]['sensitivity'] >= self.model.random.random():\n self.model.newly_positive_agents.append(self)\n self.known_positive = True\n\n if self.model.verbosity > 1:\n print('{} {} returned a positive test (true positive)'\n .format(self.type, self.ID))\n\n if self.quarantined == False:\n self.quarantined = True\n if self.model.verbosity > 0:\n print('quarantined {} {}'.format(self.type, self.ID))\n\n # false negative\n else:\n if self.model.verbosity > 1:\n print('{} {} returned a negative test (false negative)'\\\n .format(self.type, self.ID))\n self.known_positive = False\n self.model.false_negative += 1\n\n if self.model.Testing.liberating_testing:\n self.quarantined = False\n if self.model.verbosity > 0:\n print('{} {} left quarantine prematurely'\\\n .format(self.type, self.ID))\n\n self.days_since_tested = 0\n self.pending_test = False\n self.sample = None\n\n elif self.sample == 'negative':\n\n # false positive\n if self.model.Testing.tests[test_type]['specificity'] <= self.model.random.random():\n self.model.newly_positive_agents.append(self)\n self.known_positive = True\n\n if self.model.verbosity > 1:\n print('{} {} returned a positive test (false positive)'\\\n .format(self.type, self.ID))\n\n if self.quarantined == False:\n self.quarantined = True\n if self.model.verbosity > 0:\n print('quarantined {} {}'.format(self.type, self.ID))\n\n # true negative\n else:\n if self.model.verbosity > 1:\n print('{} {} returned a negative test (true negative)'\\\n .format(self.type, self.ID))\n self.known_positive = False\n\n if self.model.Testing.liberating_testing:\n self.quarantined = False\n if self.model.verbosity > 0:\n print('{} {} left quarantine prematurely'\\\n .format(self.type, self.ID))\n\n self.days_since_tested = 0\n self.pending_test = False\n self.sample = None\n\n def become_exposed(self):\n if self.verbose > 0:\n print('{} exposed: {}'.format(self.type, self.unique_id))\n self.exposed = True\n self.contact_to_infected = False\n\n\n def become_infected(self):\n self.exposed = False\n self.infectious = True\n\n # determine if infected agent will show symptoms\n # NOTE: it is important to determine whether the course of the\n # infection is symptomatic already at this point, to allow\n # for a modification of transmissibility by symptomticity.\n # I.e. 
agents that will become symptomatic down the road might\n # already be more infectious before they show any symptoms than\n # agents that stay asymptomatic\n if self.model.random.random() <= self.symptom_probability:\n self.symptomatic_course = True\n if self.verbose > 0:\n print('{} infectious: {} (symptomatic course)'.format(self.type, self.unique_id))\n else:\n if self.verbose > 0:\n print('{} infectious: {} (asymptomatic course)'.format(self.type, self.unique_id))\n\n\n def show_symptoms(self):\n # determine if agent shows symptoms\n if self.symptomatic_course:\n self.symptoms = True\n if self.model.verbosity > 0:\n print('{} {} shows symptoms'.format(self.type, self.ID))\n\n\n def recover(self):\n self.infectious = False\n self.symptoms = False\n self.recovered = True\n self.days_since_exposure = self.infection_duration + 1\n if self.verbose > 0:\n print('{} recovered: {}'.format(self.type, self.unique_id))\n\n\n def leave_quarantine(self):\n if self.verbose > 0:\n print('{} released from quarantine: {}'.format(\n self.type, self.unique_id))\n self.quarantined = False\n self.days_quarantined = 0\n\n\n def advance(self):\n '''\n Advancing step: applies infections, checks counters and sets infection \n states accordingly\n '''\n\n # determine if a transmission to the agent occurred\n if self.contact_to_infected == True:\n self.become_exposed()\n\n # determine if agent has transitioned from exposed to infected\n if self.days_since_exposure == self.exposure_duration:\n self.become_infected()\n\n if self.days_since_exposure == self.time_until_symptoms:\n self.show_symptoms()\n\n if self.days_since_exposure == self.infection_duration:\n self.recover()\n\n # determine if agent is released from quarantine\n if self.days_quarantined == self.model.quarantine_duration:\n self.leave_quarantine()\n\n # if there is a pending test result, increase the days the agent has\n # waited for the result by 1 (NOTE: results are collected by the \n # infection dynamics model class according to days passed since the test)\n if self.pending_test:\n self.days_since_tested += 1\n\n if self.quarantined:\n self.days_quarantined += 1\n self.model.quarantine_counters[self.type] += 1\n\n if self.exposed or self.infectious:\n self.days_since_exposure += 1\n\n # reset tested flag at the end of the agent step\n self.tested = False\n \n", "id": "4569250", "language": "Python", "matching_score": 4.908877849578857, "max_stars_count": 0, "path": "src/scseirx/agent_SEIRX.py" }, { "content": "from scseirx.agent_SEIRX import agent_SEIRX\n\nclass employee(agent_SEIRX):\n '''\n An employee with an infection status\n '''\n\n def __init__(self, unique_id, unit, model, \n exposure_duration, time_until_symptoms, infection_duration,\n voluntary_testing, verbosity):\n\n self.type = 'employee'\n\n super().__init__(unique_id, unit, model, \n exposure_duration, time_until_symptoms, infection_duration,\n voluntary_testing, verbosity)\n \n\n def step(self):\n '''\n Infection step: if a employee is infected and not in quarantine, it \n interacts with other residents and employees trough the specified \n\n contact network and can pass a potential infection.\n Infections are staged here and only applied in the \n \"advance\"-step to simulate \"simultaneous\" interaction\n '''\n\n # check for external infection in continuous index case modes\n if self.model.index_case in ['continuous'] and \\\n self.index_probability > 0:\n self.introduce_external_infection()\n\n # simulate contacts to other agents if the agent is\n # infected and not in 
quarantine. Randomly transmit the infection \n        # according to the transmission risk\n        if self.infectious:\n            if not self.quarantined:\n\n                # get contacts to other agent groups according to the\n                # interaction network\n                residents = self.get_contacts('resident')\n                employees = self.get_contacts('employee')\n\n                # transmit the infection to the two agent groups separately,\n                # to allow for differences in transmission risk\n                self.transmit_infection(residents)\n                self.transmit_infection(employees)", "id": "1934054", "language": "Python", "matching_score": 2.247788667678833, "max_stars_count": 0, "path": "src/scseirx/agent_employee.py" } ]
2.394892
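The comments in model_SEIRX.py above describe how each preventive measure enters the model as a "probability of failure" q that reduces the base Bernoulli success probability, and model_nursing_home.py combines them as p = 1 - (1 - base_risk * (1 - q1) * ... * (1 - q6)), which simplifies to base_risk * prod(1 - q_i). A minimal stand-alone sketch of that composition follows; it is not part of the repository above, and the function name and example numbers are illustrative assumptions only.

def transmission_probability(base_risk, failure_modifiers):
    # Reduce the calibrated base risk by each "probability of failure"
    # modifier q (contact type, infection progression, subclinical course,
    # masks, ventilation), mirroring calculate_transmission_probability.
    # Illustrative sketch -- name and inputs are assumptions, not repo code.
    p = base_risk
    for q in failure_modifiers:
        p *= (1.0 - q)
    return p

# Example with assumed numbers: base risk 0.05 (household calibration) and a
# 'far' contact weight of 0.25, i.e. q_contact = 1 - 0.25 = 0.75; all other
# modifiers zero.
p = transmission_probability(0.05, [0.75, 0.0, 0.0])
assert abs(p - 0.0125) < 1e-12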
smurn
[ { "content": "import os.path\nimport shutil\n\ndef pwd():\n \"\"\"\n Returns the current working directory.\n This is just a convinience function to access `os.getcwd()`.\n \"\"\"\n return os.getcwd()\n\n\ndef cd(path):\n \"\"\"\n Change the current directory.\n\n The return value can be used with the `with` statement to \n automatically switch back to the previous directory like this::\n\n with cish.cd(\"subdir\"):\n print cish.pwd() # we are inside \"subdir\"\n print cish.pwd() # we are back where we were before.\n \n The new absolute path of the current directory can also be\n obtained using the `with .. as` statement::\n\n with cish.cd(\"subdir\") as path:\n print path # we are inside \"subdir\"\n print cish.pwd() # we are back where we were before.\n\n This produces the same output as the previous example.\n \"\"\"\n path = os.path.abspath(path)\n prev_pwd = os.getcwd()\n\n # Don't wait for __enter__ as this function might not be\n # used with `with`.\n os.chdir(path)\n\n class ChangeDirContext(object):\n def __enter__(self):\n return path\n \n def __exit__(self, type_, value, traceback):\n os.chdir(prev_pwd)\n return False # re-throw exceptions if there was one.\n\n return ChangeDirContext()\n\n\ndef mkdirs(path):\n \"\"\"\n Creates the given directory, creating parent directories if required.\n Has no effect if the directory already exists, throws an exception\n if the path exists but is not a directory.\n \"\"\"\n path = os.path.abspath(path)\n if os.path.isdir(path):\n return\n elif os.path.exists(path):\n raise ValueError(\"Cannot create directory {path} it already exists \" +\n \"but is not a directory.\".format(path=path))\n else:\n os.makedirs(path)\n\n\ndef rm(path):\n \"\"\"\n Deletes the given file or directory, including the content.\n Has no effect if the path does not exist.\n \"\"\"\n path = os.path.abspath(path)\n if os.path.isdir(path):\n shutil.rmtree(path)\n elif os.path.exists(path):\n os.remove(path)\n\n\n", "id": "4321207", "language": "Python", "matching_score": 0.962670087814331, "max_stars_count": 0, "path": "cish/commands.py" }, { "content": "# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport unittest\nimport os.path\nimport shutil\nimport tempfile\n\nfrom cish import commands\n\nclass TestCommands(unittest.TestCase):\n\n def setUp(self):\n self.cwd = os.getcwd()\n self.tmpdir = tempfile.mkdtemp()\n\n def tearDown(self):\n os.chdir(self.cwd)\n shutil.rmtree(self.tmpdir)\n\n def test_mkdirs(self):\n \"\"\"\n Tests the creation of a directory with an existing parent.\n \"\"\"\n commands.mkdirs(self.get_path(\"mydir\"))\n self.assertTrue(os.path.isdir(self.get_path(\"mydir\")))\n\n def test_mkdirs_recursively(self):\n \"\"\"\n Tests the creation of a directory without an existing parent.\n \"\"\"\n commands.mkdirs(self.get_path(\"parent/mydir\"))\n self.assertTrue(os.path.isdir(self.get_path(\"parent/mydir\")))\n\n def test_mkdirs_existing(self):\n \"\"\"\n Tests that existing directories are not changed.\n \"\"\"\n self.create_files([\"mydir/myfile\"])\n commands.mkdirs(self.get_path(\"mydir\"))\n self.assertTrue(os.path.isfile(self.get_path(\"mydir/myfile\")))\n \n def test_mkdir_isfile(self):\n \"\"\"\n Tests that mkdirs fails if the target is an existing file.\n \"\"\"\n self.create_files([\"myfile\"])\n self.assertRaises(ValueError, commands.mkdirs, self.get_path(\"myfile\"))\n\n def test_rm_file(self):\n \"\"\"\n Test deletion of a file.\n \"\"\"\n self.create_files([\"myfile\"])\n commands.rm(self.get_path(\"myfile\"))\n self.assertFalse(os.path.exists(self.get_path(\"myfile\")))\n\n def test_rm_emptydir(self):\n \"\"\"\n Test deletion of an empty directory.\n \"\"\"\n os.mkdir(self.get_path(\"mydir\"))\n commands.rm(self.get_path(\"mydir\"))\n self.assertFalse(os.path.exists(self.get_path(\"mydir\")))\n\n def test_rm_recursive(self):\n \"\"\"\n Test deletion of a directory with content.\n \"\"\"\n self.create_files([\"mydir/myfile\",\n \"mydir/subdir/anotherfile\"])\n commands.rm(self.get_path(\"mydir\"))\n self.assertFalse(os.path.exists(self.get_path(\"mydir\")))\n\n def test_rm_nonexistant(self):\n \"\"\"\n Test deletion of a file that does not exist.\n \"\"\"\n commands.rm(self.get_path(\"mydir\"))\n self.assertFalse(os.path.exists(self.get_path(\"mydir\")))\n\n def test_pwd(self):\n \"\"\"\n Tests pwd.\n \"\"\"\n os.chdir(self.tmpdir)\n self.assertEqual(os.path.realpath(commands.pwd()), os.path.realpath(self.tmpdir))\n\n def test_cd_abs(self):\n \"\"\"\n Test cd with an absolute path.\n \"\"\"\n self.create_files([\"mydir/myfile\",\n \"mydir/subdir/anotherfile\"])\n commands.cd(self.get_path(\"mydir/subdir\"))\n self.assertTrue(os.getcwd(), self.get_path(\"mydir/subdir\"))\n\n def test_cd_relative(self):\n \"\"\"\n Test cd with a relative path.\n \"\"\"\n self.create_files([\"mydir/myfile\",\n \"mydir/subdir/anotherfile\"])\n os.chdir(self.get_path(\"mydir\"))\n commands.cd(\"subdir\")\n self.assertTrue(os.getcwd(), self.get_path(\"mydir/subdir\"))\n\n def test_cd_with(self):\n \"\"\"\n Test that the path is reset after the `with` statement.\n \"\"\"\n self.create_files([\"mydir/myfile\",\n \"mydir/subdir/anotherfile\"])\n 
os.chdir(self.get_path(\"mydir\"))\n \n with commands.cd(\"subdir\") as path:\n self.assertTrue(os.getcwd(), self.get_path(\"mydir/subdir\"))\n self.assertTrue(path, self.get_path(\"mydir/subdir\")) \n self.assertTrue(os.getcwd(), self.get_path(\"mydir\"))\n\n def test_cd_with_recursive(self):\n \"\"\"\n Test that the path is reset after nested `with` statements.\n \"\"\"\n self.create_files([\"mydir/myfile\",\n \"mydir/subdir/anotherfile\",\n \"mydir/subdir/subsubdir/afile\"])\n os.chdir(self.get_path(\"mydir\"))\n\n with commands.cd(\"subdir\"):\n with commands.cd(\"subsubdir\"): \n self.assertTrue(os.getcwd(), self.get_path(\"mydir/subdir/subsubdir\"))\n self.assertTrue(os.getcwd(), self.get_path(\"mydir/subdir\"))\n self.assertTrue(os.getcwd(), self.get_path(\"mydir\"))\n\n def get_path(self, path):\n \"\"\"\n Returns the absolute path to a file relative to the temporary directory.\n \"\"\"\n return os.path.join(self.tmpdir, path.replace(\"/\", os.sep))\n\n def create_files(self, files):\n \"\"\"\n Takes a list of files (optionally with relative paths)\n and creates them in the temporary directory.\n \"\"\"\n files = [f.replace(\"/\", os.sep) for f in files]\n\n for relative_file in files:\n relative_file = relative_file.replace(\"/\", os.sep)\n relative_path, filename = os.path.split(relative_file)\n absolute_path = os.path.join(self.tmpdir, relative_path)\n if not os.path.exists(absolute_path):\n os.makedirs(absolute_path)\n absolute_file = os.path.join(absolute_path, filename)\n with open(absolute_file, 'w') as f:\n f.write(relative_file)\n\n", "id": "10022106", "language": "Python", "matching_score": 5.258683681488037, "max_stars_count": 0, "path": "cish/test_commands.py" }, { "content": "# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN \n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) \n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport unittest\nimport os.path\nimport shutil\nimport tempfile\nimport subprocess\nimport json\n\nfrom cish import pyenv\n\nclass TestPyEnv(unittest.TestCase):\n \"\"\"\n Unit-tests for :class:`pyenv.PyEnv`.\n \"\"\"\n\n def setUp(self):\n self.cwd = os.getcwd()\n self.tmpdir = tempfile.mkdtemp()\n \n def tearDown(self):\n os.chdir(self.cwd)\n shutil.rmtree(self.tmpdir)\n\n def test_linux_style(self):\n \"\"\"\n Python installed using linux distribution packages\n usually ends up in '/usr/bin', as do all the utilities.\n \"\"\"\n self.create_files([\"bin/python\",\n \"bin/pip\",\n \"bin/virtualenv\"])\n env = pyenv.from_interpreter(self.get_path(\"bin/python\"))\n self.assertEqual(env.find_executable(\"pip\"), self.get_path(\"bin/pip\"))\n \n def test_windows_style(self):\n \"\"\"\n Windows installations have the utilities in a separate directory.\n \"\"\"\n self.create_files([\"python.exe\",\n \"Scripts/pip\",\n \"Scripts/virtualenv\"])\n env = pyenv.from_interpreter(self.get_path(\"python.exe\"))\n self.assertEqual(env.find_executable(\"pip\"), self.get_path(\"Scripts/pip\"))\n \n def test_prefer_wpython(self):\n \"\"\"\n Windows installations have the utilities in a separate directory.\n \"\"\"\n self.create_files([\"python.exe\",\n \"wpython.exe\"])\n env = pyenv.from_interpreter(self.get_path(\"python.exe\"))\n self.assertEqual(env.find_executable(\"python\"), self.get_path(\"wpython.exe\"))\n \n def test_w_prefix_only_for_python(self):\n \"\"\"\n On windows installations the interpreter might have a `w` prefix,\n but this is not used for other executables, so we should not \n return the wrong one.\n \"\"\"\n self.create_files([\"python.exe\",\n \"wpip.exe\",\n \"pip.exe\"])\n env = pyenv.from_interpreter(self.get_path(\"python.exe\"))\n self.assertEqual(env.find_executable(\"pip\"), self.get_path(\"pip.exe\"))\n \n def test_getattr(self):\n \"\"\"\n Test if we can invoke the python interpreter using a method on the environment.\n \"\"\"\n env = pyenv.interpeter_pyenv() \n env.python(\"-c\", \"pass\")\n\n def test_virtualenv_abs(self):\n \"\"\"\n Tests if we can create a virtual environment giving an absolute path.\n \"\"\"\n env = pyenv.interpeter_pyenv()\n venv = env.virtualenv(self.get_path(\"myenv\"))\n self.assertTrue(venv.find_executable(\"python\").startswith(self.tmpdir))\n\n def test_virtualenv_relative(self):\n \"\"\"\n Tests if we can create a virtual environment giving a relative path.\n \"\"\"\n os.chdir(self.tmpdir)\n env = pyenv.interpeter_pyenv()\n venv = env.virtualenv(\"myenv\")\n \n actual = os.path.realpath(venv.find_executable(\"python\"))\n expected = os.path.realpath(self.tmpdir)\n \n self.assertTrue(actual.startswith(expected), \n \"Expected %s to start with %s\" %(actual, expected))\n\n def test_virtualenv_overwrites(self):\n \"\"\"\n Tests if a virtual env is replacing an existing virtual env.\n \"\"\"\n env = pyenv.interpeter_pyenv()\n env.virtualenv(self.get_path(\"myenv\"))\n with open(self.get_path(\"myenv/helloworld\"), 
'w') as f:\n f.write(\"content\")\n \n env.virtualenv(self.get_path(\"myenv\"))\n \n self.assertFalse(os.path.exists(self.get_path(\"myenv/helloworld\")))\n\n def test_config(self):\n \"\"\"\n Test if we can load an environment from a configuration file.\n \"\"\"\n self.create_files([\"python.exe\"])\n\n cfgfile = self.get_path(\"config.json\")\n with open(cfgfile, 'w') as f:\n json.dump({\"abc\": self.get_path(\"python.exe\")}, f)\n\n envs = pyenv.from_config(cfgfile)\n self.assertEqual(self.get_path(\"python.exe\"), envs[\"abc\"].find_executable(\"python\"))\n \n def get_path(self, path):\n \"\"\"\n Returns the absolute path to a file relative to the temporary directory.\n \"\"\"\n return os.path.join(self.tmpdir, path.replace(\"/\", os.sep))\n\n def create_files(self, files):\n \"\"\"\n Takes a list of files (optionally with relative paths)\n and creates them in the temporary directory.\n \"\"\"\n files = [f.replace(\"/\", os.sep) for f in files]\n \n for relative_file in files:\n relative_file = relative_file.replace(\"/\", os.sep)\n relative_path, filename = os.path.split(relative_file)\n absolute_path = os.path.join(self.tmpdir, relative_path)\n if not os.path.exists(absolute_path):\n os.makedirs(absolute_path)\n absolute_file = os.path.join(absolute_path, filename)\n with open(absolute_file, 'w') as f:\n f.write(relative_file)\n", "id": "8479757", "language": "Python", "matching_score": 6.170121192932129, "max_stars_count": 0, "path": "cish/test_pyenv.py" }, { "content": "# Copyright (c) 2014, <NAME>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN \n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) \n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport os.path\nimport sys\nimport shutil\nimport subprocess\nimport json\n\nclass PyEnv(object):\n \"\"\"\n Represents a python environment.\n An environment consists of a python interpeter, python library paths and utilities such as `pip`.\n Currenty we are only concerned about the paths were we find the interpeter and the utilities.\n \"\"\"\n\n def __init__(self, search_paths):\n \"\"\"\n Manually configure an enviroment.\n You might want to consider using the factory methods provided in this module.\n \"\"\"\n self.search_paths = search_paths\n self.exec_patterns = ['{name}' ,'{name}.exe']\n self.python_patterns = ['{name}', 'w{name}.exe' ,'{name}.exe']\n\n\n def __getattr__(self, name):\n \"\"\"\n Returns a method that invokes an exectuable in this environment.\n Arguments passed to the method become console line arguments.\n \"\"\"\n executable = self.find_executable(name)\n \n def invoker(*args):\n subprocess.check_call([executable] + list(args))\n return invoker\n\n\n def virtualenv(self, path=\"env\", system_side_packages=False):\n \"\"\"\n Creates a new virtual environment and returns the PyEnv for it.\n \n :param path: Location of the virtual environment. Can be an\n absolute path or a path relative to the current directory.\n Defaults to `\"env\"`.\n \n :param system_side_packages: Should the venv see the packages\n of the main environment?\n\n :returns: PyEnv instance for the new environment.\n \"\"\"\n abspath = os.path.abspath(path)\n \n if os.path.isdir(abspath):\n shutil.rmtree(abspath)\n elif os.path.exists(abspath):\n os.remove(abspath)\n \n parent = os.path.dirname(abspath)\n if not os.path.exists(parent):\n os.makedirs(parent)\n \n currentdir = os.getcwd()\n try:\n os.chdir(parent)\n \n virtualenv = self.find_executable(\"virtualenv\")\n args = [virtualenv, os.path.basename(abspath)]\n if system_side_packages:\n args.append(\"--system-site-packages\")\n subprocess.check_call(args)\n\n venv = from_virtualenv(abspath)\n if system_side_packages:\n venv.search_paths.extend(self.search_paths)\n return venv\n\n finally:\n os.chdir(currentdir)\n\n\n def find_executable(self, name):\n \"\"\"\n Finds an executable with the given name in this enviroment.\n \n :returns: Absolute path to the executable.\n\n :raises ValueError: if the exectuable could not be found.\n \"\"\"\n \n patterns = self.python_patterns if name == \"python\" else self.exec_patterns\n candidate_names = [ext.format(name=name) for ext in patterns]\n candidates = [os.path.join(path, n) for path in self.search_paths for n in candidate_names]\n \n for candidate in candidates:\n if os.path.exists(candidate):\n return candidate\n \n raise ValueError(\"Unable to find {name}. 
Looked at {candidates}\".format(\n name=repr(name),\n candidates=\", \".join(candidates)\n ))\n \n\ndef interpeter_pyenv():\n \"\"\"\n Returns the environment in which this script is running.\n \"\"\"\n if not sys.executable:\n raise ValueError(\"Interpeter that runs this script cannot be identified.\")\n return from_interpreter(sys.executable)\n\n\ndef from_config(*search_paths):\n \"\"\"\n Reads a json file with a `{name:\"path/to/python\", ...}` content and\n returns a `dict` with the names and :class:`PyEnv` instances for each\n entry.\n\n By default it searches in the following locations (in that order):\n\n * ./cish.json\n * ${HOME}/cish.json (or its windows equivalent)\n * /etc/cish.json (linux, osx)\n * C:\\cish.json (windows)\n\n Additional paths can be given as arguments and are searched before\n falling back to the default paths.\n \"\"\"\n\n filename = \"cish.json\"\n\n paths = list(search_paths)\n paths.append(os.path.abspath(filename))\n paths.append(os.path.join(os.path.expanduser(\"~\"), filename))\n if os.name == \"nt\":\n paths.append(\"C:\\\\\" + filename)\n else:\n paths.append(os.path.sep + os.path.join(\"etc\", filename))\n\n for path in paths:\n if os.path.exists(path):\n config_file = path\n break\n else:\n raise ValueError(\"Unable to locate configuration file. Searched in {paths}\".format(\n paths = \", \".join(paths)))\n\n with open(config_file, 'r') as f:\n config = json.load(f)\n\n if not hasattr(config, \"iteritems\"):\n raise ValueError(\"Invalid config file {f}. Must contain a key-value dict \" + \n \"as the top level element.\".format(f=config_file))\n\n return dict((name, from_interpreter(exe)) for name, exe in config.iteritems())\n\n\ndef from_interpreter(exe):\n \"\"\"\n Attempts to construct the environment for a given python interpeter by guessing\n where the paths are relative to it.\n \n :param exe: Path to the python interpeter executable.\n \n :returns: Instance of :class:`PyEnv`\n \"\"\"\n exeabs = os.path.abspath(exe)\n if not os.path.exists(exeabs):\n raise ValueError(\"Python interpreter {exe} does not exist here {exeabs}.\".format(exe=exe, exeabs=exeabs))\n \n path = os.path.dirname(exeabs)\n return _from_paths(path, [\"Scripts\", \"scripts\"])\n\n\ndef from_virtualenv(path):\n \"\"\"\n Attempts to construct the environment from the directory created by `virtualenv`.\n \n :param path: Directory containing the virtualenv.\n\n :returns: Instance of :class:`PyEnv`\n \"\"\"\n return _from_paths(path, [\"bin\", \"Scripts\", \"scripts\"]) \n\n\ndef _from_paths(path, subdirs):\n \"\"\"\n Helper method to construct an environment.\n\n :param path: Base path of the environment.\n\n :param subdirs: Directory names that might contain the interpeter and utilities.\n\n :returns: Instance of :class:`PyEnv`\n \"\"\"\n paths = [path] + [os.path.join(path, subdir) for subdir in subdirs]\n search_paths = []\n for path in paths:\n if os.path.isdir(path): \n search_paths.append(path)\n if not search_paths:\n raise ValueError(\"Python environment not found. None of the directories exists {dirs}\".format(\n dirs=\", \".join(paths)))\n return PyEnv(search_paths)\n\n", "id": "8228486", "language": "Python", "matching_score": 5.813659191131592, "max_stars_count": 0, "path": "cish/pyenv.py" }, { "content": "#!/usr/bin/env python\n\n# Copyright (c) 2014, <NAME>\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. 
Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom setuptools import setup\nimport os.path\n\nif os.path.exists('README.rst'):\n with open('README.rst') as f:\n long_description = f.read()\nelse:\n long_description = \"\"\n\nsetup(name='cish',\n version='0.2.1',\n description='Write shell-like python scripts to control continuous builds.',\n long_description=long_description,\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/smurn/cish',\n packages=['cish'],\n install_requires = ['virtualenv'],\n )\n", "id": "1988463", "language": "Python", "matching_score": 3.4933133125305176, "max_stars_count": 0, "path": "setup.py" }, { "content": "\nimport os.path\nfrom setuptools import setup, find_packages\n\nif os.path.exists('README.rst'):\n with open('README.rst') as f:\n long_description = f.read()\nelse:\n long_description = None\n \nsetup(\n name = 'Lenatu',\n version = '0.1.0',\n description='Abstract Syntax Tree Analyzer.',\n long_description=long_description,\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/smurn/lenatu',\n packages = find_packages(),\n install_requires = [],\n)", "id": "7272131", "language": "Python", "matching_score": 0.04568151384592056, "max_stars_count": 0, "path": "setup.py" }, { "content": "import unittest\nfrom lenatu import tools\nimport ast\n\nclass TestUnindent(unittest.TestCase):\n\n def testEmptyString(self):\n self.assertEqual(\"\", tools.unindent(\"\"))\n \n def testOneLineNoIndent(self):\n self.assertEqual(\"x=1\", tools.unindent(\"x=1\"))\n \n def testOneLineSpaces(self):\n self.assertEqual(\"x=1\", tools.unindent(\" x=1\"))\n \n def testOneLineTabs(self):\n self.assertEqual(\"x=1\", tools.unindent(\"\\t\\tx=1\"))\n \n def testOneLineMix(self):\n self.assertEqual(\"x=1\", tools.unindent(\" \\t \\t x=1\"))\n \n def testTwoLines(self):\n self.assertEqual(\"x=1\\ny=2\", tools.unindent(\"x=1\\ny=2\"))\n \n def testTwoLinesSpaces(self):\n self.assertEqual(\"x=1\\ny=2\", tools.unindent(\" x=1\\n y=2\"))\n \n def testTwoLinesTabs(self):\n self.assertEqual(\"x=1\\ny=2\", tools.unindent(\"\\tx=1\\n\\ty=2\"))\n \n def testTwoLinesMixed(self):\n self.assertEqual(\"x=1\\ny=2\", tools.unindent(\"\\tx=1\\n y=2\"))\n\n def testStructurePreserved(self):\n 
self.assertEqual(\"def foo():\\n x=1\", tools.unindent(\"def foo():\\n x=1\"))\n \n def testStructurePreservedSpaces(self):\n self.assertEqual(\"def foo():\\n x=1\", tools.unindent(\" def foo():\\n x=1\"))\n\n def testStructurePreservedTabs(self):\n self.assertEqual(\"def foo():\\n x=1\", tools.unindent(\"\\tdef foo():\\n\\t x=1\"))\n \n def testIgnoreEmtptyLines(self):\n self.assertEqual(\"\\nx=1\", tools.unindent(\"\\n x=1\"))\n \n def testIgnoreComments(self):\n self.assertEqual(\"#comment\\nx=1\", tools.unindent(\"#comment\\n x=1\"))\n \n def testPartiallyIndendedComment(self):\n self.assertEqual(\"#comment\\nx=1\", tools.unindent(\" #comment\\n x=1\"))\n \n \nclass TestNPath(unittest.TestCase):\n \n def test_empty(self):\n n = ast.Module()\n self.assertEqual(n, tools.npath(n, \"\"))\n \n def test_attribute(self):\n n = ast.FunctionDef(name=\"hi\")\n self.assertEqual(\"hi\", tools.npath(n, \".name\"))\n \n def test_nested_attribute(self):\n n = ast.Return(value=ast.Name(id=\"hi\"))\n self.assertEqual(\"hi\", tools.npath(n, \".value.id\"))\n \n def test_subscript(self):\n n = ast.FunctionDef(body=[ast.Name(id=\"a\"), ast.Name(id=\"b\")])\n self.assertEqual(\"a\", tools.npath(n, \".body[0].id\"))\n self.assertEqual(\"b\", tools.npath(n, \".body[1].id\"))\n \n def test_filter(self):\n n = [ast.Name(), ast.Assign(), ast.Return]\n self.assertEqual(n[1], tools.npath(n, \"{Assign}\"))\n \n def test_flatten(self):\n n = ast.Module()\n f = ast.FunctionDef()\n e1 = ast.Expr()\n a1 = ast.Name()\n e2 = ast.Expr()\n a2 = ast.Name()\n n.body = [f]\n f.body = [e1, e2]\n e1.value = a1\n e2.value = a2\n \n expected = [f, e1, a1, e2, a2]\n actual = tools.npath(n, \".**\")\n self.assertEqual(expected, actual)\n \nclass TestVersion(unittest.TestCase):\n \n def test_exact(self):\n self.assertTrue(tools.version(\"2.5.3\", (2,5,3))(True))\n \n self.assertFalse(tools.version(\"2.5.3\", (2,5,2))(True))\n self.assertFalse(tools.version(\"2.5.3\", (2,5,4))(True))\n \n def test_plus(self):\n self.assertTrue(tools.version(\"2.6+\", (2,6,0))(True))\n self.assertTrue(tools.version(\"2.6+\", (2,6,1))(True))\n self.assertTrue(tools.version(\"2.6+\", (2,7,0))(True))\n \n self.assertFalse(tools.version(\"2.6+\", (2,5,99))(True))\n self.assertFalse(tools.version(\"2.6+\", (3,0,0))(True))\n \n def test_multiple(self):\n self.assertTrue(tools.version(\"2.6+ 3.3+\", (2,6,0))(True))\n self.assertTrue(tools.version(\"2.6+ 3.3+\", (3,3,0))(True))\n self.assertFalse(tools.version(\"2.6+ 3.3+\", (3,2,0))(True))\n \n ", "id": "5830984", "language": "Python", "matching_score": 1.6068332195281982, "max_stars_count": 0, "path": "lenatu/test_tools.py" }, { "content": "import re\nimport ast\nimport sys\n\ndef unindent(source):\n \"\"\"\n Removes the indentation of the source code that is common to all lines.\n \n Does not work for all cases. 
Use for testing only.\n \"\"\"\n \n def normalize(line):\n normalized = []\n for i, c in enumerate(line):\n if c == \" \":\n normalized.append(\" \")\n elif c == '\\t':\n normalized.append(8 * \" \")\n else:\n normalized.append(line[i:])\n break\n return \"\".join(normalized)\n \n def min_indent(lines):\n idendations = []\n for line in lines:\n if not line.strip():\n continue\n if line.strip().startswith(\"#\"):\n continue\n idendations.append(count(line))\n if not idendations:\n return 0\n else:\n return min(idendations)\n \n def count(normalized):\n count = 0\n for c in normalized:\n if c == ' ':\n count += 1\n else:\n break\n return count\n \n def trim(normalized, indent):\n indent = min(count(normalized), indent)\n return normalized[indent:]\n \n lines = [normalize(line) for line in source.splitlines()]\n indent = min_indent(lines) \n return \"\\n\".join(trim(line, indent) for line in lines)\n\nid_pattern = re.compile(r\"\\s*[a-zA-Z0-9_]+\\s*\")\nsubscript_pattern = re.compile(r\"\\[(\\s*[0-9]+)\\s*\\]\")\nfilter_pattern = re.compile(r\"\\{(\\s*[a-zA-Z0-9_=]+)\\s*\\}\")\n\ndef npath(node, path):\n \"\"\"\n XPath inspired utility to find a specific node or attribute within\n an AST.\n \n The path is an expression that is applied to the given node.\n \n Attribute access and indexing in case of lists works like in Python:\n \n * `.name`\n * `[42]`\n \n It is also possible to filter for a specific expression or statement:\n \n * `[=]`\n * `[+]`\n * ...\n \n If there is only one node that matches, the result is a node, otherwise\n a list of nodes.\n \n A single `*` returns all nodes reachable from the current node in\n depth-first order.\n \"\"\"\n \n def check_empty(node, path):\n if path:\n return None, None\n return 0, node\n \n def check_flatten(node, path):\n if not path.startswith(\".**\"):\n return None, None\n \n flat = []\n \n class Traverser(ast.NodeVisitor):\n def __init__(self, first):\n self.first = first\n def visit(self, node):\n if node is not self.first:\n flat.append(node)\n return ast.NodeVisitor.visit(self, node)\n \n if not isinstance(node, list):\n node = [node]\n \n for n in node:\n Traverser(n).visit(n)\n \n return len(\".**\"), flat\n \n def check_attribute(node, path):\n if not path.startswith(\".\"):\n return None, None\n match = id_pattern.match(path, 1)\n if not match:\n raise ValueError(\"Invalid attribute name %r\" % path)\n name = match.group(0).strip()\n \n if not hasattr(node, name):\n raise ValueError(\"%r has no attribute %r. Path is %r\" %(node, name, path))\n \n return match.end(0), getattr(node, name)\n \n def check_subscript(node, path):\n if not path.startswith(\"[\"):\n return None, None\n match = subscript_pattern.match(path)\n if not match:\n raise ValueError(\"Invalid subscript %r\" % path)\n index = int(match.group(1).strip())\n\n return match.end(0), node[index]\n \n def check_filter(node, path):\n if not path.startswith(\"{\"):\n return None, None\n match = filter_pattern.match(path)\n if not match:\n raise ValueError(\"Invalid filter %r\" % path)\n criteria = match.group(1).strip()\n \n if not isinstance(node, list):\n node = [node]\n \n if \"=\" in criteria:\n attr, value = criteria.split(\"=\")\n result = [n for n in node if str(getattr(n, attr, None)) == value]\n else:\n result = [n for n in node if n.__class__.__name__ == criteria]\n \n if not result:\n raise ValueError(\"No node of type %r found. 
Nodes: %s\" %(criteria, node))\n \n if len(result) == 1:\n result = result[0]\n \n return match.end(0), result\n \n\n \n pos, nxt = check_empty(node, path)\n if pos is not None:\n return nxt\n \n if pos is None:\n pos, nxt = check_flatten(node, path)\n \n if pos is None:\n pos, nxt = check_attribute(node, path)\n \n if pos is None:\n pos, nxt = check_subscript(node, path)\n \n if pos is None:\n pos, nxt = check_filter(node, path)\n \n if pos is None:\n raise ValueError(\"Invalid npath: %r\" % path)\n \n \n return npath(nxt, path[pos:])\n \n\ndef version(supported_versions, version=sys.version_info):\n \"\"\"\n Decorator for tests that should only run on some versions of Python.\n \n @version(\"2.7+\")\n def test():\n # runs on 2.7.0, 2.7.1, ..., 2.8.0, 2.8.1, ...\n # but not on 3.x\n \n @version(\"2.7+ 3.3+\")\n def test():\n # runs on both 2.7 and newer and 3.3 and newer.\n \n \n \"\"\"\n match = False\n for cond in supported_versions.split():\n c_match = True\n for i, number in enumerate(cond.split(\".\")):\n plus = number[-1] == \"+\"\n if plus: \n number = number[:-1]\n number = int(number)\n if not plus:\n c_match = c_match and version[i] == number\n else:\n c_match = c_match and version[i] >= number\n match = match or c_match\n \n def wrapper_factory(f):\n if match:\n return f\n else:\n return None\n \n return wrapper_factory\n \n \n ", "id": "6744168", "language": "Python", "matching_score": 1.4806703329086304, "max_stars_count": 0, "path": "lenatu/tools.py" }, { "content": "import sys\n\nif sys.version_info >= (3,0):\n from lenatu._facts3 import *\nelse:\n from lenatu._facts2 import *\n \n", "id": "9621987", "language": "Python", "matching_score": 0.008827890269458294, "max_stars_count": 0, "path": "lenatu/_facts.py" }, { "content": "'''\nCreated on 17.03.2016\n\n@author: stefan\n'''\nimport unittest\nfrom lenatu import tools\nimport lenatu\nimport ast\n\nclass TestScope(unittest.TestCase):\n \n def setUp(self):\n self.cache = {}\n\n\n def test_module_global(self):\n src = \"\"\"\n x = 1\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{Name}.id_block\")\n \n def test_implicit_local(self):\n src = \"\"\"\n def f():\n x = 1\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{Name}.id_block\")\n \n def test_implicit_global(self):\n src = \"\"\"\n def f():\n x + 1\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{Name}.id_block\")\n \n @tools.version(\"3.0+\")\n def test_parameter_arg(self):\n src = \"\"\"\n def f(x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{arg}.arg_block\")\n \n @tools.version(\"2.0+\") \n def test_parameter_P2(self):\n src = \"\"\"\n def f(x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{id=x}.id_block\")\n \n @tools.version(\"2.0+\")\n def test_vararg_P2(self):\n src = \"\"\"\n def f(*x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{vararg=x}.vararg_block\")\n \n @tools.version(\"3.0 3.1 3.2 3.3\")\n def test_vararg_P30(self):\n src = \"\"\"\n def f(*x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{arguments}.vararg_block\")\n \n @tools.version(\"3.4+\")\n def test_vararg_P34(self):\n src = \"\"\"\n def f(*x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{arg=x}.arg_block\")\n \n @tools.version(\"2.0+\")\n def test_kwarg_P2(self):\n src = \"\"\"\n def f(**x):\n pass\n \"\"\"\n self.assertSame(src, \n 
\".**{FunctionDef}.defined_block\", \n \".**{kwarg=x}.kwarg_block\")\n \n @tools.version(\"3.0 3.1 3.2 3.3\")\n def test_kwarg_P30(self):\n src = \"\"\"\n def f(**x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{arguments}.kwarg_block\")\n \n @tools.version(\"3.4+\")\n def test_kwarg_P34(self):\n src = \"\"\"\n def f(**x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{arg=x}.arg_block\")\n \n def test_default(self):\n src = \"\"\"\n def foo(x=y):\n pass\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{id=y}.id_block\") \n \n @tools.version(\"3.0+\")\n def test_arg_annotation(self):\n src = \"\"\"\n def foo(x:y):\n pass\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{arg}.annotation.id_block\")\n \n def test_implicit_closure(self):\n src = \"\"\"\n def f():\n x = 1\n def g():\n x + 1\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{name=g}.**{id=x}.id_block\")\n \n @tools.version(\"3.0+\")\n def test_explict_closure(self):\n src = \"\"\"\n def f():\n x = 1\n def g():\n nonlocal x\n x = 2\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{name=g}.**{id=x}.id_block\")\n \n def test_local_hides_closure(self):\n src = \"\"\"\n def f():\n x = 1\n def g():\n x = 2\n \"\"\"\n self.assertSame(src, \n \".**{name=g}.defined_block\", \n \".**{name=g}.**{id=x}.id_block\") \n \n def test_explicit_global_closure(self):\n src = \"\"\"\n def f():\n x = 1\n def g():\n global x\n x + 1\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{name=g}.**{id=x}.id_block\") \n \n def test_class(self):\n src = \"\"\"\n class f():\n pass\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{ClassDef}.name_block\") \n \n def test_class_member(self):\n src = \"\"\"\n class f():\n x = 1\n \"\"\"\n self.assertSame(src, \n \".**{ClassDef}.defined_block\", \n \".**{id=x}.id_block\")\n \n def test_class_uses_closure(self):\n src = \"\"\"\n def f(x):\n class g():\n y = x + 1\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{ClassDef}.**{id=x}.id_block\")\n \n def test_class_members_no_closure(self):\n src = \"\"\"\n class f():\n x = 1\n def g():\n y = x + 1\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{name=g}.**{id=x}.id_block\")\n \n def test_class_bypassed(self):\n src = \"\"\"\n def f():\n x = 1\n class g():\n x = 2\n def h():\n print(x)\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{name=h}.**{id=x}.id_block\")\n \n def test_import(self):\n src = \"\"\"\n def f():\n import x\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{alias}.name_block\")\n \n def test_import_as(self):\n src = \"\"\"\n def f():\n import x as y\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{alias}.asname_block\")\n \n def test_except(self):\n src = \"\"\"\n def f():\n try:\n pass\n except ValueError as e:\n pass\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{ExceptHandler}.name_block\")\n \n @tools.version(\"3.0+\")\n def test_except_nonlocal(self):\n src = \"\"\"\n def f():\n nonlocal e\n try:\n pass\n except ValueError as e:\n pass\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{ExceptHandler}.name_block\")\n \n def test_generator_element(self):\n src = \"\"\"\n def f():\n (x for x in y)\n \"\"\"\n self.assertSame(src, \n \".**{GeneratorExp}.defined_block\", \n 
\".**{GeneratorExp}.elt.id_block\")\n \n def test_generator_iterable(self):\n src = \"\"\"\n def f(y):\n (x for x in y)\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{GeneratorExp}.generators.**{id=y}.id_block\")\n \n def test_with(self):\n src = \"\"\"\n def f():\n with x as y:\n pass\n \"\"\"\n self.assertSame(src, \n \".**{name=f}.defined_block\", \n \".**{id=y}.id_block\")\n self.assertSame(src, \n \".defined_block\", \n \".**{id=x}.id_block\")\n \n \n def get(self, src, path):\n node = self.parse(src)\n return tools.npath(node, path)\n \n def assertSame(self, src, path_a, path_b):\n node = self.parse(src)\n a = tools.npath(node, path_a)\n b = tools.npath(node, path_b)\n self.assertIs(a, b)\n \n def assertNotSame(self, src, path_a, path_b):\n node = self.parse(src)\n a = tools.npath(node, path_a)\n b = tools.npath(node, path_b)\n self.assertIsNot(a, b)\n\n def parse(self, src):\n if src not in self.cache:\n src = tools.unindent(src)\n node = ast.parse(src)\n lenatu.augment(node)\n self.cache[src] = node\n else:\n node = self.cache[src]\n return node", "id": "7261259", "language": "Python", "matching_score": 3.9384217262268066, "max_stars_count": 0, "path": "lenatu/test_scope.py" }, { "content": "'''\nCreated on 16.11.2015\n\n@author: stefan\n'''\nimport unittest\nfrom lenatu import tools\nimport ast\nimport lenatu\n\nclass BlockDetector(unittest.TestCase):\n \n def setUp(self):\n self.cache = {}\n \n def test_Module_def(self):\n src = \"\"\"\n pass\n \"\"\"\n self.assertTrue(isinstance(self.get(src, \".defined_block\"), lenatu.Block))\n \n\n def test_FunctionDef_def(self):\n src = \"\"\"\n def foo():\n pass\n \"\"\"\n self.assertNotSame(src,\n \".defined_block\",\n \".**{FunctionDef}.defined_block\")\n \n def test_ClassDef_def(self):\n src = \"\"\"\n class foo():\n pass\n \"\"\"\n self.assertNotSame(src,\n \".defined_block\",\n \".**{ClassDef}.defined_block\")\n \n \n def test_Lambda_def(self):\n src = \"\"\"\n lambda x:None\n \"\"\"\n self.assertNotSame(src,\n \".defined_block\",\n \".**{Lambda}.defined_block\")\n \n def test_Generator_def(self):\n src = \"\"\"\n (x for x in y)\n \"\"\"\n self.assertNotSame(src,\n \".defined_block\",\n \".**{GeneratorExp}.defined_block\")\n\n def test_Module_exec(self):\n src = \"\"\n node = self.get(src, \"\")\n self.assertFalse(hasattr(node, \"executed_in\"))\n \n\n def test_FunctionDef_exec(self):\n src = \"\"\"\n def foo():\n pass\n \"\"\"\n self.assertSame(src, \".defined_block\", \".**{FunctionDef}.executed_in\")\n\n\n def test_ClassDef_exec(self):\n src = \"\"\"\n class foo():\n pass\n \"\"\"\n self.assertSame(src, \".defined_block\", \".**{ClassDef}.executed_in\")\n \n \n def test_Lambda_exec(self):\n src = \"\"\"\n lambda x: None\n \"\"\"\n self.assertSame(src, \".defined_block\", \".**{Lambda}.executed_in\")\n \n \n def test_GeneratorExp_exec(self):\n src = \"\"\"\n (x for x in x)\n \"\"\"\n self.assertSame(src, \".defined_block\", \".**{GeneratorExp}.executed_in\")\n \n def test_Module_body(self):\n src = \"\"\"\n 1 + 2\n \"\"\"\n self.assertSame(src, \".defined_block\", \".**{BinOp}.executed_in\")\n \n def test_FunctionDef_body(self):\n src = \"\"\"\n def foo():\n 1 + 2\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{BinOp}.executed_in\")\n \n def test_FunctionDef_args(self):\n src = \"\"\"\n def foo(x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{FunctionDef}.args.defined_block\")\n self.assertSame(src, \n 
\".**{FunctionDef}.executed_in\", \n \".**{FunctionDef}.args.executed_in\")\n \n def test_ClassDef_body(self):\n src = \"\"\"\n class foo():\n 1 + 2\n \"\"\"\n self.assertSame(src, \n \".**{ClassDef}.defined_block\", \n \".**{BinOp}.executed_in\")\n \n def test_Lambda_args(self):\n src = \"\"\"\n lambda x : None\n \"\"\"\n self.assertSame(src, \n \".**{Lambda}.defined_block\", \n \".**{Lambda}.args.defined_block\")\n self.assertSame(src, \n \".**{Lambda}.executed_in\", \n \".**{Lambda}.args.executed_in\")\n \n def test_GeneratorExp_elt(self):\n src = \"\"\"\n (1+2 for x in x)\n \"\"\"\n self.assertSame(src, \n \".**{GeneratorExp}.defined_block\", \n \".**{BinOp}.executed_in\")\n \n def test_GeneratorExp_comprehension(self):\n src = \"\"\"\n (None for x in 1 + 2)\n \"\"\"\n self.assertSame(src, \n \".**{GeneratorExp}.defined_block\", \n \".**{GeneratorExp}.generators[0].executed_in\")\n \n\n @tools.version(\"2.0+\")\n def test_arguments_args_args_P2(self):\n src = \"\"\"\n def foo(x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{FunctionDef}.args.args[0].executed_in\")\n \n @tools.version(\"3.0+\")\n def test_arguments_args_args_P3(self):\n src = \"\"\"\n def foo(x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{FunctionDef}.args.args[0].defined_block\")\n self.assertSame(src, \n \".**{FunctionDef}.executed_in\", \n \".**{FunctionDef}.args.args[0].executed_in\")\n \n \n @tools.version(\"3.4+\")\n def test_arguments_args_vararg(self):\n \"\"\"\n `vararg` only became a node in 3.4. it was an identifier attribute\n on `arguments` before. Those are covered by test_scope.\n \"\"\"\n src = \"\"\"\n def foo(*x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{FunctionDef}.args.vararg.defined_block\")\n self.assertSame(src, \n \".**{FunctionDef}.executed_in\", \n \".**{FunctionDef}.args.vararg.executed_in\")\n \n @tools.version(\"3.0+\")\n def test_arguments_kwonlyargs(self):\n src = \"\"\"\n def foo(*x, a):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{FunctionDef}.args.kwonlyargs[0].defined_block\")\n self.assertSame(src, \n \".**{FunctionDef}.executed_in\", \n \".**{FunctionDef}.args.kwonlyargs[0].executed_in\")\n \n\n def test_arguments_args(self):\n src = \"\"\"\n def foo(**x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{FunctionDef}.args.defined_block\")\n self.assertSame(src, \n \".**{FunctionDef}.executed_in\", \n \".**{FunctionDef}.args.executed_in\")\n \n @tools.version(\"3.4+\")\n def test_arguments_args_kwarg(self):\n \"\"\"\n `kwarg` only became a node in 3.4. it was an identifier attribute\n on `arguments` before. 
Those are covered by test_scope.\n \"\"\"\n src = \"\"\"\n def foo(**x):\n pass\n \"\"\"\n self.assertSame(src, \n \".**{FunctionDef}.defined_block\", \n \".**{FunctionDef}.args.kwarg.defined_block\")\n self.assertSame(src, \n \".**{FunctionDef}.executed_in\", \n \".**{FunctionDef}.args.kwarg.executed_in\")\n \n @tools.version(\"3.0+\")\n def test_arguments_vararg_annotation(self):\n \"\"\"\n The way vararg and kwarg annotations were modelled in the AST changed\n from 3.3 to 3.4, but this test should work for both.\n \"\"\"\n src = \"\"\"\n def foo(*x:y):\n pass\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{id=y}.executed_in\")\n \n \n @tools.version(\"3.0+\")\n def test_arguments_kwarg_annotation(self):\n \"\"\"\n The way vararg and kwarg annotations were modelled in the AST changed\n from 3.3 to 3.4, but this test should work for both.\n \"\"\"\n src = \"\"\"\n def foo(**x:y):\n pass\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{id=y}.executed_in\")\n \n @tools.version(\"3.0+\")\n def test_arg_annotation(self):\n src = \"\"\"\n def foo(x:1+2):\n pass\n \"\"\"\n self.assertSame(src, \n \".defined_block\", \n \".**{arg}.annotation.executed_in\")\n \n \n\n \n \n \n def get(self, src, path):\n node = self.parse(src)\n return tools.npath(node, path)\n \n def assertSame(self, src, path_a, path_b):\n node = self.parse(src)\n a = tools.npath(node, path_a)\n b = tools.npath(node, path_b)\n self.assertIs(a, b)\n \n def assertNotSame(self, src, path_a, path_b):\n node = self.parse(src)\n a = tools.npath(node, path_a)\n b = tools.npath(node, path_b)\n self.assertIsNot(a, b)\n\n def parse(self, src):\n if src not in self.cache:\n src = tools.unindent(src)\n node = ast.parse(src)\n lenatu.augment(node)\n self.cache[src] = node\n else:\n node = self.cache[src]\n return node", "id": "1451292", "language": "Python", "matching_score": 2.003948211669922, "max_stars_count": 0, "path": "lenatu/test_block.py" }, { "content": "from lenatu._facts import * # @UnusedWildImport\n\nclass Block(object):\n \"\"\"\n A block is a sequence of statements that form a unit (think stack-frame). \n \n The bodies of modules, classes, and functions are in blocks belonging to those. Generators\n have a block too.\n \n Blocks can be nested, and are important to decide the scope of a variable.\n \n .. attribute:: defined_by\n \n Node that defines this block. For example a `ast.Function` or `ast.Module`.\n \n .. attribute:: local_variables\n \n Identifiers of variables local to this block.\n \"\"\"\n \n def __init__(self, defined_by):\n self.defined_by = defined_by\n self.local_variables = None\n\n \n def __repr__(self):\n return \"Block(%r)\" % self.defined_by\n\n\ndef _visit(node, executed_in=None, defined_block=None):\n \"\"\"\n :param node: The node we visit\n :param executed_in The block this node is executed in\n :param defined_block The block this node helps to define.\n \n We differentiate between three types of AST nodes:\n \n * Regular nodes like `ast.BinOp` that are executed in a block,\n as are all the nodes they reference.\n \n `visit` is invoked with `executed_in`, but `defined_block` is\n `None.\n \n * Nodes that define a new block, such as `ast.Module` and \n `ast.FunctionDef`. There are two blocks involved here, the block\n in which they are executed, and the new block they define for their\n body. Some attributes will belong to the new block, others to the\n block in which the node is executed in. 
Some will be of the third\n type (see below).\n \n `visit` is invoked with `executed_in`, but `defined_block` is\n `None. The later might seem surprising, but the caller does not have\n the defined block, the `visit` of a definer node will create it.\n \n * Nodes that provide additional information to a definer block, such\n as `ast.Arg`. These are similar to definer blocks as they are \n have attributes that belong to the new block, others to the\n block in which the definer block is executed in.\n \n `visit` is invoked with `executed_in` set to the block in which\n the definer block they belong to is executed in. `defined_block`\n is set to the block defined by their definer node.\n \n \"\"\"\n if executed_in is None and not isinstance(node, ast.mod):\n raise ValueError(\"Expected top-level node (one of the ast.mod types)\")\n \n if executed_in is not None:\n node.executed_in = executed_in\n \n if isinstance(node, DEFINER):\n defined_block = Block(node)\n \n if defined_block is not None:\n node.defined_block = defined_block\n \n for field, value in ast.iter_fields(node):\n kind = CHILD_BLOCK.get((type(node), field), EXEC)\n \n if kind == EXEC:\n _visit_helper(value, executed_in=executed_in, defined_block=None)\n elif kind == DEFINED:\n _visit_helper(value, executed_in=defined_block, defined_block=None)\n elif kind == MIXED:\n _visit_helper(value, executed_in=executed_in, defined_block=defined_block)\n else:\n raise ValueError(\"unexpected field kind %r\" % kind)\n\n\ndef _visit_helper(value, executed_in=None, defined_block=None):\n if isinstance(value, list):\n for v in value:\n _visit_helper(v, executed_in=executed_in, defined_block=defined_block)\n \n elif isinstance(value, ast.AST):\n _visit(value, executed_in=executed_in, defined_block=defined_block)\n\n\ndef augment_blocks(node):\n \"\"\"\n Analyze the AST add/overwrite the attributes described in the documentation.\n \"\"\"\n _visit(node)\n", "id": "12731690", "language": "Python", "matching_score": 3.821491241455078, "max_stars_count": 0, "path": "lenatu/_block.py" }, { "content": "'''\nCreated on 19.11.2015\n\n@author: stefan\n'''\nimport sys\nif sys.version_info >= (3,0): \n import ast\n \n \n # --------------------\n # Definition of blocks\n # --------------------\n \n #: AST types that define a new block\n DEFINER = (\n ast.Module,\n ast.Interactive,\n ast.Expression,\n ast.Suite,\n ast.FunctionDef,\n ast.ClassDef,\n ast.Lambda,\n ast.GeneratorExp\n )\n if hasattr(ast, \"AsyncFunctionDef\"):\n DEFINER += (ast.AsyncFunctionDef, )\n \n \n # -----------------------------------\n # Block to which a child node belongs\n # -----------------------------------\n \n \n #: child node belongs to the block that executed the parent \n EXEC = \"exec\"\n \n #: child node belongs to the block that the parent defines\n DEFINED = \"defined\"\n \n #: the child node has children that belong to both, the block that the parent\n #: executes in, and the block the parent defines.\n MIXED = \"mixed\"\n \n #: maps (node-type, attribute-name) to either EXEC, DEFINED, or MIXED.\n #: If a combination is missing, it is EXEC.\n CHILD_BLOCK = { # default is EXEC\n (ast.Module, \"body\"): DEFINED,\n (ast.Interactive, \"body\"): DEFINED,\n (ast.Expression, \"body\"):DEFINED,\n (ast.Suite, \"body\"):DEFINED,\n (ast.FunctionDef, \"body\"):DEFINED,\n (ast.FunctionDef, \"args\"):MIXED,\n (ast.ClassDef, \"body\"):DEFINED,\n (ast.Lambda, \"body\"):DEFINED,\n (ast.Lambda, \"args\"):MIXED,\n (ast.GeneratorExp, \"elt\"):DEFINED,\n (ast.GeneratorExp, 
\"generators\"):DEFINED,\n (ast.arguments, \"args\"):MIXED,\n (ast.arguments, \"kwonlyargs\"):MIXED,\n (ast.arg, \"arg\"):DEFINED,\n }\n if sys.version_info >= (3,4):\n CHILD_BLOCK[(ast.arguments, \"vararg\")] = MIXED\n CHILD_BLOCK[(ast.arguments, \"kwarg\")] = MIXED\n \n if sys.version_info >= (3,5):\n # Python 3.5+\n for (t, f), k in dict(CHILD_BLOCK).items():\n if t == ast.FunctionDef:\n CHILD_BLOCK[(ast.AsyncFunctionDef, f)] = k\n \n \n \n # -------------------------------------------------\n # Identifier that refer to variables.\n # -------------------------------------------------\n \n #: Code assigns the variable\n ASSIGNED = \"assigned\"\n \n #: Code reads the variable\n READ = \"read\"\n \n #: Code explicitly declares the variable as global\n GLOBAL = \"global\"\n \n #: Code explicitly declares the variable as nonlocal\n NONLOCAL = \"nonlocal\"\n \n \n def _name_fields(node):\n if isinstance(node.ctx, (ast.Load, ast.AugLoad)):\n return [(\"id\", READ, EXEC)]\n else:\n return [(\"id\", ASSIGNED, EXEC)]\n \n def _alias_fields(node):\n if node.asname is None:\n return [(\"name\", ASSIGNED, EXEC)]\n else:\n return [(\"asname\", ASSIGNED, EXEC)]\n \n \n #: Maps node-type to a function that takes the node (of that type) as\n #: a parameter. The function returns a list of (attribute-name, usage) tuples\n #: for each attribute of that node which is referring to a variable.\n NAME_FIELDS = {\n ast.FunctionDef: lambda n:[(\"name\", ASSIGNED, EXEC)],\n ast.ClassDef: lambda n:[(\"name\", ASSIGNED, EXEC)],\n ast.Global: lambda n:[(\"names\", GLOBAL, EXEC)],\n ast.Nonlocal: lambda n:[(\"names\", NONLOCAL, EXEC)],\n ast.Name: _name_fields,\n ast.ExceptHandler: lambda n:[(\"name\", ASSIGNED, EXEC)],\n ast.arg: lambda n:[(\"arg\", ASSIGNED, DEFINED)],\n ast.alias: _alias_fields\n }\n if sys.version_info < (3,4):\n NAME_FIELDS[ast.arguments] = lambda n:[(\"vararg\", ASSIGNED, DEFINED), (\"kwarg\", ASSIGNED, DEFINED)]\n \n \n def is_local_variable(usages):\n \"\"\"\n Given the set of usages of a variable within a block, return if this\n variable is in the scope of this block.\n \"\"\"\n return ASSIGNED in usages and GLOBAL not in usages and NONLOCAL not in usages\n \n \n def are_locals_visible_to_childen(block):\n \"\"\"\n Are variables that are local to the given block visible to blocks declared\n within this block?\n \n This is the case for all blocks but those of classes.\n \"\"\"\n return not isinstance(block.defined_by, ast.ClassDef)", "id": "11072354", "language": "Python", "matching_score": 3.3096859455108643, "max_stars_count": 0, "path": "lenatu/_facts3.py" }, { "content": "import ast\nfrom lenatu import _facts as facts\nimport collections\n\n\ndef _nodes_of_block(block):\n \"\"\"\n Returns nodes that define or execute the given block.\n \"\"\"\n def visit(node):\n if getattr(node, \"executed_in\", block) is block or getattr(node, \"defined_block\", None) is block:\n yield node\n for child in ast.iter_child_nodes(node):\n for n in visit(child): yield n\n\n return visit(block.defined_by)\n \n \ndef _variable_accesses_in(block):\n \"\"\"\n Returns (node, attribute, variable-name, usage) tuples for all\n accesses to variables by code executed in the given block.\n \"\"\"\n for node in _nodes_of_block(block):\n \n defined_block = getattr(node, \"defined_block\", None)\n executed_in = getattr(node, \"executed_in\", None)\n \n f = facts.NAME_FIELDS.get(type(node), lambda _:[])\n for attribute, usage, which_block in f(node):\n \n accessed_block = defined_block if which_block == facts.DEFINED else 
executed_in\n if (accessed_block == block): \n value = getattr(node, attribute)\n if isinstance(value, list):\n for v in value:\n yield (node, attribute, v, usage)\n else:\n yield (node, attribute, value, usage)\n\n \ndef _blocks_defined_in(block):\n \"\"\"\n Returns blocks that are directly defined in the given block.\n \"\"\"\n def visit(node):\n if getattr(node, \"executed_in\", None) is block or getattr(node, \"defined_block\", None) is block:\n \n if getattr(node, \"executed_in\", None) is block and getattr(node, \"defined_block\", block) is not block:\n yield node.defined_block\n # no need to look inside the children of this node. We'll only find\n # the same block again.\n else:\n for child in ast.iter_child_nodes(node):\n for b in visit(child): yield b\n \n return visit(block.defined_by)\n \n \ndef _scope_lookup(identifier, usages, blocks):\n \"\"\"\n Find the block the given identifier belongs to.\n \n We search backwards, starting with block[-1]. block[0] must be a module.\n \n Blocks from classes are ignored (with the exception of `block[-1]).\n \"\"\"\n \n if not isinstance(blocks[0].defined_by, ast.Module):\n raise ValueError(\"block[0] should be a module.\")\n\n if facts.GLOBAL in usages:\n return blocks[0]\n \n for block in reversed(blocks):\n if block == blocks[-1]:\n if facts.NONLOCAL in usages:\n continue # don't look in the local block\n else:\n if not facts.are_locals_visible_to_childen(block):\n continue # skip over enclosing class-blocks \n \n if identifier in block.local_variables:\n return block\n else:\n # identifier is a global variable which isn't assigned directly in the module.\n return blocks[0]\n\n \ndef _assign_scopes(block, enclosing_blocks):\n \"\"\"\n Sets `block.local_variables` and the `xyz_block` attributes of all\n nodes executed within `block`.\n \n :param enclosing_blocks: Enclosing blocks (without `block`). 
The module\n is `enclosing_blocks[0]` and the direct parent is `enclosing_blocks[-1]`.\n Empty if `block` is the module.\n All these blocks must have `local_variables` set already.\n \"\"\"\n \n all_usages = collections.defaultdict(set)\n for _, _, identifier, usage in _variable_accesses_in(block):\n all_usages[identifier].add(usage) \n \n \n local_variables = [identifier for identifier, usages in all_usages.items() if facts.is_local_variable(usages)]\n block.local_variables = local_variables\n \n # Variables used in this block are local to one of these blocks\n candidate_blocks = enclosing_blocks + [block]\n \n # For each used variable find the block that variable is defined in.\n scope_map = {identifier : _scope_lookup(identifier, usages, candidate_blocks) for identifier, usages in all_usages.items()}\n\n # Inject scopes into the AST nodes\n for node, attribute, _ , _ in _variable_accesses_in(block):\n variable = getattr(node, attribute)\n if isinstance(variable, list):\n scope = [scope_map[v] for v in variable]\n else:\n scope = scope_map[variable]\n setattr(node, attribute + \"_block\", scope)\n \n \ndef augment_scopes(block, enclosing_blocks=[]):\n \"\"\"\n Augment the block and all sub-blocks with scope information.\n \n This will set the block's `local_variables` field and adds the \n `xyz_block` attributes to the nodes.\n \"\"\"\n _assign_scopes(block, enclosing_blocks)\n for child_block in _blocks_defined_in(block):\n augment_scopes(child_block, enclosing_blocks + [block])\n \n ", "id": "12504889", "language": "Python", "matching_score": 2.6165435314178467, "max_stars_count": 0, "path": "lenatu/_scope.py" }, { "content": "from lenatu._block import Block, augment_blocks\nfrom lenatu._scope import augment_scopes\n\ndef augment(node):\n augment_blocks(node)\n augment_scopes(node.defined_block)\n \n", "id": "11510460", "language": "Python", "matching_score": 0.7227847576141357, "max_stars_count": 0, "path": "lenatu/__init__.py" } ]
2.616544
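A minimal usage sketch, distilled from the parse() and assertSame() helpers in lenatu/test_scope.py above, showing how tools.unindent(), ast.parse(), lenatu.augment() and tools.npath() fit together. It uses only names defined in the lenatu files in the record above (lenatu.augment, tools.unindent, tools.npath, Block.local_variables) and is an illustrative reading aid, not part of the archived repository.

import ast
import lenatu
from lenatu import tools

# Source is written the way the tests write it: indented, then unindent()-ed.
src = tools.unindent("""
    def f():
        x = 1
""")

node = ast.parse(src)
lenatu.augment(node)  # attaches defined_block / executed_in / *_block attributes

# The Name node for `x` resolves to the block defined by f, not the module block.
fn_block = tools.npath(node, ".**{FunctionDef}.defined_block")
name_block = tools.npath(node, ".**{Name}.id_block")
assert fn_block is name_block
assert "x" in fn_block.local_variables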
vlad-ghita
[ { "content": "\"\"\"Run 'Use Cases' example application.\"\"\"\n\nimport sys\n\nfrom containers import Adapters, TestAdapters, UseCases\n\n\nif __name__ == '__main__':\n environment, email = sys.argv[1:]\n\n if environment == 'prod':\n adapters = Adapters()\n elif environment == 'test':\n adapters = TestAdapters()\n\n use_cases = UseCases(adapters=adapters)\n\n use_case = use_cases.signup()\n use_case.execute(email)\n", "id": "11225994", "language": "Python", "matching_score": 2.8925728797912598, "max_stars_count": 0, "path": "examples/miniapps/use_cases/run.py" }, { "content": "\"\"\"Dependency injection containers for 'Use Cases' example application.\"\"\"\n\nfrom dependency_injector import containers, providers\n\nfrom example.adapters import SmtpEmailSender, EchoEmailSender\nfrom example.use_cases import SignupUseCase\n\n\nclass Adapters(containers.DeclarativeContainer):\n \"\"\"Adapters container.\"\"\"\n\n email_sender = providers.Singleton(SmtpEmailSender)\n\n\nclass TestAdapters(containers.DeclarativeContainer):\n \"\"\"Adapters container.\n\n This container is used for testing purposes.\n \"\"\"\n\n email_sender = providers.Singleton(EchoEmailSender)\n\n\nclass UseCases(containers.DeclarativeContainer):\n \"\"\"Use cases container.\"\"\"\n\n adapters = providers.DependenciesContainer()\n\n signup = providers.Factory(SignupUseCase,\n email_sender=adapters.email_sender)\n", "id": "1903968", "language": "Python", "matching_score": 0.9684547781944275, "max_stars_count": 0, "path": "examples/miniapps/use_cases/containers.py" }, { "content": "\"\"\"Dependency injector common unit tests.\"\"\"\n\nimport unittest2 as unittest\n\nfrom dependency_injector import __version__\n\n\nclass VersionTest(unittest.TestCase):\n\n def test_version_follows_semantic_versioning(self):\n self.assertEqual(len(__version__.split('.')), 3)\n", "id": "4973941", "language": "Python", "matching_score": 1.8274897336959839, "max_stars_count": 1, "path": "tests/unit/test_common_py2_py3.py" }, { "content": "\"\"\"Dependency injector config providers unit tests.\"\"\"\n\nimport unittest2 as unittest\n\nfrom dependency_injector import providers\n\n\nclass ConfigTests(unittest.TestCase):\n\n def setUp(self):\n self.config = providers.Configuration(name='config')\n\n def tearDown(self):\n del self.config\n\n def test_providers_are_providers(self):\n self.assertTrue(providers.is_provider(self.config.a))\n self.assertTrue(providers.is_provider(self.config.a.b))\n self.assertTrue(providers.is_provider(self.config.a.b.c))\n self.assertTrue(providers.is_provider(self.config.a.b.d))\n\n def test_providers_are_not_delegates(self):\n self.assertFalse(providers.is_delegated(self.config.a))\n self.assertFalse(providers.is_delegated(self.config.a.b))\n self.assertFalse(providers.is_delegated(self.config.a.b.c))\n self.assertFalse(providers.is_delegated(self.config.a.b.d))\n\n def test_providers_identity(self):\n self.assertIs(self.config.a, self.config.a)\n self.assertIs(self.config.a.b, self.config.a.b)\n self.assertIs(self.config.a.b.c, self.config.a.b.c)\n self.assertIs(self.config.a.b.d, self.config.a.b.d)\n\n def test_get_name(self):\n self.assertEqual(self.config.a.b.c.get_name(), 'config.a.b.c')\n\n def test_providers_value_setting(self):\n a = self.config.a\n ab = self.config.a.b\n abc = self.config.a.b.c\n abd = self.config.a.b.d\n\n self.config.update({'a': {'b': {'c': 1, 'd': 2}}})\n\n self.assertEqual(a(), {'b': {'c': 1, 'd': 2}})\n self.assertEqual(ab(), {'c': 1, 'd': 2})\n self.assertEqual(abc(), 1)\n 
self.assertEqual(abd(), 2)\n\n def test_providers_with_already_set_value(self):\n self.config.update({'a': {'b': {'c': 1, 'd': 2}}})\n\n a = self.config.a\n ab = self.config.a.b\n abc = self.config.a.b.c\n abd = self.config.a.b.d\n\n self.assertEqual(a(), {'b': {'c': 1, 'd': 2}})\n self.assertEqual(ab(), {'c': 1, 'd': 2})\n self.assertEqual(abc(), 1)\n self.assertEqual(abd(), 2)\n\n def test_providers_value_override(self):\n a = self.config.a\n ab = self.config.a.b\n abc = self.config.a.b.c\n abd = self.config.a.b.d\n\n self.config.override({'a': {'b': {'c': 1, 'd': 2}}})\n\n self.assertEqual(a(), {'b': {'c': 1, 'd': 2}})\n self.assertEqual(ab(), {'c': 1, 'd': 2})\n self.assertEqual(abc(), 1)\n self.assertEqual(abd(), 2)\n\n def test_providers_with_already_overridden_value(self):\n self.config.override({'a': {'b': {'c': 1, 'd': 2}}})\n\n a = self.config.a\n ab = self.config.a.b\n abc = self.config.a.b.c\n abd = self.config.a.b.d\n\n self.assertEqual(a(), {'b': {'c': 1, 'd': 2}})\n self.assertEqual(ab(), {'c': 1, 'd': 2})\n self.assertEqual(abc(), 1)\n self.assertEqual(abd(), 2)\n\n def test_providers_with_default_value(self):\n self.config = providers.Configuration(\n name='config', default={'a': {'b': {'c': 1, 'd': 2}}})\n\n a = self.config.a\n ab = self.config.a.b\n abc = self.config.a.b.c\n abd = self.config.a.b.d\n\n self.assertEqual(a(), {'b': {'c': 1, 'd': 2}})\n self.assertEqual(ab(), {'c': 1, 'd': 2})\n self.assertEqual(abc(), 1)\n self.assertEqual(abd(), 2)\n\n def test_providers_with_default_value_overriding(self):\n self.config = providers.Configuration(\n name='config', default={'a': {'b': {'c': 1, 'd': 2}}})\n\n self.assertEqual(self.config.a(), {'b': {'c': 1, 'd': 2}})\n self.assertEqual(self.config.a.b(), {'c': 1, 'd': 2})\n self.assertEqual(self.config.a.b.c(), 1)\n self.assertEqual(self.config.a.b.d(), 2)\n\n self.config.override({'a': {'b': {'c': 3, 'd': 4}}})\n self.assertEqual(self.config.a(), {'b': {'c': 3, 'd': 4}})\n self.assertEqual(self.config.a.b(), {'c': 3, 'd': 4})\n self.assertEqual(self.config.a.b.c(), 3)\n self.assertEqual(self.config.a.b.d(), 4)\n\n self.config.reset_override()\n self.assertEqual(self.config.a(), {'b': {'c': 1, 'd': 2}})\n self.assertEqual(self.config.a.b(), {'c': 1, 'd': 2})\n self.assertEqual(self.config.a.b.c(), 1)\n self.assertEqual(self.config.a.b.d(), 2)\n\n def test_value_of_undefined_option(self):\n self.assertIsNone(self.config.a())\n\n def test_getting_of_special_attributes(self):\n with self.assertRaises(AttributeError):\n self.config.__name__\n\n def test_getting_of_special_attributes_from_child(self):\n a = self.config.a\n with self.assertRaises(AttributeError):\n a.__name__\n\n def test_deepcopy(self):\n provider = providers.Configuration('config')\n provider_copy = providers.deepcopy(provider)\n\n self.assertIsNot(provider, provider_copy)\n self.assertIsInstance(provider, providers.Configuration)\n\n def test_deepcopy_from_memo(self):\n provider = providers.Configuration('config')\n provider_copy_memo = providers.Configuration('config')\n\n provider_copy = providers.deepcopy(\n provider, memo={id(provider): provider_copy_memo})\n\n self.assertIs(provider_copy, provider_copy_memo)\n\n def test_deepcopy_overridden(self):\n provider = providers.Configuration('config')\n object_provider = providers.Object(object())\n\n provider.override(object_provider)\n\n provider_copy = providers.deepcopy(provider)\n object_provider_copy = provider_copy.overridden[0]\n\n self.assertIsNot(provider, provider_copy)\n 
self.assertIsInstance(provider, providers.Configuration)\n\n self.assertIsNot(object_provider, object_provider_copy)\n self.assertIsInstance(object_provider_copy, providers.Object)\n\n def test_repr(self):\n self.assertEqual(repr(self.config),\n '<dependency_injector.providers.'\n 'Configuration({0}) at {1}>'.format(\n repr('config'),\n hex(id(self.config))))\n\n def test_repr_child(self):\n self.assertEqual(repr(self.config.a.b.c),\n '<dependency_injector.providers.'\n 'Configuration({0}) at {1}>'.format(\n repr('config.a.b.c'),\n hex(id(self.config.a.b.c))))\n", "id": "8816155", "language": "Python", "matching_score": 3.3067116737365723, "max_stars_count": 0, "path": "tests/unit/providers/test_configuration_py2_py3.py" }, { "content": "\"\"\"Dependency injector provider utils unit tests.\"\"\"\n\nimport unittest2 as unittest\n\nfrom dependency_injector import (\n providers,\n errors,\n)\n\n\nclass IsProviderTests(unittest.TestCase):\n\n def test_with_instance(self):\n self.assertTrue(providers.is_provider(providers.Provider()))\n\n def test_with_class(self):\n self.assertFalse(providers.is_provider(providers.Provider))\n\n def test_with_string(self):\n self.assertFalse(providers.is_provider('some_string'))\n\n def test_with_object(self):\n self.assertFalse(providers.is_provider(object()))\n\n def test_with_subclass_instance(self):\n class SomeProvider(providers.Provider):\n pass\n\n self.assertTrue(providers.is_provider(SomeProvider()))\n\n def test_with_class_with_getattr(self):\n class SomeClass(object):\n def __getattr__(self, _):\n return False\n\n self.assertFalse(providers.is_provider(SomeClass()))\n\n\nclass EnsureIsProviderTests(unittest.TestCase):\n\n def test_with_instance(self):\n provider = providers.Provider()\n self.assertIs(providers.ensure_is_provider(provider), provider)\n\n def test_with_class(self):\n self.assertRaises(errors.Error,\n providers.ensure_is_provider,\n providers.Provider)\n\n def test_with_string(self):\n self.assertRaises(errors.Error,\n providers.ensure_is_provider,\n 'some_string')\n\n def test_with_object(self):\n self.assertRaises(errors.Error, providers.ensure_is_provider, object())\n", "id": "10366494", "language": "Python", "matching_score": 0.8497641086578369, "max_stars_count": 1, "path": "tests/unit/providers/test_utils_py2_py3.py" }, { "content": "\"\"\"The Code, that demonstrates dependency injection pattern.\"\"\"\n\n\nclass Service(object):\n \"\"\"Some \"Service\".\"\"\"\n\n\nclass Client(object):\n \"\"\"Some \"Client\" that uses \"Service\".\"\"\"\n\n def __init__(self, service): # Service instance is injected into Client\n \"\"\"Initializer.\"\"\"\n self.service = service\n\n\nif __name__ == '__main__':\n service = Service() # Application creates Service instance\n client = Client(service) # and inject Service instance into the Client\n", "id": "3564688", "language": "Python", "matching_score": 0.9336063265800476, "max_stars_count": 0, "path": "examples/di_demo/example_di.py" }, { "content": "\"\"\"Example main module.\"\"\"\n\n\ndef main(uid, password, photo, users_service, auth_service, photos_service):\n \"\"\"Authenticate user and upload photo.\"\"\"\n user = users_service.get_user_by_id(uid)\n auth_service.authenticate(user, password)\n photos_service.upload_photo(user['uid'], photo)\n", "id": "572668", "language": "Python", "matching_score": 1.2247488498687744, "max_stars_count": 0, "path": "examples/miniapps/services_v2/example/main.py" }, { "content": "\"\"\"Mail service and user registration DI container example.\"\"\"\n\nfrom 
dependency_injector.containers import DeclarativeContainer\nfrom dependency_injector.providers import Callable, Singleton\n\nimport example\n\n\nclass Container(DeclarativeContainer):\n \"\"\"DI container.\"\"\"\n\n mail_service = Singleton(example.MailService,\n host='localhost',\n port=587,\n login='my_login',\n password='<PASSWORD>')\n\n add_user = Callable(example.add_user,\n mailer=mail_service)\n\n\nif __name__ == '__main__':\n print('Using real mail service:')\n Container.add_user('<EMAIL>', 'password')\n # Using real mail service:\n # Connecting server localhost:587 with my_login:super_secret_password\n # Sending \"Your password is password\" to \"<EMAIL>\"\n\n print('Using mail service stub:')\n Container.add_user('<EMAIL>', 'password',\n mailer=example.MailServiceStub())\n # Using mail service stub:\n # Emulating sending \"Your password is password\" to \"<EMAIL>\"\n\n # Also you can override provider by another provider:\n Container.mail_service.override(Singleton(example.MailServiceStub))\n print('Using mail service stub by overriding mail service provider:')\n Container.add_user('<EMAIL>', 'password')\n # Using mail service stub by overriding mail service provider:\n # Emulating sending \"Your password is password\" to \"<EMAIL>\"\n Container.mail_service.reset_override() # Resetting provider overriding\n", "id": "4820706", "language": "Python", "matching_score": 0.6669859886169434, "max_stars_count": 0, "path": "examples/miniapps/mail_service/container.py" }, { "content": "\"\"\"A naive example of dependency injection on Python.\n\nExample implementation of dependency injection in Python from Martin Fowler's\narticle about dependency injection and inversion of control:\n\nhttp://www.martinfowler.com/articles/injection.html\n\nThis mini application uses ``movies`` library, that is configured to work with\nsqlite movies database and csv file movies database.\n\"\"\"\n\nimport sqlite3\n\nimport movies\nimport movies.finders\n\nimport example.db\nimport example.main\n\nimport settings\nimport fixtures\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nclass ResourcesModule(containers.DeclarativeContainer):\n \"\"\"IoC container of application resource providers.\"\"\"\n\n database = providers.Singleton(sqlite3.connect, settings.MOVIES_DB_PATH)\n\n\[email protected](movies.MoviesModule)\nclass DbMoviesModule(movies.MoviesModule):\n \"\"\"IoC container for overriding movies module component providers.\"\"\"\n\n finder = providers.Factory(movies.finders.SqliteMovieFinder,\n database=ResourcesModule.database,\n **movies.MoviesModule.finder.kwargs)\n\n\[email protected](movies.MoviesModule)\nclass CsvMoviesModule(movies.MoviesModule):\n \"\"\"IoC container for overriding movies module component providers.\"\"\"\n\n finder = providers.Factory(movies.finders.CsvMovieFinder,\n csv_file_path=settings.MOVIES_CSV_PATH,\n delimiter=',',\n **movies.MoviesModule.finder.kwargs)\n\n\nclass DbApplication(containers.DeclarativeContainer):\n \"\"\"IoC container of database application component providers.\"\"\"\n\n main = providers.Callable(example.main.main,\n movie_lister=DbMoviesModule.lister)\n\n init_db = providers.Callable(example.db.init_sqlite,\n movies_data=fixtures.MOVIES_SAMPLE_DATA,\n database=ResourcesModule.database)\n\n\nclass CsvApplication(containers.DeclarativeContainer):\n \"\"\"IoC container of csv application component providers.\"\"\"\n\n main = providers.Callable(example.main.main,\n 
movie_lister=CsvMoviesModule.lister)\n\n init_db = providers.Callable(example.db.init_csv,\n movies_data=fixtures.MOVIES_SAMPLE_DATA,\n csv_file_path=settings.MOVIES_CSV_PATH,\n delimiter=',')\n\n\nif __name__ == '__main__':\n DbApplication.init_db()\n DbApplication.main()\n\n CsvApplication.init_db()\n CsvApplication.main()\n", "id": "1190788", "language": "Python", "matching_score": 6.602135181427002, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/app_db_csv.py" }, { "content": "\"\"\"A naive example of dependency injection on Python.\n\nExample implementation of dependency injection in Python from Martin Fowler's\narticle about dependency injection and inversion of control:\n\nhttp://www.martinfowler.com/articles/injection.html\n\nThis mini application uses ``movies`` library, that is configured to work with\ncsv file movies database.\n\"\"\"\n\nimport movies\nimport movies.finders\n\nimport example.db\nimport example.main\n\nimport settings\nimport fixtures\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\[email protected](movies.MoviesModule)\nclass MyMoviesModule(containers.DeclarativeContainer):\n \"\"\"IoC container for overriding movies module component providers.\"\"\"\n\n finder = providers.Factory(movies.finders.CsvMovieFinder,\n csv_file_path=settings.MOVIES_CSV_PATH,\n delimiter=',',\n **movies.MoviesModule.finder.kwargs)\n\n\nclass CsvApplication(containers.DeclarativeContainer):\n \"\"\"IoC container of csv application component providers.\"\"\"\n\n main = providers.Callable(example.main.main,\n movie_lister=movies.MoviesModule.lister)\n\n init_db = providers.Callable(example.db.init_csv,\n movies_data=fixtures.MOVIES_SAMPLE_DATA,\n csv_file_path=settings.MOVIES_CSV_PATH,\n delimiter=',')\n\n\nif __name__ == '__main__':\n CsvApplication.init_db()\n CsvApplication.main()\n", "id": "56080", "language": "Python", "matching_score": 2.0987207889556885, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/app_csv.py" }, { "content": "\"\"\"Example main module.\"\"\"\n\n\ndef main(movie_lister):\n \"\"\"Run application.\n\n This program prints info about all movies that were directed by different\n persons and then prints all movies that were released in 2015.\n\n :param movie_lister: Movie lister instance\n :type movie_lister: movies.listers.MovieLister\n \"\"\"\n print(movie_lister.movies_directed_by('<NAME>'))\n print(movie_lister.movies_directed_by('<NAME>'))\n print(movie_lister.movies_directed_by('<NAME>'))\n\n print(movie_lister.movies_released_in(2015))\n", "id": "1175266", "language": "Python", "matching_score": 0.9018293619155884, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/example/main.py" }, { "content": "\"\"\"Movies package.\n\nTop-level package of movies library. This package contains IoC container of\nmovies module component providers - ``MoviesModule``. It is recommended to use\nmovies library functionality by fetching required instances from\n``MoviesModule`` providers.\n\n``MoviesModule.finder`` is a factory that provides abstract component\n``finders.MovieFinder``. 
This provider should be overridden by provider of\nconcrete finder implementation in terms of library configuration.\n\nEach of ``MoviesModule`` providers could be overridden.\n\"\"\"\n\nimport movies.finders\nimport movies.listers\nimport movies.models\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nclass MoviesModule(containers.DeclarativeContainer):\n \"\"\"IoC container of movies module component providers.\"\"\"\n\n movie = providers.Factory(movies.models.Movie)\n\n finder = providers.AbstractFactory(movies.finders.MovieFinder,\n movie_model=movie.provider)\n\n lister = providers.Factory(movies.listers.MovieLister,\n movie_finder=finder)\n", "id": "4320453", "language": "Python", "matching_score": 2.2663605213165283, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/movies/__init__.py" }, { "content": "\"\"\"Movie finders module.\n\nThis module contains all finder implementations.\n\"\"\"\n\nimport csv\n\n\nclass MovieFinder(object):\n \"\"\"Movie finder component.\n\n Movie finder component is responsible for fetching movies data from\n different storages.\n \"\"\"\n\n def __init__(self, movie_model):\n \"\"\"Initializer.\n\n :param movie_model: Movie model's factory\n :type movie_model: movies.models.Movie\n \"\"\"\n self._movie_model = movie_model\n\n def find_all(self):\n \"\"\"Return all found movies.\n\n :rtype: list[movies.models.Movie]\n :return: List of movie instances.\n \"\"\"\n raise NotImplementedError()\n\n\nclass CsvMovieFinder(MovieFinder):\n \"\"\"Movie finder that fetches movies data from csv file.\"\"\"\n\n def __init__(self, movie_model, csv_file_path, delimiter):\n \"\"\"Initializer.\n\n :param movie_model: Movie model's factory\n :type movie_model: movies.models.Movie\n\n :param csv_file_path: Path to csv file with movies data\n :type csv_file_path: str\n\n :param delimiter: Csv file's delimiter\n :type delimiter: str\n \"\"\"\n self._csv_file_path = csv_file_path\n self._delimiter = delimiter\n super(CsvMovieFinder, self).__init__(movie_model)\n\n def find_all(self):\n \"\"\"Return all found movies.\n\n :rtype: list[movies.models.Movie]\n :return: List of movie instances.\n \"\"\"\n with open(self._csv_file_path) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=self._delimiter)\n return [self._movie_model(*row) for row in csv_reader]\n\n\nclass SqliteMovieFinder(MovieFinder):\n \"\"\"Movie finder that fetches movies data from sqlite database.\"\"\"\n\n def __init__(self, movie_model, database):\n \"\"\"Initializer.\n\n :param movie_model: Movie model's factory\n :type movie_model: (object) -> movies.models.Movie\n\n :param database: Connection to sqlite database with movies data\n :type database: sqlite3.Connection\n \"\"\"\n self._database = database\n super(SqliteMovieFinder, self).__init__(movie_model)\n\n def find_all(self):\n \"\"\"Return all found movies.\n\n :rtype: list[movies.models.Movie]\n :return: List of movie instances.\n \"\"\"\n with self._database:\n rows = self._database.execute('SELECT name, year, director '\n 'FROM movies')\n return [self._movie_model(*row) for row in rows]\n", "id": "6307200", "language": "Python", "matching_score": 2.9673571586608887, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/movies/finders.py" }, { "content": "\"\"\"Example database module.\"\"\"\n\nimport csv\n\n\ndef init_sqlite(movies_data, database):\n \"\"\"Initialize sqlite3 movies database.\n\n :param movies_data: Data about movies\n :type movies_data: 
tuple[tuple]\n\n :param database: Connection to sqlite database with movies data\n :type database: sqlite3.Connection\n \"\"\"\n with database:\n database.execute('CREATE TABLE IF NOT EXISTS movies '\n '(name text, year int, director text)')\n database.execute('DELETE FROM movies')\n database.executemany('INSERT INTO movies VALUES (?,?,?)', movies_data)\n\n\ndef init_csv(movies_data, csv_file_path, delimiter):\n \"\"\"Initialize csv movies database.\n\n :param movies_data: Data about movies\n :type movies_data: tuple[tuple]\n\n :param csv_file_path: Path to csv file with movies data\n :type csv_file_path: str\n\n :param delimiter: Csv file's delimiter\n :type delimiter: str\n \"\"\"\n with open(csv_file_path, 'w') as csv_file:\n csv.writer(csv_file, delimiter=delimiter).writerows(movies_data)\n", "id": "6785966", "language": "Python", "matching_score": 1.7946127653121948, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/example/db.py" }, { "content": "\"\"\"Settings module.\n\nThis module contains application's settings and constants.\n\"\"\"\n\nimport os\n\n\nDATA_DIR = os.path.abspath(os.path.dirname(__file__) + '/data')\nMOVIES_CSV_PATH = DATA_DIR + '/movies.csv'\nMOVIES_DB_PATH = DATA_DIR + '/movies.db'\n", "id": "5640812", "language": "Python", "matching_score": 0.9927933812141418, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/settings.py" }, { "content": "\"\"\"Fixtures module.\"\"\"\n\n\nMOVIES_SAMPLE_DATA = (\n ('The Hunger Games: Mockingjay - Part 2', 2015, '<NAME>'),\n ('The 33', 2015, '<NAME>'),\n ('Star Wars: Episode VII - The Force Awakens', 2015, '<NAME>'),\n)\n", "id": "3821490", "language": "Python", "matching_score": 0.0956239178776741, "max_stars_count": 0, "path": "examples/miniapps/movie_lister/fixtures.py" }, { "content": "\"\"\"Example games module.\"\"\"\n\n\nclass Game(object):\n \"\"\"Base game class.\"\"\"\n\n def __init__(self, player1, player2):\n \"\"\"Initializer.\"\"\"\n self.player1 = player1\n self.player2 = player2\n\n def play(self):\n \"\"\"Play game.\"\"\"\n print('{0} and {1} are playing {2}'.format(\n self.player1, self.player2, self.__class__.__name__.lower()))\n\n\nclass Chess(Game):\n \"\"\"Chess game.\"\"\"\n\n\nclass Checkers(Game):\n \"\"\"Checkers game.\"\"\"\n\n\nclass Ludo(Game):\n \"\"\"Ludo game.\"\"\"\n", "id": "8890143", "language": "Python", "matching_score": 2.5343093872070312, "max_stars_count": 0, "path": "examples/providers/factory_aggregate/games.py" }, { "content": "\"\"\"`FactoryAggregate` providers example.\"\"\"\n\nimport sys\n\nimport dependency_injector.providers as providers\n\nfrom games import Chess, Checkers, Ludo\n\n\ngame_factory = providers.FactoryAggregate(chess=providers.Factory(Chess),\n checkers=providers.Factory(Checkers),\n ludo=providers.Factory(Ludo))\n\nif __name__ == '__main__':\n game_type = sys.argv[1].lower()\n player1 = sys.argv[2].capitalize()\n player2 = sys.argv[3].capitalize()\n\n selected_game = game_factory(game_type, player1, player2)\n selected_game.play()\n\n # $ python example.py chess <NAME>\n # John and Jane are playing chess\n #\n # $ python example.py checkers <NAME>\n # John and Jane are playing checkers\n #\n # $ python example.py ludo <NAME>\n # John and Jane are playing ludo\n", "id": "10685008", "language": "Python", "matching_score": 0.5335922241210938, "max_stars_count": 0, "path": "examples/providers/factory_aggregate/example.py" }, { "content": "\"\"\"Specializing declarative container and factory provider example.\"\"\"\n\nimport collections\n\nimport 
dependency_injector.containers as containers\nimport dependency_injector.providers as providers\nimport dependency_injector.errors as errors\n\n\nclass SequenceProvider(providers.Factory):\n \"\"\"Sequence factory.\n\n Can provide only sequence objects.\n \"\"\"\n\n provided_type = collections.Sequence\n\n\nclass SequencesContainer(containers.DeclarativeContainer):\n \"\"\"IoC container.\n\n Can contain only sequence providers.\n \"\"\"\n\n provider_type = SequenceProvider\n\n\nif __name__ == '__main__':\n try:\n class _SequenceContainer1(SequencesContainer):\n object_provider = providers.Factory(object)\n except errors.Error as exception:\n print(exception)\n # <class '__main__._SequenceContainer1'> can contain only\n # <class '__main__.SequenceProvider'> instances\n\n try:\n class _SequenceContainer2(SequencesContainer):\n object_provider = SequenceProvider(object)\n except errors.Error as exception:\n print(exception)\n # <class '__main__.SequenceProvider'> can provide only\n # <class '_abcoll.Sequence'> instances\n\n class _SequenceContaier3(SequencesContainer):\n list_provider = SequenceProvider(list)\n\n assert _SequenceContaier3.list_provider() == list()\n", "id": "2664223", "language": "Python", "matching_score": 3.7952628135681152, "max_stars_count": 0, "path": "examples/containers/declarative_provider_type.py" }, { "content": "\"\"\"Specializing dynamic container and factory provider example.\"\"\"\n\nimport collections\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\nimport dependency_injector.errors as errors\n\n\nclass SequenceProvider(providers.Factory):\n \"\"\"Sequence factory.\n\n Can provide only sequence objects.\n \"\"\"\n\n provided_type = collections.Sequence\n\n\nsequences_container = containers.DynamicContainer()\nsequences_container.provider_type = SequenceProvider\n\n\nif __name__ == '__main__':\n try:\n sequences_container.object_provider = providers.Factory(object)\n except errors.Error as exception:\n print(exception)\n # <dependency_injector.containers.DynamicContainer object at\n # 0x107820ed0> can contain only <class '__main__.SequenceProvider'>\n # instances\n\n try:\n sequences_container.object_provider = SequenceProvider(object)\n except errors.Error as exception:\n print(exception)\n # <class '__main__.SequenceProvider'> can provide only\n # <class '_abcoll.Sequence'> instances\n\n sequences_container.list_provider = SequenceProvider(list)\n\n assert sequences_container.list_provider() == list()\n", "id": "5075762", "language": "Python", "matching_score": 1.7652124166488647, "max_stars_count": 0, "path": "examples/containers/dynamic_provider_type.py" }, { "content": "\"\"\"Declarative IoC container overriding example.\"\"\"\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nclass Container(containers.DeclarativeContainer):\n \"\"\"IoC container.\"\"\"\n\n sequence_factory = providers.Factory(list)\n\n\nclass OverridingContainer(containers.DeclarativeContainer):\n \"\"\"Overriding IoC container.\"\"\"\n\n sequence_factory = providers.Factory(tuple)\n\n\n# Overriding `Container` with `OverridingContainer`:\nContainer.override(OverridingContainer)\n\n# Creating some objects using overridden container:\nsequence_1 = Container.sequence_factory([1, 2, 3])\nsequence_2 = Container.sequence_factory([3, 2, 1])\n\n# Making some asserts:\nassert Container.overridden == (OverridingContainer,)\nassert sequence_1 == (1, 2, 3) and sequence_2 == (3, 2, 
1)\n", "id": "7158369", "language": "Python", "matching_score": 1.9489086866378784, "max_stars_count": 0, "path": "examples/containers/override_declarative.py" }, { "content": "\"\"\"Declarative IoC containers inheritance example.\"\"\"\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nclass ContainerA(containers.DeclarativeContainer):\n \"\"\"Example IoC container A.\"\"\"\n\n provider1 = providers.Factory(object)\n\n\nclass ContainerB(ContainerA):\n \"\"\"Example IoC container B.\"\"\"\n\n provider2 = providers.Singleton(object)\n\n\n# Making some asserts for `providers` attribute:\nassert ContainerA.providers == dict(provider1=ContainerA.provider1)\nassert ContainerB.providers == dict(provider1=ContainerA.provider1,\n provider2=ContainerB.provider2)\n\n# Making some asserts for `cls_providers` attribute:\nassert ContainerA.cls_providers == dict(provider1=ContainerA.provider1)\nassert ContainerB.cls_providers == dict(provider2=ContainerB.provider2)\n\n# Making some asserts for `inherited_providers` attribute:\nassert ContainerA.inherited_providers == dict()\nassert ContainerB.inherited_providers == dict(provider1=ContainerB.provider1)\n", "id": "7333587", "language": "Python", "matching_score": 0.9875277280807495, "max_stars_count": 0, "path": "examples/containers/declarative_inheritance.py" }, { "content": "\"\"\"Dynamic container simple example.\"\"\"\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\n# Defining dynamic container:\ncontainer = containers.DynamicContainer()\ncontainer.factory1 = providers.Factory(object)\ncontainer.factory2 = providers.Factory(object)\n\n# Creating some objects:\nobject1 = container.factory1()\nobject2 = container.factory2()\n\n# Making some asserts:\nassert object1 is not object2\nassert isinstance(object1, object) and isinstance(object2, object)\n", "id": "12525714", "language": "Python", "matching_score": 2.7742223739624023, "max_stars_count": 0, "path": "examples/containers/dynamic.py" }, { "content": "\"\"\"Declarative IoC container simple example.\"\"\"\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\n# Defining declarative IoC container:\nclass Container(containers.DeclarativeContainer):\n \"\"\"Example IoC container.\"\"\"\n\n factory1 = providers.Factory(object)\n\n factory2 = providers.Factory(object)\n\n\n# Creating some objects:\nobject1 = Container.factory1()\nobject2 = Container.factory2()\n\n# Making some asserts:\nassert object1 is not object2\nassert isinstance(object1, object)\nassert isinstance(object2, object)\n", "id": "6906162", "language": "Python", "matching_score": 1.6518315076828003, "max_stars_count": 0, "path": "examples/containers/declarative.py" }, { "content": "\"\"\"Factory provider example.\"\"\"\n\nfrom dependency_injector import providers\n\n\nobject_factory = providers.Factory(object)\n\n\nif __name__ == '__main__':\n object1 = object_factory()\n object2 = object_factory()\n\n assert object1 is not object2\n assert isinstance(object1, object) and isinstance(object2, object)\n", "id": "7826213", "language": "Python", "matching_score": 1.2460976839065552, "max_stars_count": 0, "path": "examples/speech/factory.py" }, { "content": "\"\"\"`ThreadLocalSingleton` providers example.\"\"\"\n\nimport threading\nimport Queue\n\nimport dependency_injector.providers as providers\n\n\ndef example(example_object, queue):\n \"\"\"Put provided object in 
the provided queue.\"\"\"\n queue.put(example_object)\n\n\n# Create thread-local singleton provider for some object (main thread):\nthread_local_object = providers.ThreadLocalSingleton(object)\n\n# Create singleton provider for thread-safe queue:\nqueue = providers.Singleton(Queue.Queue)\n\n# Create callable provider for example(), inject dependencies:\nexample = providers.DelegatedCallable(example,\n example_object=thread_local_object,\n queue=queue)\n\n# Create factory provider for threads that are targeted to execute example():\nthread_factory = providers.Factory(threading.Thread,\n target=example)\n\nif __name__ == '__main__':\n # Create 10 threads for concurrent execution of example():\n threads = []\n for thread_number in range(10):\n threads.append(thread_factory(name='Thread{0}'.format(thread_number)))\n\n # Start execution of all created threads:\n for thread in threads:\n thread.start()\n\n # Wait while threads would complete their work:\n for thread in threads:\n thread.join()\n\n # Making some asserts (main thread):\n all_objects = set()\n\n while not queue().empty():\n all_objects.add(queue().get())\n\n assert len(all_objects) == len(threads)\n # Queue contains same number of objects as number of threads where\n # thread-local singleton provider was used.\n", "id": "11145369", "language": "Python", "matching_score": 0.8161015510559082, "max_stars_count": 0, "path": "examples/providers/singleton_thread_locals.py" }, { "content": "\"\"\"`Factory` providers delegation example.\"\"\"\n\nimport collections\n\nimport dependency_injector.providers as providers\n\n\nPhoto = collections.namedtuple('Photo', [])\n\n\nclass User(object):\n \"\"\"Example user model.\"\"\"\n\n def __init__(self, photos_factory):\n \"\"\"Initializer.\"\"\"\n self.photos_factory = photos_factory\n self._main_photo = None\n\n @property\n def main_photo(self):\n \"\"\"Return user's main photo.\"\"\"\n if not self._main_photo:\n self._main_photo = self.photos_factory()\n return self._main_photo\n\n\n# Defining User and Photo factories using DelegatedFactory provider:\nphotos_factory = providers.DelegatedFactory(Photo)\nusers_factory = providers.DelegatedFactory(User,\n photos_factory=photos_factory)\n\n# or using Delegate(Factory(...))\n\nphotos_factory = providers.Factory(Photo)\nusers_factory = providers.Factory(User,\n photos_factory=providers.Delegate(\n photos_factory))\n\n\n# or using Factory(...).delegate()\n\nphotos_factory = providers.Factory(Photo)\nusers_factory = providers.Factory(User,\n photos_factory=photos_factory.delegate())\n\n\n# Creating several User objects:\nuser1 = users_factory() # Same as: user1 = User(photos_factory=photos_factory)\nuser2 = users_factory() # Same as: user2 = User(photos_factory=photos_factory)\n\n# Making some asserts:\nassert isinstance(user1.main_photo, Photo)\nassert isinstance(user2.main_photo, Photo)\n\n# or using Factory(...).provider\n\nphotos_factory = providers.Factory(Photo)\nusers_factory = providers.Factory(User,\n photos_factory=photos_factory.provider)\n\n\n# Creating several User objects:\nuser1 = users_factory() # Same as: user1 = User(photos_factory=photos_factory)\nuser2 = users_factory() # Same as: user2 = User(photos_factory=photos_factory)\n\n# Making some asserts:\nassert isinstance(user1.main_photo, Photo)\nassert isinstance(user2.main_photo, Photo)\n", "id": "10707895", "language": "Python", "matching_score": 3.473449945449829, "max_stars_count": 0, "path": "examples/providers/factory_delegation.py" }, { "content": "\"\"\"`Factory` providers init 
injections example.\"\"\"\n\nimport collections\n\nimport dependency_injector.providers as providers\n\n\nCreditCard = collections.namedtuple('CreditCard', [])\nPhoto = collections.namedtuple('Photo', [])\nUser = collections.namedtuple('User', ['uid', 'main_photo', 'credit_card'])\n\n# User, Photo and CreditCard factories:\ncredit_cards_factory = providers.Factory(CreditCard)\nphotos_factory = providers.Factory(Photo)\nusers_factory = providers.Factory(User,\n main_photo=photos_factory,\n credit_card=credit_cards_factory)\n\n# Creating several User objects:\nuser1 = users_factory(1)\n# Same as: user1 = User(1,\n# main_photo=Photo(),\n# credit_card=CreditCard())\nuser2 = users_factory(2)\n# Same as: user2 = User(2,\n# main_photo=Photo(),\n# credit_card=CreditCard())\n\n\n# Context keyword arguments have priority on keyword argument injections:\nmain_photo = Photo()\ncredit_card = CreditCard()\n\nuser3 = users_factory(3,\n main_photo=main_photo,\n credit_card=credit_card)\n# Same as: user3 = User(3,\n# main_photo=main_photo,\n# credit_card=credit_card)\n", "id": "11425268", "language": "Python", "matching_score": 3.1985392570495605, "max_stars_count": 0, "path": "examples/providers/factory_init_injections.py" }, { "content": "\"\"\"`Factory` providers example.\"\"\"\n\nimport collections\n\nimport dependency_injector.providers as providers\n\n\nUser = collections.namedtuple('User', [])\n\n# Factory provider creates new instance of specified class on every call.\nusers_factory = providers.Factory(User)\n\n# Creating several User objects:\nuser1 = users_factory() # Same as: user1 = User()\nuser2 = users_factory() # Same as: user2 = User()\n", "id": "10902558", "language": "Python", "matching_score": 0.9198609590530396, "max_stars_count": 0, "path": "examples/providers/factory.py" }, { "content": "\"\"\"`Singleton` providers resetting example.\"\"\"\n\nimport collections\n\nimport dependency_injector.providers as providers\n\n\nUsersService = collections.namedtuple('UsersService', [])\n\n# Users service singleton provider:\nusers_service_provider = providers.Singleton(UsersService)\n\n# Retrieving several UsersService objects:\nusers_service1 = users_service_provider()\nusers_service2 = users_service_provider()\n\n# Making some asserts:\nassert users_service1 is users_service2\n\n# Resetting of memorized instance:\nusers_service_provider.reset()\n\n# Retrieving one more UserService object:\nusers_service3 = users_service_provider()\n\n# Making some asserts:\nassert users_service3 is not users_service1\n", "id": "6349850", "language": "Python", "matching_score": 3.5812411308288574, "max_stars_count": 0, "path": "examples/providers/singleton_reseting.py" }, { "content": "\"\"\"`Singleton` providers example.\"\"\"\n\nimport collections\n\nimport dependency_injector.providers as providers\n\n\nUsersService = collections.namedtuple('UsersService', [])\n\n# Singleton provider creates new instance of specified class on first call and\n# returns same instance on every next call.\nusers_service_provider = providers.Singleton(UsersService)\n\n# Retrieving several UserService objects:\nusers_service1 = users_service_provider()\nusers_service2 = users_service_provider()\n\n# Making some asserts:\nassert users_service1 is users_service2\n", "id": "2379827", "language": "Python", "matching_score": 1.671553373336792, "max_stars_count": 0, "path": "examples/providers/singleton.py" }, { "content": "\"\"\"Creation of dynamic container based on some configuration example.\"\"\"\n\nimport collections\n\nimport 
dependency_injector.containers as containers\n\n\n# Defining several example services:\nUsersService = collections.namedtuple('UsersService', [])\nAuthService = collections.namedtuple('AuthService', [])\n\n\ndef import_cls(cls_name):\n \"\"\"Import class by its fully qualified name.\n\n In terms of current example it is just a small helper function. Please,\n don't use it in production approaches.\n \"\"\"\n path_components = cls_name.split('.')\n module = __import__('.'.join(path_components[:-1]),\n locals(),\n globals(),\n fromlist=path_components[-1:])\n return getattr(module, path_components[-1])\n\n\n# \"Parsing\" some configuration:\nconfig = {\n 'services': {\n 'users': {\n 'class': '__main__.UsersService',\n 'provider_class': 'dependency_injector.providers.Factory',\n },\n 'auth': {\n 'class': '__main__.AuthService',\n 'provider_class': 'dependency_injector.providers.Factory',\n }\n }\n}\n\n# Creating empty container of service providers:\nservices = containers.DynamicContainer()\n\n# Filling dynamic container with service providers using configuration:\nfor service_name, service_info in config['services'].iteritems():\n # Runtime importing of service and service provider classes:\n service_cls = import_cls(service_info['class'])\n service_provider_cls = import_cls(service_info['provider_class'])\n\n # Binding service provider to the dynamic service providers catalog:\n setattr(services, service_name, service_provider_cls(service_cls))\n\n# Creating some objects:\nusers_service = services.users()\nauth_service = services.auth()\n\n# Making some asserts:\nassert isinstance(users_service, UsersService)\nassert isinstance(auth_service, AuthService)\n", "id": "10515001", "language": "Python", "matching_score": 3.2093961238861084, "max_stars_count": 0, "path": "examples/containers/dynamic_runtime_creation.py" }, { "content": "\"\"\"Declarative IoC container's provider injections example.\"\"\"\n\nimport sqlite3\nimport collections\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nUsersService = collections.namedtuple('UsersService', ['db'])\nAuthService = collections.namedtuple('AuthService', ['db', 'users_service'])\n\n\nclass Services(containers.DeclarativeContainer):\n \"\"\"IoC container of service providers.\"\"\"\n\n database = providers.Singleton(sqlite3.connect, ':memory:')\n\n users = providers.Factory(UsersService,\n db=database)\n\n auth = providers.Factory(AuthService,\n db=database,\n users_service=users)\n\n\n# Retrieving service providers from container:\nusers_service = Services.users()\nauth_service = Services.auth()\n\n# Making some asserts:\nassert users_service.db is auth_service.db is Services.database()\nassert isinstance(auth_service.users_service, UsersService)\nassert users_service is not Services.users()\nassert auth_service is not Services.auth()\n", "id": "3694131", "language": "Python", "matching_score": 2.7757461071014404, "max_stars_count": 0, "path": "examples/containers/declarative_injections.py" }, { "content": "\"\"\"Example of dependency injection in Python.\"\"\"\n\nimport logging\nimport sqlite3\n\nimport boto3\n\nimport example.main\nimport example.services\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nclass Core(containers.DeclarativeContainer):\n \"\"\"IoC container of core component providers.\"\"\"\n\n config = providers.Configuration('config')\n\n logger = providers.Singleton(logging.Logger, name='example')\n\n\nclass 
Gateways(containers.DeclarativeContainer):\n \"\"\"IoC container of gateway (API clients to remote services) providers.\"\"\"\n\n database = providers.Singleton(sqlite3.connect, Core.config.database.dsn)\n\n s3 = providers.Singleton(\n boto3.client, 's3',\n aws_access_key_id=Core.config.aws.access_key_id,\n aws_secret_access_key=Core.config.aws.secret_access_key)\n\n\nclass Services(containers.DeclarativeContainer):\n \"\"\"IoC container of business service providers.\"\"\"\n\n users = providers.Factory(example.services.UsersService,\n db=Gateways.database,\n logger=Core.logger)\n\n auth = providers.Factory(example.services.AuthService,\n db=Gateways.database,\n logger=Core.logger,\n token_ttl=Core.config.auth.token_ttl)\n\n photos = providers.Factory(example.services.PhotosService,\n db=Gateways.database,\n s3=Gateways.s3,\n logger=Core.logger)\n\n\nclass Application(containers.DeclarativeContainer):\n \"\"\"IoC container of application component providers.\"\"\"\n\n main = providers.Callable(example.main.main,\n users_service=Services.users,\n auth_service=Services.auth,\n photos_service=Services.photos)\n", "id": "5710058", "language": "Python", "matching_score": 4.894290924072266, "max_stars_count": 0, "path": "examples/miniapps/services_v1/containers.py" }, { "content": "\"\"\"Example of dependency injection in Python.\"\"\"\n\nimport logging\nimport sqlite3\n\nimport boto3\n\nfrom dependency_injector import containers, providers\nfrom example import services, main\n\n\nclass IocContainer(containers.DeclarativeContainer):\n \"\"\"Application IoC container.\"\"\"\n\n config = providers.Configuration('config')\n logger = providers.Singleton(logging.Logger, name='example')\n\n # Gateways\n\n database_client = providers.Singleton(sqlite3.connect, config.database.dsn)\n\n s3_client = providers.Singleton(\n boto3.client, 's3',\n aws_access_key_id=config.aws.access_key_id,\n aws_secret_access_key=config.aws.secret_access_key,\n )\n\n # Services\n\n users_service = providers.Factory(\n services.UsersService,\n db=database_client,\n logger=logger,\n )\n\n auth_service = providers.Factory(\n services.AuthService,\n token_ttl=config.auth.token_ttl,\n db=database_client,\n logger=logger,\n )\n\n photos_service = providers.Factory(\n services.PhotosService,\n db=database_client,\n s3=s3_client,\n logger=logger,\n )\n\n # Misc\n\n main = providers.Callable(\n main.main,\n users_service=users_service,\n auth_service=auth_service,\n photos_service=photos_service,\n )\n", "id": "9546341", "language": "Python", "matching_score": 3.0886929035186768, "max_stars_count": 0, "path": "examples/miniapps/services_v2/container.py" }, { "content": "\"\"\"Run example of dependency injection in Python.\"\"\"\n\nimport sys\nimport logging\n\nfrom container import IocContainer\n\n\nif __name__ == '__main__':\n # Configure container:\n container = IocContainer(\n config={\n 'database': {\n 'dsn': ':memory:',\n },\n 'aws': {\n 'access_key_id': 'KEY',\n 'secret_access_key': 'SECRET',\n },\n 'auth': {\n 'token_ttl': 3600,\n },\n }\n )\n container.logger().addHandler(logging.StreamHandler(sys.stdout))\n\n # Run application:\n container.main(*sys.argv[1:])\n", "id": "8666187", "language": "Python", "matching_score": 4.53033447265625, "max_stars_count": 0, "path": "examples/miniapps/services_v2/run.py" }, { "content": "\"\"\"Run example application.\"\"\"\n\nimport sys\nimport logging\n\nfrom containers import Core, Application\n\n\nif __name__ == '__main__':\n # Configure platform:\n Core.config.override({'database': 
{'dsn': ':memory:'},\n 'aws': {'access_key_id': 'KEY',\n 'secret_access_key': 'SECRET'},\n 'auth': {'token_ttl': 3600}})\n Core.logger().addHandler(logging.StreamHandler(sys.stdout))\n\n # Run application:\n Application.main(uid=sys.argv[1],\n password=<PASSWORD>[2],\n photo=sys.argv[3])\n", "id": "5125260", "language": "Python", "matching_score": 1.9932024478912354, "max_stars_count": 0, "path": "examples/miniapps/services_v1/run.py" }, { "content": "\"\"\"Run 'Bundles' example application.\"\"\"\n\nimport sqlite3\nimport boto3\n\nfrom dependency_injector import containers\nfrom dependency_injector import providers\n\nfrom bundles.users import Users\nfrom bundles.photos import Photos\n\n\nclass Core(containers.DeclarativeContainer):\n \"\"\"Core container.\"\"\"\n\n config = providers.Configuration('config')\n sqlite = providers.Singleton(sqlite3.connect, config.database.dsn)\n s3 = providers.Singleton(\n boto3.client, 's3',\n aws_access_key_id=config.aws.access_key_id,\n aws_secret_access_key=config.aws.secret_access_key)\n\n\nif __name__ == '__main__':\n # Initializing containers\n core = Core(config={'database': {'dsn': ':memory:'},\n 'aws': {'access_key_id': 'KEY',\n 'secret_access_key': 'SECRET'}})\n users = Users(database=core.sqlite)\n photos = Photos(database=core.sqlite, file_storage=core.s3)\n\n # Fetching few users\n user_repository = users.user_repository()\n user1 = user_repository.get(id=1)\n user2 = user_repository.get(id=2)\n\n # Making some checks\n assert user1.id == 1\n assert user2.id == 2\n assert user_repository.db is core.sqlite()\n", "id": "1451748", "language": "Python", "matching_score": 3.003103494644165, "max_stars_count": 0, "path": "examples/miniapps/bundles/run.py" }, { "content": "\"\"\"Photos bundle.\"\"\"\n\nfrom dependency_injector import containers\nfrom dependency_injector import providers\n\nfrom . import entities\nfrom . import repositories\n\n\nclass Photos(containers.DeclarativeContainer):\n \"\"\"Photos bundle container.\"\"\"\n\n database = providers.Dependency()\n file_storage = providers.Dependency()\n\n photo = providers.Factory(entities.Photo)\n photo_repository = providers.Singleton(repositories.PhotoRepository,\n object_factory=photo.provider,\n fs=file_storage,\n db=database)\n", "id": "9517312", "language": "Python", "matching_score": 3.1608073711395264, "max_stars_count": 0, "path": "examples/miniapps/bundles/bundles/photos/__init__.py" }, { "content": "\"\"\"Users bundle.\"\"\"\n\nfrom dependency_injector import containers\nfrom dependency_injector import providers\n\nfrom . import entities\nfrom . 
import repositories\n\n\nclass Users(containers.DeclarativeContainer):\n \"\"\"Users bundle container.\"\"\"\n\n database = providers.Dependency()\n\n user = providers.Factory(entities.User)\n user_repository = providers.Singleton(repositories.UserRepository,\n object_factory=user.provider,\n db=database)\n", "id": "5340950", "language": "Python", "matching_score": 1.0542887449264526, "max_stars_count": 0, "path": "examples/miniapps/bundles/bundles/users/__init__.py" }, { "content": "\"\"\"Photos bundle entities module.\"\"\"\n\n\nclass Photo(object):\n \"\"\"Photo entity.\"\"\"\n", "id": "2098655", "language": "Python", "matching_score": 1.2228962182998657, "max_stars_count": 0, "path": "examples/miniapps/bundles/bundles/photos/entities.py" }, { "content": "\"\"\"Users bundle entities module.\"\"\"\n\n\nclass User(object):\n \"\"\"User entity.\"\"\"\n\n def __init__(self, id):\n \"\"\"Initializer.\"\"\"\n self.id = id\n", "id": "11417170", "language": "Python", "matching_score": 0.24469338357448578, "max_stars_count": 0, "path": "examples/miniapps/bundles/bundles/users/entities.py" }, { "content": "\"\"\"Sample data classes.\"\"\"\n\n\nclass SqlAlchemyDatabaseService:\n \"\"\"Database service of an entity.\"\"\"\n\n def __init__(self, session, base_class):\n \"\"\"Initialize object.\"\"\"\n self.session = session\n self.base_class = base_class\n\n\nclass TokensService:\n \"\"\"Tokens service.\"\"\"\n\n def __init__(self, id_generator, database):\n \"\"\"Initialize object.\"\"\"\n self.id_generator = id_generator\n self.database = database\n\n\nclass Token:\n \"\"\"Token entity.\"\"\"\n\n\nclass UsersService:\n \"\"\"Users service.\"\"\"\n\n def __init__(self, id_generator, database):\n \"\"\"Initialize object.\"\"\"\n self.id_generator = id_generator\n self.database = database\n\n\nclass User:\n \"\"\"User entity.\"\"\"\n\n\n# Sample objects\nsession = object()\nid_generator = object()\n", "id": "6212313", "language": "Python", "matching_score": 3.0679445266723633, "max_stars_count": 0, "path": "examples/miniapps/factory_patterns/data.py" }, { "content": "\"\"\"`Chained Factories` pattern.\"\"\"\n\nfrom dependency_injector import providers\n\nfrom data import (\n id_generator,\n session,\n SqlAlchemyDatabaseService,\n TokensService,\n Token,\n UsersService,\n User,\n)\n\n\n# \"Chained Factories\" pattern\n\ndatabase = providers.Factory(SqlAlchemyDatabaseService, session=session)\n\ntokens = providers.Factory(\n TokensService,\n id_generator=id_generator,\n database=providers.Factory(database, base_class=Token),\n)\n\nusers = providers.Factory(\n UsersService,\n id_generator=id_generator,\n database=providers.Factory(database, base_class=User),\n)\n\ntokens_service = tokens()\nassert tokens_service.database.base_class is Token\n\nusers_service = users()\nassert users_service.database.base_class is User\n\n# Explanation & some more examples\n\n# 1. Keyword arguments of upper level factory are added to lower level factory\nchained_dict_factory = providers.Factory(\n providers.Factory(dict, arg1=1),\n arg2=2,\n)\nprint(chained_dict_factory()) # prints: {'arg1': 1, 'arg2': 2}\n\n# 2. Keyword arguments of upper level factory have priority\nchained_dict_factory = providers.Factory(\n providers.Factory(dict, arg1=1),\n arg1=2,\n)\nprint(chained_dict_factory()) # prints: {'arg1': 2}\n\n# 3. 
Keyword arguments provided from context have most priority\nchained_dict_factory = providers.Factory(\n providers.Factory(dict, arg1=1),\n arg1=2,\n)\nprint(chained_dict_factory(arg1=3)) # prints: {'arg1': 3}\n", "id": "1781278", "language": "Python", "matching_score": 4.8018083572387695, "max_stars_count": 0, "path": "examples/miniapps/factory_patterns/chained_factories.py" }, { "content": "\"\"\"`Factory of Factories` pattern.\"\"\"\n\nfrom dependency_injector import providers\n\nfrom data import (\n id_generator,\n session,\n SqlAlchemyDatabaseService,\n TokensService,\n Token,\n UsersService,\n User,\n)\n\n\n# \"Factory of Factories\" pattern\n\ndatabase_factory = providers.Factory(\n providers.Factory,\n SqlAlchemyDatabaseService,\n session=session,\n)\n\ntokens = providers.Factory(\n TokensService,\n id_generator=id_generator,\n database=database_factory(base_class=Token),\n)\n\nusers = providers.Factory(\n UsersService,\n id_generator=id_generator,\n database=database_factory(base_class=User),\n)\n\ntokens_service = tokens()\nassert tokens_service.database.base_class is Token\n\nusers_service = users()\nassert users_service.database.base_class is User\n\n# Explanation & some more examples\n\n# 1. Keyword arguments of upper level factory are added to lower level factory\nfactory_of_dict_factories = providers.Factory(\n providers.Factory,\n dict,\n arg1=1,\n)\ndict_factory = factory_of_dict_factories(arg2=2)\nprint(dict_factory()) # prints: {'arg1': 1, 'arg2': 2}\n\n# 2. Keyword arguments of upper level factory have priority\nfactory_of_dict_factories = providers.Factory(\n providers.Factory,\n dict,\n arg1=1,\n)\ndict_factory = factory_of_dict_factories(arg1=2)\nprint(dict_factory()) # prints: {'arg1': 2}\n\n# 3. Keyword arguments provided from context have most priority\nfactory_of_dict_factories = providers.Factory(\n providers.Factory,\n dict,\n arg1=1,\n)\ndict_factory = factory_of_dict_factories(arg1=2)\nprint(dict_factory(arg1=3)) # prints: {'arg1': 3}\n", "id": "9566709", "language": "Python", "matching_score": 2.3661797046661377, "max_stars_count": 0, "path": "examples/miniapps/factory_patterns/factory_of_factories.py" }, { "content": "\"\"\"`Dependency` providers example.\"\"\"\n\nimport sqlite3\nimport contextlib\n\nimport dependency_injector.providers as providers\n\n\nclass UsersService(object):\n \"\"\"Example class UsersService.\n\n UsersService has dependency on DBAPI 2.0 database connection.\n \"\"\"\n\n def __init__(self, database):\n \"\"\"Initializer.\n\n :param database: Database connection.\n :type database: sqlite3.dbapi2.Connection\n \"\"\"\n self.database = database\n self.database.row_factory = sqlite3.dbapi2.Row\n\n def init_database(self):\n \"\"\"Initialize database, if it has not been initialized yet.\"\"\"\n with contextlib.closing(self.database.cursor()) as cursor:\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS users(\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name VARCHAR(32)\n )\n \"\"\")\n\n def create(self, name):\n \"\"\"Create user with provided name and return his id.\"\"\"\n with contextlib.closing(self.database.cursor()) as cursor:\n cursor.execute('INSERT INTO users(name) VALUES (?)', (name,))\n return cursor.lastrowid\n\n def get_by_id(self, id):\n \"\"\"Return user info by user id.\"\"\"\n with contextlib.closing(self.database.cursor()) as cursor:\n cursor.execute('SELECT id, name FROM users WHERE id=?', (id,))\n return cursor.fetchone()\n\n\n# Database and UsersService providers:\ndatabase = 
providers.Dependency(instance_of=sqlite3.dbapi2.Connection)\nusers_service_factory = providers.Factory(UsersService,\n database=database)\n\n# Out of library's scope.\n#\n# Setting database provider:\ndatabase.provided_by(providers.Singleton(sqlite3.dbapi2.Connection,\n database=':memory:',\n timeout=30,\n detect_types=True,\n isolation_level='EXCLUSIVE'))\n\n# Creating UsersService instance:\nusers_service = users_service_factory()\n\n# Initializing UsersService database:\nusers_service.init_database()\n\n# Creating test user and retrieving full information about him:\ntest_user_id = users_service.create(name='test_user')\ntest_user = users_service.get_by_id(test_user_id)\n\n# Making some asserts:\nassert test_user['id'] == 1\nassert test_user['name'] == 'test_user'\n", "id": "4343251", "language": "Python", "matching_score": 2.7870423793792725, "max_stars_count": 0, "path": "examples/providers/dependency.py" }, { "content": "\"\"\"Overriding user's model example.\"\"\"\n\nimport dependency_injector.providers as providers\n\n\nclass User(object):\n \"\"\"Example class User.\"\"\"\n\n def __init__(self, id, password):\n \"\"\"Initializer.\"\"\"\n self.id = id\n self.password = password\n super(User, self).__init__()\n\n\nclass UsersService(object):\n \"\"\"Example class UsersService.\"\"\"\n\n def __init__(self, user_cls):\n \"\"\"Initializer.\"\"\"\n self.user_cls = user_cls\n super(UsersService, self).__init__()\n\n def get_by_id(self, id):\n \"\"\"Find user by his id and return user model.\"\"\"\n return self.user_cls(id=id, password='secret' + str(id))\n\n\n# Users factory and UsersService provider:\nusers_service = providers.Factory(UsersService, user_cls=User)\n\n# Getting several users and making some asserts:\nuser1 = users_service().get_by_id(1)\nuser2 = users_service().get_by_id(2)\n\nassert isinstance(user1, User)\nassert user1.id == 1\nassert user1.password == '<PASSWORD>'\n\nassert isinstance(user2, User)\nassert user2.id == 2\nassert user2.password == '<PASSWORD>'\n\nassert user1 is not user2\n\n# Extending user model and user service for adding custom attributes without\n# making any changes to client's code.\n\n\nclass ExtendedUser(User):\n \"\"\"Example class ExtendedUser.\"\"\"\n\n def __init__(self, id, password, first_name=None, last_name=None,\n gender=None):\n \"\"\"Initializer.\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.gender = gender\n super(ExtendedUser, self).__init__(id, password)\n\n\nclass ExtendedUsersService(UsersService):\n \"\"\"Example class ExtendedUsersService.\"\"\"\n\n def get_by_id(self, id):\n \"\"\"Find user by his id and return user model.\"\"\"\n user = super(ExtendedUsersService, self).get_by_id(id)\n user.first_name = 'John' + str(id)\n user.last_name = 'Smith' + str(id)\n user.gender = 'male'\n return user\n\n\n# Overriding users_service provider:\nextended_users_service = providers.Factory(ExtendedUsersService,\n user_cls=ExtendedUser)\nusers_service.override(extended_users_service)\n\n# Getting few other users users and making some asserts:\nuser3 = users_service().get_by_id(3)\nuser4 = users_service().get_by_id(4)\n\nassert isinstance(user3, ExtendedUser)\nassert user3.id == 3\nassert user3.password == '<PASSWORD>'\nassert user3.first_name == 'John3'\nassert user3.last_name == 'Smith3'\n\nassert isinstance(user4, ExtendedUser)\nassert user4.id == 4\nassert user4.password == '<PASSWORD>'\nassert user4.first_name == 'John4'\nassert user4.last_name == 'Smith4'\n\nassert user3 is not user4\n", "id": "10201823", 
"language": "Python", "matching_score": 1.7727817296981812, "max_stars_count": 0, "path": "examples/providers/overriding_users_model.py" }, { "content": "\"\"\"Example of dependency injection and password hashing in Python.\"\"\"\n\nimport passlib.hash\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nclass UsersService(object):\n \"\"\"Users service.\"\"\"\n\n def __init__(self, password_hasher):\n \"\"\"Initializer.\"\"\"\n self._password_hasher = password_hasher\n\n def create_user(self, name, password):\n \"\"\"Create user with hashed password.\"\"\"\n hashed_password = self._password_hasher(password)\n return dict(name=name, password=hashed_password)\n\n\nclass Container(containers.DeclarativeContainer):\n \"\"\"Inversion of control container.\"\"\"\n\n password_hasher = providers.Callable(\n passlib.hash.sha256_crypt.encrypt,\n salt_size=16,\n rounds=10000)\n\n users_service = providers.Factory(\n UsersService,\n password_hasher=password_hasher.provider)\n\n\nif __name__ == '__main__':\n container = Container()\n users_service = container.users_service()\n\n user1 = users_service.create_user(name='Roman', password='<PASSWORD>')\n user2 = users_service.create_user(name='Vitaly', password='<PASSWORD>')\n\n print(user1, user2)\n", "id": "11216263", "language": "Python", "matching_score": 3.1597065925598145, "max_stars_count": 0, "path": "examples/miniapps/password_hashing/example.py" }, { "content": "\"\"\"`Callable` providers with keyword arguments example.\"\"\"\n\nimport passlib.hash\n\nimport dependency_injector.providers as providers\n\n\n# Password hasher and verifier providers:\npassword_hasher = providers.Callable(passlib.hash.sha256_crypt.encrypt,\n salt_size=16,\n rounds=10000)\npassword_verifier = providers.Callable(passlib.hash.sha256_crypt.verify)\n\n# Making some asserts:\nhashed_password = password_hasher('<PASSWORD>')\nassert password_verifier('<PASSWORD>', hashed_password)\n", "id": "1260777", "language": "Python", "matching_score": 2.0890982151031494, "max_stars_count": 0, "path": "examples/providers/callable_kwargs.py" }, { "content": "\"\"\"`Callable` providers with positional arguments example.\"\"\"\n\nimport dependency_injector.providers as providers\n\n\n# Creating even and odd filter providers:\neven_filter = providers.Callable(filter, lambda x: x % 2 == 0)\nodd_filter = providers.Callable(filter, lambda x: x % 2 != 0)\n\n# Creating even and odd ranges using range() and filter providers:\neven_range = even_filter(range(1, 10))\nodd_range = odd_filter(range(1, 10))\n\n# Making some asserts:\nassert even_range == [2, 4, 6, 8]\nassert odd_range == [1, 3, 5, 7, 9]\n", "id": "4613495", "language": "Python", "matching_score": 1.6818290948867798, "max_stars_count": 0, "path": "examples/providers/callable_args.py" }, { "content": "\"\"\"Object providers example.\"\"\"\n\nimport dependency_injector.providers as providers\n\n\n# Creating object provider:\nobject_provider = providers.Object(1)\n\n# Making some asserts:\nassert object_provider() == 1\n", "id": "491398", "language": "Python", "matching_score": 0.20312075316905975, "max_stars_count": 0, "path": "examples/providers/object.py" }, { "content": "\"\"\"`Coroutine` providers example with @asyncio.coroutine decorator.\n\nCurrent example works only fot Python 3.4+.\n\"\"\"\n\nimport asyncio\n\nimport dependency_injector.providers as providers\n\n\[email protected]\ndef coroutine_function(arg1, arg2):\n \"\"\"Sample coroutine function.\"\"\"\n yield from 
asyncio.sleep(0.1)\n return arg1, arg2\n\n\ncoroutine_provider = providers.Coroutine(coroutine_function, arg1=1, arg2=2)\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n arg1, arg2 = loop.run_until_complete(coroutine_provider())\n\n assert (arg1, arg2) == (1, 2)\n assert asyncio.iscoroutinefunction(coroutine_provider)\n", "id": "120756", "language": "Python", "matching_score": 4.034546852111816, "max_stars_count": 0, "path": "examples/providers/coroutine.py" }, { "content": "\"\"\"`Coroutine` providers example with async / await syntax.\n\nCurrent example works only fot Python 3.5+.\n\"\"\"\n\nimport asyncio\n\nimport dependency_injector.providers as providers\n\n\nasync def coroutine_function(arg1, arg2):\n \"\"\"Sample coroutine function.\"\"\"\n await asyncio.sleep(0.1)\n return arg1, arg2\n\n\ncoroutine_provider = providers.Coroutine(coroutine_function, arg1=1, arg2=2)\n\n\nif __name__ == '__main__':\n loop = asyncio.get_event_loop()\n arg1, arg2 = loop.run_until_complete(coroutine_provider())\n\n assert (arg1, arg2) == (1, 2)\n assert asyncio.iscoroutinefunction(coroutine_provider)\n", "id": "7381447", "language": "Python", "matching_score": 0.060560740530490875, "max_stars_count": 0, "path": "examples/providers/coroutine_async_await.py" }, { "content": "\"\"\"@inject decorator example.\"\"\"\n\nfrom container import Container\n\nfrom dependency_injector.injections import inject\n\n\n@inject(car_factory=Container.car_factory.delegate())\n@inject(extra_engine=Container.engine_factory)\ndef main(car_factory, extra_engine):\n \"\"\"Run application.\"\"\"\n car1 = car_factory(serial_number=1)\n car2 = car_factory(serial_number=2, engine=extra_engine)\n\n assert car1.serial_number == 1 and car2.serial_number == 2\n assert car1.engine is not car2.engine\n assert car2.engine is extra_engine\n\n\nif __name__ == '__main__':\n main()\n", "id": "1407778", "language": "Python", "matching_score": 2.74173641204834, "max_stars_count": 0, "path": "examples/speech/inject.py" }, { "content": "\"\"\"IoC container example.\"\"\"\n\nimport collections\n\nimport dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nEngine = collections.namedtuple('Engine', [])\nCar = collections.namedtuple('Car', ['serial_number', 'engine'])\n\n\nclass Container(containers.DeclarativeContainer):\n \"\"\"IoC container.\"\"\"\n\n engine_factory = providers.Factory(Engine)\n\n car_factory = providers.Factory(Car, engine=engine_factory)\n\n\nif __name__ == '__main__':\n car1 = Container.car_factory(serial_number=1)\n car2 = Container.car_factory(serial_number=2)\n\n assert car1.serial_number == 1 and car2.serial_number == 2\n assert car1.engine is not car2.engine\n", "id": "11662143", "language": "Python", "matching_score": 1.3277866840362549, "max_stars_count": 0, "path": "examples/speech/container.py" }, { "content": "\"\"\"Dependency injection example, Cars & Engines.\"\"\"\n\nimport example.cars\nimport example.engines\n\n\nif __name__ == '__main__':\n gasoline_car = example.cars.Car(example.engines.GasolineEngine())\n diesel_car = example.cars.Car(example.engines.DieselEngine())\n electro_car = example.cars.Car(example.engines.ElectroEngine())\n", "id": "268892", "language": "Python", "matching_score": 2.5436317920684814, "max_stars_count": 0, "path": "examples/miniapps/engines_cars/example_di.py" }, { "content": "\"\"\"Dependency injection example, Cars & Engines IoC containers.\"\"\"\n\nimport example.cars\nimport example.engines\n\nimport 
dependency_injector.containers as containers\nimport dependency_injector.providers as providers\n\n\nclass Engines(containers.DeclarativeContainer):\n \"\"\"IoC container of engine providers.\"\"\"\n\n gasoline = providers.Factory(example.engines.GasolineEngine)\n\n diesel = providers.Factory(example.engines.DieselEngine)\n\n electro = providers.Factory(example.engines.ElectroEngine)\n\n\nclass Cars(containers.DeclarativeContainer):\n \"\"\"IoC container of car providers.\"\"\"\n\n gasoline = providers.Factory(example.cars.Car,\n engine=Engines.gasoline)\n\n diesel = providers.Factory(example.cars.Car,\n engine=Engines.diesel)\n\n electro = providers.Factory(example.cars.Car,\n engine=Engines.electro)\n\n\nif __name__ == '__main__':\n gasoline_car = Cars.gasoline()\n diesel_car = Cars.diesel()\n electro_car = Cars.electro()\n", "id": "4441143", "language": "Python", "matching_score": 2.297621488571167, "max_stars_count": 0, "path": "examples/miniapps/engines_cars/example_ioc_containers.py" }, { "content": "\"\"\"Dependency injection example, engines module.\"\"\"\n\n\nclass Engine(object):\n \"\"\"Example engine base class.\n\n Engine is a heart of every car. Engine is a very common term and could be\n implemented in very different ways.\n \"\"\"\n\n\nclass GasolineEngine(Engine):\n \"\"\"Gasoline engine.\"\"\"\n\n\nclass DieselEngine(Engine):\n \"\"\"Diesel engine.\"\"\"\n\n\nclass ElectroEngine(Engine):\n \"\"\"Electro engine.\"\"\"\n", "id": "1776475", "language": "Python", "matching_score": 1.579667568206787, "max_stars_count": 0, "path": "examples/miniapps/engines_cars/example/engines.py" }, { "content": "\"\"\"Dependency injection example, cars module.\"\"\"\n\n\nclass Car(object):\n \"\"\"Example car.\"\"\"\n\n def __init__(self, engine):\n \"\"\"Initializer.\"\"\"\n self._engine = engine # Engine is injected\n", "id": "8508448", "language": "Python", "matching_score": 0.10377287119626999, "max_stars_count": 0, "path": "examples/miniapps/engines_cars/example/cars.py" }, { "content": "\"\"\"Dependency injector top-level package.\"\"\"\n\n__version__ = '3.14.3'\n\"\"\"Version number that follows semantic versioning.\n\n:type: str\n\"\"\"\n", "id": "3478266", "language": "Python", "matching_score": 0.8420904874801636, "max_stars_count": 0, "path": "src/dependency_injector/__init__.py" }, { "content": "\"\"\"Bundles package.\"\"\"\n", "id": "10998277", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "examples/miniapps/bundles/bundles/__init__.py" }, { "content": "\"\"\"`AbstractFactory` providers example.\"\"\"\n\nimport cache\n\nimport dependency_injector.providers as providers\n\n\n# Define abstract cache client factory:\ncache_client_factory = providers.AbstractFactory(cache.AbstractCacheClient)\n\nif __name__ == '__main__':\n # Override abstract factory with redis client factory:\n cache_client_factory.override(providers.Factory(cache.RedisCacheClient,\n host='localhost',\n port=6379,\n db=0))\n redis_cache = cache_client_factory()\n print(redis_cache) # <cache.RedisCacheClient object at 0x10975bc50>\n\n # Override abstract factory with memcache client factory:\n cache_client_factory.override(providers.Factory(cache.MemcacheCacheClient,\n hosts=['10.0.1.1',\n '10.0.1.2',\n '10.0.1.3'],\n port=11211,\n prefix='my_app'))\n memcache_cache = cache_client_factory()\n print(memcache_cache) # <cache.MemcacheCacheClient object at 0x10975bc90>\n", "id": "11618816", "language": "Python", "matching_score": 3.1628942489624023, "max_stars_count": 0, "path": 
"examples/providers/abstract_factory/example.py" }, { "content": "\"\"\"Example hierarchy of cache clients with abstract base class.\"\"\"\n\n\nclass AbstractCacheClient(object):\n \"\"\"Abstract cache client.\"\"\"\n\n\nclass RedisCacheClient(AbstractCacheClient):\n \"\"\"Cache client implementation based on Redis.\"\"\"\n\n def __init__(self, host, port, db):\n \"\"\"Initializer.\"\"\"\n self.host = host\n self.port = port\n self.db = db\n\n\nclass MemcacheCacheClient(AbstractCacheClient):\n \"\"\"Cache client implementation based on Memcached.\"\"\"\n\n def __init__(self, hosts, port, prefix):\n \"\"\"Initializer.\"\"\"\n self.hosts = hosts\n self.port = port\n self.prefix = prefix\n", "id": "6760930", "language": "Python", "matching_score": 0.8523302674293518, "max_stars_count": 0, "path": "examples/providers/abstract_factory/cache.py" }, { "content": "\"\"\"TBD.\"\"\"\n\n\nclass ApiClient(object):\n \"\"\"Some API client.\"\"\"\n\n def __init__(self, host, api_key):\n \"\"\"Initializer.\"\"\"\n self.host = host\n self.api_key = api_key\n\n def call(self, operation, data):\n \"\"\"Make some network operations.\"\"\"\n print('API call [{0}:{1}], method - {2}, data - {3}'.format(\n self.host, self.api_key, operation, repr(data)))\n", "id": "6435309", "language": "Python", "matching_score": 2.1658854484558105, "max_stars_count": 0, "path": "examples/miniapps/api_client/api.py" }, { "content": "\"\"\"TBD.\"\"\"\n\nfrom dependency_injector import providers\n\nimport api\nimport models\n\n\n# Creating ApiClient and User providers:\napi_client = providers.Singleton(api.ApiClient,\n host='production.com',\n api_key='PROD_API_KEY')\nuser_factory = providers.Factory(models.User,\n api_client=api_client)\n\n\nif __name__ == '__main__':\n # Creating several users and register them:\n user1 = user_factory(1)\n user1.register()\n # API call [production.com:PROD_API_KEY], method - register, data -\n # {'id': 1}\n\n user2 = user_factory(2)\n user2.register()\n # API call [production.com:PROD_API_KEY], method - register, data -\n # {'id': 2}\n\n # Overriding of ApiClient on dev environment:\n api_client.override(providers.Singleton(api.ApiClient,\n host='localhost',\n api_key='DEV_API_KEY'))\n\n user3 = user_factory(3)\n user3.register()\n # API call [localhost:DEV_API_KEY], method - register, data - {'id': 3}\n", "id": "3901399", "language": "Python", "matching_score": 3.1469719409942627, "max_stars_count": 0, "path": "examples/miniapps/api_client/main.py" }, { "content": "\"\"\"TBD.\"\"\"\n\nfrom mock import Mock\n\nimport main\nimport api\n\n# Mock ApiClient for testing:\nwith main.api_client.override(Mock(api.ApiClient)) as api_client_mock:\n user = main.user_factory('test')\n user.register()\n api_client_mock().call.assert_called_with('register', {'id': 'test'})\n", "id": "7469717", "language": "Python", "matching_score": 1.8953220844268799, "max_stars_count": 0, "path": "examples/miniapps/api_client/tests.py" }, { "content": "\"\"\"TBD.\"\"\"\n\n\nclass User(object):\n \"\"\"User model.\"\"\"\n\n def __init__(self, id, api_client):\n \"\"\"Initializer.\"\"\"\n self.id = id\n self.api_client = api_client\n\n def register(self):\n \"\"\"Register user.\"\"\"\n self.api_client.call('register', {'id': self.id})\n", "id": "8962508", "language": "Python", "matching_score": 1.4641042947769165, "max_stars_count": 0, "path": "examples/miniapps/api_client/models.py" } ]
1.948909
kremazar
[ { "content": "from menus.base import NavigationNode\nfrom menus.menu_pool import menu_pool\nfrom django.utils.translation import ugettext_lazy as _\nfrom cms.menu_bases import CMSAttachMenu\nfrom menus.base import Modifier\n\nfrom cms.models import Page\n\nclass TestMenu(CMSAttachMenu):\n\n name = _(\"test menu\")\n\n def get_nodes(self, request):\n nodes = []\n n = NavigationNode(_('sample root page'), \"/\", 1)\n n2 = NavigationNode(_('sample settings page'), \"/bye/\", 2)\n n3 = NavigationNode(_('sample account page'), \"/hello/\", 3)\n n4 = NavigationNode(_('sample my profile page'), \"/hello/world/\", 4, 3)\n nodes.append(n)\n nodes.append(n2)\n nodes.append(n3)\n nodes.append(n4)\n return nodes\nclass UserMenu(Menu):\n def get_nodes(self, request):\n return [\n NavigationNode(_(\"Profile\"), reverse(profile), 1, attr={'visible_for_anonymous': False}),\n NavigationNode(_(\"Log in\"), reverse(login), 3, attr={'visible_for_authenticated': False}),\n NavigationNode(_(\"Sign up\"), reverse(logout), 4, attr={'visible_for_authenticated': False}),\n NavigationNode(_(\"Log out\"), reverse(logout), 2, attr={'visible_for_anonymous': False}),\n ]\n\nclass MyExampleModifier(Modifier):\n \"\"\"\n This modifier makes the changed_by attribute of a page\n accessible for the menu system.\n \"\"\"\n def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):\n # only do something when the menu has already been cut\n if post_cut:\n # only consider nodes that refer to cms pages\n # and put them in a dict for efficient access\n page_nodes = {n.id: n for n in nodes if n.attr[\"is_page\"]}\n # retrieve the attributes of interest from the relevant pages\n pages = Page.objects.filter(id__in=page_nodes.keys()).values('id', 'changed_by')\n # loop over all relevant pages\n for page in pages:\n # take the node referring to the page\n node = page_nodes[page['id']]\n # put the changed_by attribute on the node\n node.attr[\"changed_by\"] = page['changed_by']\n return nodes\n\nclass Level(Modifier):\n \"\"\"\n marks all node levels\n \"\"\"\n post_cut = True\n\n def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):\n if breadcrumb:\n return nodes\n for node in nodes:\n if not node.parent:\n if post_cut:\n node.menu_level = 0\n else:\n node.level = 0\n self.mark_levels(node, post_cut)\n return nodes\n\n def mark_levels(self, node, post_cut):\n for child in node.children:\n if post_cut:\n child.menu_level = node.menu_level + 1\n else:\n child.level = node.level + 1\n self.mark_levels(child, post_cut)\n\nmenu_pool.register_modifier(Level)\nmenu_pool.register_modifier(MyExampleModifier)\nmenu_pool.register_menu(TestMenu)", "id": "403876", "language": "Python", "matching_score": 1.1677320003509521, "max_stars_count": 0, "path": "project/menu.py" }, { "content": "from django.db import models\nfrom django.conf import settings\nfrom datetime import datetime \nfrom djrichtextfield.models import RichTextField\nfrom ckeditor.fields import RichTextField\nfrom django.utils.html import mark_safe\nfrom django.contrib import admin\nfrom django.contrib.admin import ModelAdmin, TabularInline\nfrom django.utils.html import mark_safe\nfrom django.urls import reverse\nfrom django.utils.text import slugify\n\n\nclass News(models.Model):\n author = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE\n )\n title = models.CharField(max_length=150)\n description = RichTextField(blank=True,null=True)\n date = models.DateTimeField(default=datetime.now, blank=True)\n publish_date = 
models.DateTimeField(default=datetime.now, blank=True)\n slug = models.SlugField(\n default='',\n editable=False,\n )\n is_published=models.BooleanField(default=False)\n def get_first_image(self):\n return self.images.first()\n\n def get_absolute_url(self):\n kwargs = {\n 'pk': self.id,\n 'slug': self.slug\n }\n return reverse('article-pk-slug-detail', kwargs=kwargs)\n def save(self, *args, **kwargs):\n value = self.title\n self.slug = slugify(value, allow_unicode=True)\n super().save(*args, **kwargs)\n\nclass Category(models.Model):\n news = models.ForeignKey(News,default=1,on_delete=models.CASCADE, related_name='category')\n name = models.CharField(max_length=50)\n slug = models.SlugField(\n default='',\n editable=False,\n )\n def __unicode__(self):\n return self.name\n def get_absolute_url(self):\n kwargs = {\n 'pk': self.id,\n 'slug': self.slug\n }\n return reverse('article-pk-slug-detail', kwargs=kwargs)\n def save(self, *args, **kwargs):\n value = self.name\n self.slug = slugify(value, allow_unicode=True)\n super().save(*args, **kwargs)\n\n\nclass Img(models.Model):\n news = models.ForeignKey(News,default=1,on_delete=models.CASCADE, related_name='images')\n photo = models.ImageField(upload_to='media/images/')\n def __unicode__(self):\n return self.photo\n\n\n \n\n\n \n\n\n", "id": "7414672", "language": "Python", "matching_score": 2.885380744934082, "max_stars_count": 0, "path": "news/models.py" }, { "content": "from django.contrib import admin\nfrom news.models import News,Img,Category\n\nclass ImgInline(admin.TabularInline):\n model = Img\n\nclass CategoryInline(admin.TabularInline):\n model = Category\n\nclass NewsAdmin(admin.ModelAdmin):\n inlines = [\n ImgInline,\n CategoryInline\n ]\n\n\nadmin.site.register(News,NewsAdmin)\nadmin.site.register(Img)\nadmin.site.register(Category)", "id": "11236090", "language": "Python", "matching_score": 1.5176758766174316, "max_stars_count": 0, "path": "news/admin.py" }, { "content": "from django import template\nfrom django.template.loader import get_template\nregister = template.Library()\n\nfrom ..models import News\n\n\[email protected]_tag('news/count.html')\ndef show_results():\n obj = News.objects.all().order_by('-date')[:4]\n news = News.objects.all()\n return {'choices': obj,'news':news}\n \n\n\n\n\n", "id": "9578474", "language": "Python", "matching_score": 0.4652969241142273, "max_stars_count": 0, "path": "news/templatetags/custom_tags.py" }, { "content": "from django.shortcuts import render,get_object_or_404\nfrom django.http import Http404\nfrom news.models import News,Img,Category\nfrom django.views.generic import ListView\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render\nfrom django.views.generic import ListView\nfrom django.utils import timezone\n\n\n\nclass NewsList(ListView):\n paginate_by = 2\n model = News\n\ndef index(request):\n now = timezone.now()\n news= News.objects.filter(publish_date__lte=now)\n # img=Img.objects.all()\n paginator = Paginator(news, 4) \n page = request.GET.get('page')\n news=paginator.get_page(page)\n context = {'news': news,\n }\n return render(request, 'news/news.html', context)\n\ndef detail(request, news_id):\n news = get_object_or_404(News, pk=news_id)\n context = {'news': news,\n }\n return render(request, 'news/detail.html',context)\n\ndef title(request, news_id, news_title):\n news = get_object_or_404(News, pk=news_id)\n news2 = News.objects.get(slug=news_title)\n context = {'news': news,\n 'news2': news2,\n }\n return render(request, 
'news/detail.html',context)\n\ndef category(request,news_category):\n news = News.objects.filter(category__slug=news_category)\n context = {'news': news,\n }\n return render(request, 'news/news.html',context)\n", "id": "4527497", "language": "Python", "matching_score": 1.9648373126983643, "max_stars_count": 0, "path": "news/views.py" }, { "content": "from django.urls import path\n\nfrom . import views\n\napp_name = 'news'\nurlpatterns = [\n path('', views.index, name='index'),\n path('<int:news_id>/', views.detail, name='detail'),\n path('<int:news_id>/<str:news_title>', views.title, name='title'),\n path('<str:news_category>/', views.category, name='category')\n]", "id": "3584860", "language": "Python", "matching_score": 0.25340405106544495, "max_stars_count": 0, "path": "news/urls.py" }, { "content": "# Generated by Django 2.1.15 on 2020-02-26 11:26\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0004_auto_20200225_1237'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='img',\n name='news',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='news.News'),\n ),\n ]\n", "id": "3584487", "language": "Python", "matching_score": 3.647118091583252, "max_stars_count": 0, "path": "news/migrations/0005_auto_20200226_1126.py" }, { "content": "# Generated by Django 2.1.15 on 2020-02-25 12:03\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='news',\n name='image',\n ),\n migrations.AddField(\n model_name='img',\n name='news',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='news.News'),\n ),\n ]\n", "id": "12755076", "language": "Python", "matching_score": 1.9902254343032837, "max_stars_count": 0, "path": "news/migrations/0002_auto_20200225_1203.py" }, { "content": "# Generated by Django 2.1.15 on 2020-03-02 12:24\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0009_auto_20200302_1219'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='news',\n name='slug',\n field=models.SlugField(default='slug', max_length=150),\n ),\n migrations.AlterField(\n model_name='news',\n name='title',\n field=models.CharField(max_length=150),\n ),\n ]\n", "id": "965416", "language": "Python", "matching_score": 3.3844306468963623, "max_stars_count": 0, "path": "news/migrations/0010_auto_20200302_1224.py" }, { "content": "# Generated by Django 2.1.15 on 2020-03-03 12:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0011_auto_20200302_1241'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='news',\n name='is_published',\n field=models.BooleanField(default=False),\n ),\n ]\n", "id": "4720635", "language": "Python", "matching_score": 2.589134693145752, "max_stars_count": 0, "path": "news/migrations/0012_news_is_published.py" }, { "content": "# Generated by Django 2.1.15 on 2020-02-28 14:56\n\nimport datetime\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0005_auto_20200226_1126'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='news',\n name='publish_date',\n 
field=models.DateTimeField(blank=True, default=datetime.datetime.now),\n ),\n ]\n", "id": "5247681", "language": "Python", "matching_score": 2.4748544692993164, "max_stars_count": 0, "path": "news/migrations/0006_news_publish_date.py" }, { "content": "# Generated by Django 2.1.15 on 2020-03-03 13:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0012_news_is_published'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='category',\n name='slug',\n field=models.SlugField(default='', editable=False),\n ),\n ]\n", "id": "5801758", "language": "Python", "matching_score": 2.3596858978271484, "max_stars_count": 0, "path": "news/migrations/0013_category_slug.py" } ]
2.174956
brianaydemir
[ { "content": "\"\"\"\nType-directed parsing of JSON-like and YAML-like objects.\n\"\"\"\n# FIXME: Unnecessarily verbose exception traces.\n\nimport dataclasses\nimport enum\nimport pathlib\nimport typing\nfrom typing import Any, Literal, Type, TypeVar, Union, cast\n\nimport yaml\n\n__all__ = [\n \"ParseError\",\n #\n \"parse\",\n \"unparse\",\n \"reparse\",\n #\n \"load_yaml\",\n]\n\nT = TypeVar(\"T\")\n\n\nclass ParseError(Exception):\n \"\"\"\n An object cannot be parsed according to some specification.\n \"\"\"\n\n\ndef parse(data: Any, spec: Type[T]) -> T:\n \"\"\"\n Coerces `data` into an object that is compatible with the type `spec`.\n\n The coercion process is aware of:\n\n - `dict`, `list`\n - `Any`, `Literal`, `Union` (including `Optional`)\n - data classes\n - enumerations (`data` is passed as-is to the constructor)\n \"\"\"\n\n try:\n return cast(T, _parse(data, spec))\n except ParseError as exn:\n raise ParseError(f\"Failed to parse {data!r} as {spec!r}\") from exn\n\n\ndef _parse(data: Any, spec: Any) -> Any:\n # pylint: disable=too-many-branches,too-many-return-statements\n \"\"\"\n Implements the core logic of `parse`.\n\n This implementation allows us to:\n\n 1. Annotate `parse` with a meaningful type. Mypy cannot handle the\n code flow here. The clearest and simplest solution is to `cast` the\n final result and leave everything else dynamically typed.\n\n 2. Percolate errors in a way that provides meaningful context. The\n value that fails to parse might be buried deep in the original data\n structure.\n \"\"\"\n\n origin = typing.get_origin(spec)\n args = typing.get_args(spec)\n\n if origin is dict:\n if isinstance(data, dict):\n new_data = {}\n\n for (k, v) in data.items():\n new_k = parse(k, args[0])\n new_v = parse(v, args[1])\n\n new_data[new_k] = new_v\n\n return new_data\n\n raise ParseError(\"Not a dictionary\")\n\n if origin is list:\n if isinstance(data, list):\n return [parse(x, args[0]) for x in data]\n raise ParseError(\"Not a list\")\n\n if spec is Any:\n return data\n\n if origin is Literal:\n if data in args:\n return data\n raise ParseError(\"Not a valid literal\")\n\n if origin is Union:\n for new_spec in args:\n try:\n return parse(data, new_spec)\n except ParseError:\n pass\n raise ParseError(\"Exhausted the Union's branches\")\n\n if dataclasses.is_dataclass(spec):\n if isinstance(data, dict):\n new_data = {}\n\n for field in dataclasses.fields(spec):\n if field.name in data:\n new_data[field.name] = parse(data[field.name], field.type)\n\n try:\n return spec(**new_data)\n except (TypeError, ValueError) as exn:\n raise ParseError(\"Failed to construct data class instance\") from exn\n\n raise ParseError(\"Not a dictionary\")\n\n if isinstance(spec, type) and issubclass(spec, enum.Enum):\n try:\n return spec(data)\n except (TypeError, ValueError) as exn:\n raise ParseError(\"Failed to construct enumeration member\") from exn\n\n if isinstance(spec, type) and isinstance(data, spec):\n return data\n\n raise ParseError(\"Unrecognized object and type\")\n\n\ndef unparse(data: Any) -> Any:\n \"\"\"\n Coerces `data` into a JSON-like or YAML-like object.\n\n Informally, this function acts as the inverse of `parse`. 
The order of\n fields in data classes will be preserved if the implementation of `dict`\n preserves the insertion order of keys.\n \"\"\"\n\n if isinstance(data, enum.Enum):\n return data.value\n\n if dataclasses.is_dataclass(data):\n new_data = {}\n\n for field in dataclasses.fields(data):\n k = field.name\n v = getattr(data, field.name)\n\n if v is not field.default or field.default is not None:\n new_data[unparse(k)] = unparse(v)\n\n return new_data\n\n if isinstance(data, list):\n return [unparse(x) for x in data]\n\n if isinstance(data, dict):\n return {unparse(k): unparse(v) for (k, v) in sorted(data.items())}\n\n return data\n\n\ndef reparse(data: Any, spec: Type[T]) -> T:\n \"\"\"\n Coerces `data` into an object that is compatible with the type `spec`.\n \"\"\"\n\n return parse(unparse(data), spec)\n\n\ndef load_yaml(path: pathlib.Path, spec: Type[T]) -> T:\n \"\"\"\n Loads a single, \"safe\" YAML object with the given `spec` from a file.\n \"\"\"\n\n with open(path, encoding=\"utf-8\") as fp:\n obj = yaml.safe_load(fp)\n return parse(obj, spec)\n", "id": "10635096", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "baydemir/parsing.py" } ]
0
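A minimal usage sketch for the `parse`/`unparse` helpers defined in the baydemir/parsing.py record above. The `Priority` enum, the `Job` dataclass, its fields, and the import path `baydemir.parsing` are illustrative assumptions made for this sketch only; they are not taken from that repository.

import dataclasses
import enum
from typing import Optional

from baydemir.parsing import parse, unparse  # assumed module path, inferred from the "path" field above


class Priority(enum.Enum):
    LOW = "low"
    HIGH = "high"


@dataclasses.dataclass
class Job:
    name: str
    priority: Priority
    retries: Optional[int] = None  # hypothetical field; omitted in the input below, so it keeps its default


# parse() walks the type spec recursively: plain strings pass through unchanged,
# "high" is passed to the Priority constructor, and the collected kwargs are used
# to build a Job instance. A ParseError is raised if any piece fails to coerce.
job = parse({"name": "build", "priority": "high"}, Job)
assert job.priority is Priority.HIGH

# unparse() maps the object back to a YAML/JSON-like structure: enum members become
# their values, and the retries field (still at its None default) is dropped.
assert unparse(job) == {"name": "build", "priority": "high"}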
johnbradley
[ { "content": "from cwlpy import Workflow, WorkflowStep as Step, WorkflowStepConnection as Connection\nfrom ruamel import yaml\n\n# https://github.com/common-workflow-language/cwl-v1.1/blob/master/tests/revsort.cwl\n\n########################################\n# Create workflow and steps\n########################################\n\nworkflow = Workflow('revsort')\nrev_step = Step('rev', run='revtool.cwl')\nsort_step = Step('sorted', run='sorttool.cwl')\n\n# Add steps by chaining\nworkflow.step(rev_step).step(sort_step)\n\n########################################\n# Connect workflow and steps\n########################################\n\n# workflow.input -> rev_step.input\nworkflow.connect_input(rev_step, 'input')\n# workflow.reverse_sort -> sort_step.output\nworkflow.connect_input(sort_step, 'reverse_sort', 'reverse')\n# rev_step.output -> sort_step.input\nworkflow.connect_steps(rev_step, sort_step, 'output','input')\n# sort_step.output -> workflow.output\nworkflow.connect_output(sort_step, 'output')\n\nprint(yaml.safe_dump(workflow.save(), default_flow_style=False))\n", "id": "7882625", "language": "Python", "matching_score": 3.061349868774414, "max_stars_count": 3, "path": "example.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import Workflow, WorkflowStep as Step\nfrom ruamel import yaml\n\n\nREVSORT_WORKFLOW = \"\"\"class: Workflow\ncwlVersion: v1.0\nid: revsort\ninputs:\n- id: wf-input\n- id: wf-reverse_sort\noutputs:\n- id: wf-output\n outputSource: sorted/sortstep-output\nsteps:\n- id: rev\n in:\n - id: revstep-input\n source:\n id: wf-input\n out:\n - id: revstep-output\n run: revtool.cwl\n- id: sorted\n in:\n - id: sortstep-reverse\n source:\n id: wf-reverse_sort\n - id: sortstep-input\n source: rev/revstep-output\n out:\n - id: sortstep-output\n run: sorttool.cwl\n\"\"\"\n\n\nclass WorkflowBuilderTestCase(TestCase):\n\n def test_building_revsort(self):\n\n workflow = Workflow('revsort')\n rev_step = Step('rev', run='revtool.cwl')\n sort_step = Step('sorted', run='sorttool.cwl')\n\n # Add steps by chaining\n workflow.step(rev_step).step(sort_step)\n\n ########################################\n # Connect workflow and steps\n ########################################\n\n # workflow.input -> rev_step.input, workflow.reverse_sort -> sort_step.output\n workflow.\\\n connect_input(rev_step, 'wf-input', 'revstep-input').\\\n connect_input(sort_step, 'wf-reverse_sort', 'sortstep-reverse')\n\n # rev_step.output -> sort_step.input\n workflow.connect_steps(rev_step, sort_step, 'revstep-output', 'sortstep-input')\n\n # sort_step.output -> workflow.output\n workflow.connect_output(sort_step, 'sortstep-output', 'wf-output')\n\n yaml_string = yaml.safe_dump(workflow.save(), default_flow_style=False)\n self.assertMultiLineEqual(REVSORT_WORKFLOW, str(yaml_string))\n", "id": "7754833", "language": "Python", "matching_score": 2.2291176319122314, "max_stars_count": 3, "path": "tests/test_workflow_building.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import WorkflowInputConnection, WorkflowStepConnection, WorkflowOutputConnection, \\\n ValidationException, Workflow, WorkflowStep\n\n\nclass WorkflowStepConnectionCommonTests(object):\n\n def setUp(self):\n self.test_cls = None\n self.workflow = Workflow('workflow-1')\n self.steps = [WorkflowStep('step-1'), WorkflowStep('step-2')]\n [self.workflow.add_step(step) for step in self.steps]\n\n def test_initializes_successfully(self):\n connection = self.test_cls(self.workflow, self.steps)\n 
self.assertIsNotNone(connection)\n self.assertEqual(connection.workflow, self.workflow)\n self.assertEqual(connection.steps, self.steps)\n\n def test_fails_if_steps_not_part_of_workflow(self):\n step = WorkflowStep('step-3')\n self.assertNotIn(step, self.workflow.steps)\n with self.assertRaises(ValidationException) as cm:\n self.test_cls(self.workflow, [step])\n self.assertIn('not a part of workflow', repr(cm.exception))\n\n def test_fails_if_not_a_workflow(self):\n with self.assertRaises(ValidationException) as cm:\n self.test_cls({}, self.steps)\n self.assertIn('not a Workflow', repr(cm.exception))\n\n def test_fails_if_steps_not_steps(self):\n with self.assertRaises(ValidationException) as cm:\n self.test_cls(self.workflow, [1, 2, 3])\n self.assertIn('not a WorkflowStep', repr(cm.exception))\n\n\nclass WorkflowInputConnectionTestCase(WorkflowStepConnectionCommonTests, TestCase):\n\n def setUp(self):\n super(WorkflowInputConnectionTestCase, self).setUp()\n self.test_cls = WorkflowInputConnection\n self.connection = WorkflowInputConnection(self.workflow, self.steps)\n\n def test_connects_workflow_input_to_step_inputs(self):\n workflow_input_id = 'workflow-input-1'\n step_input_ids = ['step-1-input-1', 'step-2-input-1']\n self.connection.connect(workflow_input_id, step_input_ids)\n # Step should have input connected to workflow\n saved = self.workflow.save()\n # Source of first step's first input is the workflow input id\n self.assertEqual(saved['steps'][0]['in'][0]['source']['id'], 'workflow-input-1')\n # Source of second step's input is also the workflow input id\n self.assertEqual(saved['steps'][1]['in'][0]['source']['id'], 'workflow-input-1')\n\n def test_validates_length_of_input_ids(self):\n step_inputs = ['step1-input1', 'step1-input2', 'step1-input3']\n self.assertNotEqual(len(step_inputs), len(self.steps))\n with self.assertRaises(ValidationException) as cm:\n self.connection.connect('workflow-input', step_inputs)\n self.assertIn('len does not match', repr(cm.exception))\n\n def test_reuses_workflow_input_parameter_by_id(self):\n self.assertEqual(len(self.workflow.inputs), 0)\n # When making two connections to a single workflow input, the workflow should have input\n workflow_input_id = 'workflow-input-1'\n step_input_ids = ['step-1-input-1', 'step-2-input-1']\n self.connection.connect(workflow_input_id, step_input_ids)\n self.assertEqual(len(self.workflow.inputs), 1)\n\n def test_connects_multiple_inputs_single_step(self):\n workflow = Workflow('workflow')\n step = WorkflowStep('step')\n workflow.add_step(step)\n connection = WorkflowInputConnection(workflow, [step])\n connection.connect('workflow-input-1', ['step-input-1'])\n connection.connect('workflow-input-2', ['step-input-2'])\n saved = workflow.save()\n step_inputs = saved['steps'][0]['in']\n self.assertEqual(step_inputs[0]['source']['id'], 'workflow-input-1')\n self.assertEqual(step_inputs[0]['id'], 'step-input-1')\n self.assertEqual(step_inputs[1]['source']['id'], 'workflow-input-2')\n self.assertEqual(step_inputs[1]['id'], 'step-input-2')\n\n def test_fails_if_step_already_connected(self):\n workflow = Workflow('workflow')\n step = WorkflowStep('step')\n workflow.add_step(step)\n connection = WorkflowInputConnection(workflow, [step])\n connection.connect('workflow-input-1', ['step-input-1'])\n connection.connect('workflow-input-2', ['step-input-2'])\n with self.assertRaises(ValidationException) as cm:\n connection.connect('workflow-input-3', ['step-input-1'])\n self.assertIn('Step already has input with id: 
step-input-1', repr(cm.exception))\n\n\nclass WorkflowOutputConnectionTestCase(WorkflowStepConnectionCommonTests, TestCase):\n\n def setUp(self):\n super(WorkflowOutputConnectionTestCase, self).setUp()\n self.test_cls = WorkflowOutputConnection\n # Just connecting the last step\n self.connection = WorkflowOutputConnection(self.workflow, self.steps[1:2])\n\n def test_fails_when_connecting_output_to_multiple_steps(self):\n self.assertEqual(len(self.steps), 2)\n connection = WorkflowOutputConnection(self.workflow, self.steps)\n with self.assertRaises(ValidationException) as cm:\n connection.connect('s1', ['w1'])\n self.assertIn('Cannot connect multiple steps', repr(cm.exception))\n\n def test_connects_workflow_outputs_to_step_output(self):\n workflow_output_ids = ['workflow-output-1', 'workflow-output-2']\n step_output_id = 'step-2-output-1'\n self.connection.connect(step_output_id, workflow_output_ids)\n # Step should have input connected to workflow\n saved = self.workflow.save()\n workflow_outputs = saved['outputs']\n # We're connecting two workflow outputs to output-1 of step-2\n self.assertEqual(workflow_outputs[0]['outputSource'], 'step-2/step-2-output-1')\n self.assertEqual(workflow_outputs[0]['id'], 'workflow-output-1')\n self.assertEqual(workflow_outputs[1]['outputSource'], 'step-2/step-2-output-1')\n self.assertEqual(workflow_outputs[1]['id'], 'workflow-output-2')\n\n def test_reuses_workflow_step_output_by_id(self):\n # Connect 1 step's output to two workflow outputs\n # Verify that two workflow outputs are created and the step only has one output\n self.assertEqual(len(self.workflow.steps[1].out), 0)\n self.assertEqual(len(self.workflow.outputs), 0)\n step_output_id = 'step-2-output-1'\n self.connection.connect(step_output_id, ['workflow-output-1'])\n self.connection.connect(step_output_id, ['workflow-output-2'])\n self.assertEqual(len(self.workflow.steps[1].out), 1)\n self.assertEqual(len(self.workflow.outputs), 2)\n\n def test_fails_if_workflow_output_already_connected(self):\n workflow_output_ids = ['workflow-output-1']\n # Connecting another step to the same workflow output should fail.\n self.connection.connect('step-2-output-1', workflow_output_ids)\n with self.assertRaises(ValidationException) as cm:\n self.connection.connect('step-2-output-2', workflow_output_ids)\n self.assertIn('Output parameter exists and is already connected', repr(cm.exception))\n\n\nclass WorkflowStepConnectionTestCase(WorkflowStepConnectionCommonTests, TestCase):\n\n def setUp(self):\n super(WorkflowStepConnectionTestCase, self).setUp()\n self.test_cls = WorkflowStepConnection\n # Step output -> input connections require exactly two steps\n self.connection = WorkflowStepConnection(self.workflow, self.steps)\n\n def test_requires_two_steps(self):\n single_step = self.steps[0:1]\n self.assertEqual(len(single_step), 1)\n connection1 = WorkflowStepConnection(self.workflow, single_step)\n with self.assertRaises(ValidationException) as cm:\n connection1.connect('O', 'I')\n self.assertIn('Can only connect with two steps', repr(cm.exception))\n four_steps = self.steps + self.steps\n self.assertEqual(len(four_steps), 4)\n connection4 = WorkflowStepConnection(self.workflow, four_steps)\n with self.assertRaises(ValidationException) as cm:\n connection4.connect('O', 'I')\n self.assertIn('Can only connect with two steps', repr(cm.exception))\n\n def test_connects_step_output_to_input(self):\n self.connection.connect('step-1-output', 'step-2-input')\n saved = self.workflow.save()\n step_1_outputs = 
saved['steps'][0]['out']\n step_2_inputs = saved['steps'][1]['in']\n self.assertEqual(step_1_outputs[0]['id'], 'step-1-output')\n self.assertEqual(step_2_inputs[0]['id'], 'step-2-input')\n self.assertEqual(step_2_inputs[0]['source'], 'step-1/step-1-output')\n\n def test_reuses_workflow_step_output_by_id(self):\n self.assertEqual(len(self.workflow.steps[0].out), 0)\n self.assertEqual(len(self.workflow.steps[1].in_), 0)\n self.connection.connect('step-1-output', 'step-2-input-1')\n self.connection.connect('step-1-output', 'step-2-input-2')\n # Connecting output of step 1 to two inputs on step 2 should result in one output and two inputs\n self.assertEqual(len(self.workflow.steps[0].out), 1)\n self.assertEqual(len(self.workflow.steps[1].in_), 2)\n\n def test_fails_if_input_already_connected(self):\n self.connection.connect('step-1-output-1', 'step-2-input')\n with self.assertRaises(ValidationException) as cm:\n self.connection.connect('step-1-output-2', 'step-2-input')\n self.assertIn('Step already has input', repr(cm.exception))\n", "id": "5948010", "language": "Python", "matching_score": 3.4003419876098633, "max_stars_count": 3, "path": "tests/test_workflow_step_connection.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import WorkflowStepInput, ValidationException\n\n\nclass WorkflowStepInputTestCase(TestCase):\n\n def setUp(self):\n self.step_input = WorkflowStepInput('my-step-input')\n\n def test_id(self):\n self.assertEqual(self.step_input.id, 'my-step-input')\n\n def test_save(self):\n saved = self.step_input.save()\n self.assertEqual(saved['id'], 'my-step-input')\n\n def test_set_source_string(self):\n self.step_input.set_source('step1/output')\n self.assertEqual(self.step_input.source, 'step1/output')\n\n def test_set_source_list(self):\n source_list = ['step1/output','step2/output']\n self.step_input.set_source(source_list)\n self.assertEqual(self.step_input.source, source_list)\n\n def test_validates_source(self):\n with self.assertRaises(ValidationException) as cm:\n self.step_input.set_source({})\n self.assertIn('Source must be', repr(cm.exception))\n\n def test_validates_source_list(self):\n with self.assertRaises(ValidationException) as cm:\n self.step_input.set_source([1,2,3])\n self.assertIn('Source must be', repr(cm.exception))\n", "id": "3518893", "language": "Python", "matching_score": 2.831026315689087, "max_stars_count": 3, "path": "tests/test_workflow_step_input.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import WorkflowStep, WorkflowStepInput, WorkflowStepOutput, ValidationException\n\n\nclass WorkflowStepTestCase(TestCase):\n\n def setUp(self):\n self.step = WorkflowStep('my-step')\n\n def test_id(self):\n self.assertEqual(self.step.id, 'my-step')\n\n def test_save(self):\n saved = self.step.save()\n self.assertEqual(saved['id'], 'my-step')\n\n def test_add_input(self):\n step_input = WorkflowStepInput('step-input-1')\n self.step.add_input(step_input)\n self.assertIn(step_input, self.step.in_)\n\n def test_validates_add_input_type(self):\n with self.assertRaises(ValidationException) as cm:\n self.step.add_input('a string')\n self.assertIn('Not a WorkflowStepInput', repr(cm.exception))\n\n def test_validates_add_input_uniqueness(self):\n self.step.add_input(WorkflowStepInput('step-input-1'))\n with self.assertRaises(ValidationException) as cm:\n self.step.add_input(WorkflowStepInput('step-input-1'))\n self.assertIn('Step already has input with id', repr(cm.exception))\n\n def test_add_output(self):\n step_output = 
WorkflowStepOutput('step-output-1')\n self.step.add_output(step_output)\n self.assertIn(step_output, self.step.out)\n\n def test_validates_add_output_type(self):\n with self.assertRaises(ValidationException) as cm:\n self.step.add_output('a string')\n self.assertIn('Not a WorkflowStepOutput', repr(cm.exception))\n\n def test_validates_add_output_uniqueness(self):\n self.step.add_output(WorkflowStepOutput('step-output-1'))\n with self.assertRaises(ValidationException) as cm:\n self.step.add_output(WorkflowStepOutput('step-output-1'))\n self.assertIn('Step already has output with id', repr(cm.exception))\n\n def test_validates_set_run(self):\n # Must be one of six.string_types, CommandLineTool, ExpressionTool, Workflow]\n with self.assertRaises(ValidationException) as cm:\n self.step.set_run(1000)\n self.assertIn('Not an allowed type', repr(cm.exception))\n\n def test_set_run(self):\n self.step.set_run('tool.cwl')\n self.assertEqual(self.step.run, 'tool.cwl')\n\n def test_finds_workflow_step_output_by_id(self):\n step_output = WorkflowStepOutput('step-output-1')\n self.step.add_output(step_output)\n self.assertEqual(self.step.workflow_step_output_by_id('step-output-1'), step_output)\n self.assertIsNone(self.step.workflow_step_output_by_id('foobar'))\n", "id": "3482767", "language": "Python", "matching_score": 4.25505256652832, "max_stars_count": 3, "path": "tests/test_workflow_step.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import WorkflowStepOutput\n\n\nclass WorkflowStepOutputTestCase(TestCase):\n\n def setUp(self):\n self.step_output = WorkflowStepOutput('my-step-output')\n\n def test_id(self):\n self.assertEqual(self.step_output.id, 'my-step-output')\n\n def test_save(self):\n saved = self.step_output.save()\n self.assertEqual(saved['id'], 'my-step-output')\n\n", "id": "378449", "language": "Python", "matching_score": 1.0462064743041992, "max_stars_count": 3, "path": "tests/test_workflow_step_output.py" }, { "content": "import os\nimport six\nimport cwl_schema\n\nCWL_VERSION_STRING = 'v1.0'\nLOADING_OPTIONS = cwl_schema.LoadingOptions()\nBASE_URI = cwl_schema.file_uri(os.getcwd())\n\n\ndef _is_list_of_strings(source_list):\n if isinstance(source_list, list):\n return all([isinstance(source, six.string_types) for source in source_list])\n else:\n return False\n\n\nclass ValidationException(cwl_schema.ValidationException):\n pass\n\n\nclass TemplateDocs(object):\n # These should probably be factories\n Workflow = {\n 'class': 'Workflow',\n 'cwlVersion': CWL_VERSION_STRING,\n 'inputs': [],\n 'outputs': [],\n 'steps': [],\n }\n\n WorkflowStep = {\n 'id': '',\n 'in': [],\n 'out': [],\n 'run': '',\n }\n\n WorkflowStepInput = {\n 'id': '',\n }\n\n WorkflowStepOutput = {\n 'id': '',\n }\n\n InputParameter = {\n 'id': '',\n }\n\n WorkflowOutputParameter = {\n 'id': '',\n }\n\n\nclass Workflow(cwl_schema.Workflow):\n\n def __init__(self, id):\n super(Workflow, self).__init__(dict(TemplateDocs.Workflow), id, LOADING_OPTIONS)\n self.id = id\n\n def add_step(self, step):\n # Must be a step!\n if not isinstance(step, cwl_schema.WorkflowStep):\n raise ValidationException(\"Not a WorkflowStep\")\n self.steps.append(step)\n\n def step(self, step):\n self.add_step(step)\n return self\n\n def input_parameter_by_id(self, id):\n for param in self.inputs:\n if param.id == id:\n return param\n return None\n\n def add_input_parameter(self, input_parameter):\n if not isinstance(input_parameter, cwl_schema.InputParameter):\n raise ValidationException(\"Not an InputParameter\")\n 
self.inputs.append(input_parameter)\n return self\n\n def add_output_parameter(self, output_parameter):\n if not isinstance(output_parameter, cwl_schema.WorkflowOutputParameter):\n raise ValidationException(\"Not a WorkflowOutputParameter\")\n self.outputs.append(output_parameter)\n return self\n\n def connect_input(self, step, workflow_input_id, step_input_id=None):\n connection = WorkflowInputConnection(self, [step])\n if step_input_id is None:\n step_input_id = workflow_input_id\n connection.connect(workflow_input_id, [step_input_id])\n return self\n\n def connect_output(self, step, step_output_id, workflow_output_id=None):\n connection = WorkflowOutputConnection(self, [step])\n if workflow_output_id is None:\n workflow_output_id = step_output_id\n connection.connect(step_output_id, [workflow_output_id])\n return self\n\n def connect_steps(self, output_step, input_step, step_output_id, step_input_id=None):\n connection = WorkflowStepConnection(self, [output_step, input_step])\n if step_input_id is None:\n step_input_id = step_output_id\n connection.connect(step_output_id, step_input_id)\n return self\n\n\nclass WorkflowStep(cwl_schema.WorkflowStep):\n\n def __init__(self, id, run=None):\n super(WorkflowStep, self).__init__(TemplateDocs.WorkflowStep, id, LOADING_OPTIONS)\n if run:\n self.set_run(run)\n\n def add_input(self, step_input):\n if not isinstance(step_input, cwl_schema.WorkflowStepInput):\n raise ValidationException(\"Not a WorkflowStepInput\")\n input_ids = [i.id for i in self.in_]\n if step_input.id in input_ids:\n raise ValidationException(\"Step already has input with id: \" + step_input.id)\n self.in_.append(step_input)\n\n def add_output(self, step_output):\n if not isinstance(step_output, cwl_schema.WorkflowStepOutput):\n raise ValidationException(\"Not a WorkflowStepOutput\")\n output_ids = [o.id for o in self.out]\n if step_output.id in output_ids:\n raise ValidationException(\"Step already has output with id: \" + step_output.id)\n self.out.append(step_output)\n\n def set_run(self, run):\n # Would like this to be a @property, but that's awkward with the codegen\n allowed_types = [six.string_types, cwl_schema.CommandLineTool, cwl_schema.ExpressionTool, cwl_schema.Workflow]\n if not any([isinstance(run, allowed) for allowed in allowed_types]):\n raise ValidationException(\"Not an allowed type\")\n self.run = run\n\n def workflow_step_output_by_id(self, id):\n for workflow_step_output in self.out:\n if workflow_step_output.id == id:\n return workflow_step_output\n return None\n\n\nclass WorkflowStepInput(cwl_schema.WorkflowStepInput):\n\n def __init__(self, id):\n super(WorkflowStepInput, self).__init__(TemplateDocs.WorkflowStepInput, id, LOADING_OPTIONS)\n\n def set_source(self, source):\n # Validate that it's a string or a list of strings\n if not _is_list_of_strings(source) and not isinstance(source, six.string_types):\n raise ValidationException(\"Source must be a string or array of strings\")\n # TODO: Inspect the link and make sure the type is valid\n self.source = source\n\n\nclass WorkflowStepOutput(cwl_schema.WorkflowStepOutput):\n\n def __init__(self, id):\n super(WorkflowStepOutput, self).__init__(TemplateDocs.WorkflowStepOutput, id, LOADING_OPTIONS)\n\n\nclass InputParameter(cwl_schema.InputParameter):\n\n def __init__(self, id):\n super(InputParameter, self).__init__(TemplateDocs.InputParameter, id, LOADING_OPTIONS)\n\n\nclass WorkflowOutputParameter(cwl_schema.WorkflowOutputParameter):\n\n def __init__(self, id):\n super(WorkflowOutputParameter, 
self).__init__(TemplateDocs.WorkflowOutputParameter, id, LOADING_OPTIONS)\n\n def set_outputSource(self, outputSource):\n if not _is_list_of_strings(outputSource) and not isinstance(outputSource, six.string_types):\n raise ValidationException(\"outputSource must be a string or array of strings\")\n # TODO: Inspect the link and make sure the type is valid\n self.outputSource = outputSource\n\n\nclass WorkflowStepConnectionBase(object):\n\n # TODO: Verify input and output data types when connecting\n # It's possible (and likely) that \"run\" will be file name\n # and not a fully-formed object. But we can easily load that\n # file and inspect its inputs/outputs/data types when connecting.\n\n def __init__(self, workflow, steps):\n if not isinstance(workflow, cwl_schema.Workflow):\n raise ValidationException(\"workflow is not a Workflow\")\n for step in steps:\n if not isinstance(step, cwl_schema.WorkflowStep):\n raise ValidationException(\"step is not a WorkflowStep\")\n if step not in workflow.steps:\n raise ValidationException(\"step is not a part of workflow\")\n self.workflow = workflow\n self.steps = steps\n\n\nclass WorkflowInputConnection(WorkflowStepConnectionBase):\n\n def _connect_workflow_single_input(self, workflow_input_id, step_input_id, step):\n # If workflow has an input parameter, get it\n input_parameter = self.workflow.input_parameter_by_id(workflow_input_id)\n if not input_parameter:\n input_parameter = InputParameter(workflow_input_id)\n self.workflow.add_input_parameter(input_parameter)\n workflow_step_input = WorkflowStepInput(step_input_id)\n # Now connect them\n workflow_step_input.source = input_parameter\n # This verifies the step is not already connected\n step.add_input(workflow_step_input)\n\n def connect(self, workflow_input_id, step_input_ids):\n \"\"\"\n Connects a workflow input to step inputs\n \"\"\"\n # The workflow input may be connected more than once\n # but the step input should only be connected once\n if len(step_input_ids) != len(self.steps):\n raise ValidationException(\"step_input_ids len does not match steps len\")\n for index, step in enumerate(self.steps):\n step_input_id = step_input_ids[index]\n self._connect_workflow_single_input(workflow_input_id, step_input_id, step)\n\n\nclass WorkflowStepConnection(WorkflowStepConnectionBase):\n\n def connect(self, step_output_id, step_input_id):\n # Simple case, connecting 1:1 output->input\n if not len(self.steps) == 2:\n raise ValidationException(\"Can only connect with two steps\")\n output_step, input_step = self.steps\n workflow_step_output = output_step.workflow_step_output_by_id(step_output_id)\n if not workflow_step_output:\n workflow_step_output = WorkflowStepOutput(step_output_id)\n output_step.add_output(workflow_step_output)\n workflow_step_input = WorkflowStepInput(step_input_id)\n source = '{}/{}'.format(output_step.id, step_output_id)\n workflow_step_input.set_source(source)\n input_step.add_input(workflow_step_input) # Should raise if already connected\n\n\nclass WorkflowOutputConnection(WorkflowStepConnectionBase):\n\n def _connect_workflow_single_output(self, workflow_output_id, step_output_id, step):\n # If step has an output, get it\n workflow_step_output = step.workflow_step_output_by_id(step_output_id)\n if not workflow_step_output:\n workflow_step_output = WorkflowStepOutput(step_output_id)\n step.add_output(workflow_step_output)\n # Check existing output parameters\n output_parameters = [output for output in self.workflow.outputs if output.id == workflow_output_id]\n for 
output_parameter in output_parameters:\n if output_parameter.outputSource:\n raise ValidationException('Output parameter exists and is already connected')\n if not output_parameters:\n output_parameters = [WorkflowOutputParameter(workflow_output_id)]\n\n output_source = '{}/{}'.format(step.id, step_output_id)\n for output_parameter in output_parameters:\n output_parameter.set_outputSource(output_source)\n self.workflow.add_output_parameter(output_parameter)\n\n def connect(self, step_output_id, workflow_output_ids):\n \"\"\"\n Connect's a workflow's output to a step's output\n \"\"\"\n # A step output may be connected to multiple workflow outputs\n # But for now (until sink is implemented), mutliple steps may not be connected to a single workflow output\n if len(self.steps) != 1:\n raise ValidationException(\"Cannot connect multiple steps to a single workflow output\")\n for workflow_output_id in workflow_output_ids:\n self._connect_workflow_single_output(workflow_output_id, step_output_id, self.steps[0])\n", "id": "546987", "language": "Python", "matching_score": 5.484827518463135, "max_stars_count": 3, "path": "cwlpy/cwlpy.py" }, { "content": "from .cwlpy import Workflow, \\\n WorkflowStep, \\\n WorkflowStepInput, \\\n WorkflowStepOutput, \\\n InputParameter, \\\n WorkflowOutputParameter, \\\n WorkflowInputConnection, \\\n WorkflowStepConnection, \\\n WorkflowOutputConnection, \\\n ValidationException\n\n", "id": "11849811", "language": "Python", "matching_score": 1.8785724639892578, "max_stars_count": 3, "path": "cwlpy/__init__.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import Workflow, WorkflowStep, InputParameter, WorkflowOutputParameter, ValidationException\n\n\nclass WorkflowTestCase(TestCase):\n\n def setUp(self):\n self.workflow = Workflow('my-workflow')\n\n def test_id(self):\n self.assertEqual(self.workflow.id, 'my-workflow')\n\n def test_save(self):\n saved = self.workflow.save()\n self.assertEqual(saved['class'], 'Workflow')\n self.assertEqual(saved['id'], 'my-workflow')\n\n def test_validates_add_step(self):\n with self.assertRaises(ValidationException) as cm:\n self.workflow.add_step('not-a-workflowstep')\n self.assertIn('Not a WorkflowStep', repr(cm.exception))\n\n def test_add_input_parameter(self):\n input_parameter = InputParameter('input-1')\n self.workflow.add_input_parameter(input_parameter)\n self.assertIn(input_parameter, self.workflow.inputs)\n\n def test_validates_add_input_parameter_type(self):\n with self.assertRaises(ValidationException) as cm:\n self.workflow.add_input_parameter('not-input-parameter')\n self.assertIn('Not an InputParameter', repr(cm.exception))\n\n def test_finds_input_parameter_by_id(self):\n input_parameter = InputParameter('input-2')\n self.workflow.add_input_parameter(input_parameter)\n self.assertEqual(input_parameter, self.workflow.input_parameter_by_id('input-2'))\n self.assertIsNone(self.workflow.input_parameter_by_id('foobar'))\n\n def test_add_output_parameter(self):\n output_parameter = WorkflowOutputParameter('output-1')\n self.workflow.add_output_parameter(output_parameter)\n self.assertIn(output_parameter, self.workflow.outputs)\n\n def test_validates_add_output_parameter(self):\n with self.assertRaises(ValidationException) as cm:\n self.workflow.add_output_parameter('not-output-parameter')\n self.assertIn('Not a WorkflowOutputParameter', repr(cm.exception))\n\n\nclass WorkflowWithStepsTestCase(TestCase):\n\n def setUp(self):\n self.workflow = Workflow('my-workflow')\n self.step1 = 
WorkflowStep('my-step-1')\n self.step2 = WorkflowStep('my-step-2')\n\n def test_add_step(self):\n self.assertEqual(len(self.workflow.steps), 0)\n self.workflow.add_step(self.step1)\n self.assertEqual(len(self.workflow.steps), 1)\n self.assertIn(self.step1, self.workflow.steps)\n\n def test_step_convenience_method(self):\n self.assertEqual(len(self.workflow.steps), 0)\n retval = self.workflow.step(self.step2)\n self.assertEqual(len(self.workflow.steps), 1)\n self.assertIn(self.step2, self.workflow.steps)\n self.assertEqual(self.workflow, retval)\n\n def test_connect_input(self):\n self.workflow.add_step(self.step1)\n retval = self.workflow.connect_input(self.step1, 'wf-input', 'step-1-input')\n self.assertEqual(self.workflow, retval)\n self.assertEqual(retval.steps[0].in_[0].id, 'step-1-input')\n self.assertEqual(retval.steps[0].in_[0].source.id, 'wf-input')\n\n def test_connect_input_assumes_id(self):\n self.workflow.add_step(self.step1)\n retval = self.workflow.connect_input(self.step1, 'input-1')\n self.assertEqual(self.workflow, retval)\n self.assertEqual(retval.steps[0].in_[0].id, 'input-1')\n self.assertEqual(retval.steps[0].in_[0].source.id, 'input-1')\n\n def test_connect_steps(self):\n self.workflow.add_step(self.step1)\n self.workflow.add_step(self.step2)\n retval = self.workflow.connect_steps(self.step1, self.step2, 'step-1-output', 'step-2-input')\n self.assertEqual(self.workflow, retval)\n self.assertEqual(retval.steps[0].out[0].id, 'step-1-output')\n self.assertEqual(retval.steps[1].in_[0].source, 'my-step-1/step-1-output')\n self.assertEqual(retval.steps[1].in_[0].id, 'step-2-input')\n\n def test_connect_steps_assumes_id(self):\n self.workflow.add_step(self.step1)\n self.workflow.add_step(self.step2)\n retval = self.workflow.connect_steps(self.step1, self.step2, 'internal')\n self.assertEqual(self.workflow, retval)\n self.assertEqual(retval.steps[0].out[0].id, 'internal')\n self.assertEqual(retval.steps[1].in_[0].source, 'my-step-1/internal')\n self.assertEqual(retval.steps[1].in_[0].id, 'internal')\n\n def test_connect_output(self):\n self.workflow.add_step(self.step2)\n retval = self.workflow.connect_output(self.step2, 'step-2-output', 'wf-output')\n self.assertEqual(self.workflow, retval)\n self.assertEqual(retval.steps[0].out[0].id, 'step-2-output')\n self.assertEqual(retval.outputs[0].outputSource, 'my-step-2/step-2-output')\n\n def test_connect_output_assumes_id(self):\n self.workflow.add_step(self.step2)\n retval = self.workflow.connect_output(self.step2, 'output-2')\n self.assertEqual(self.workflow, retval)\n self.assertEqual(retval.steps[0].out[0].id, 'output-2')\n self.assertEqual(retval.outputs[0].outputSource, 'my-step-2/output-2')\n", "id": "1847747", "language": "Python", "matching_score": 4.571779727935791, "max_stars_count": 3, "path": "tests/test_workflow.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import WorkflowOutputParameter, ValidationException\n\n\nclass WorkflowOutputParameterTestCase(TestCase):\n\n def setUp(self):\n self.output_parameter = WorkflowOutputParameter('my-output-parameter')\n\n def test_id(self):\n self.assertEqual(self.output_parameter.id, 'my-output-parameter')\n\n def test_save(self):\n saved = self.output_parameter.save()\n self.assertEqual(saved['id'], 'my-output-parameter')\n\n def test_set_output_source_string(self):\n self.output_parameter.set_outputSource('step1/output')\n self.assertEqual(self.output_parameter.outputSource, 'step1/output')\n\n def test_set_output_source_list(self):\n source_list = 
['step1/output','step2/output']\n self.output_parameter.set_outputSource(source_list)\n self.assertEqual(self.output_parameter.outputSource, source_list)\n\n def test_validates_output_source(self):\n with self.assertRaises(ValidationException) as cm:\n self.output_parameter.set_outputSource({})\n self.assertIn('outputSource must be', repr(cm.exception))\n\n def test_validates_output_source_list(self):\n with self.assertRaises(ValidationException) as cm:\n self.output_parameter.set_outputSource([1,2,3])\n self.assertIn('outputSource must be', repr(cm.exception))\n", "id": "12274065", "language": "Python", "matching_score": 2.5079140663146973, "max_stars_count": 3, "path": "tests/test_workflow_output_parameter.py" }, { "content": "from unittest import TestCase\n\nfrom cwlpy import InputParameter\n\n\nclass InputParameterTestCase(TestCase):\n\n def setUp(self):\n self.input_parameter = InputParameter('my-input-parameter')\n\n def test_id(self):\n self.assertEqual(self.input_parameter.id, 'my-input-parameter')\n\n def test_save(self):\n saved = self.input_parameter.save()\n self.assertEqual(saved['id'], 'my-input-parameter')\n", "id": "4557186", "language": "Python", "matching_score": 0.09554097056388855, "max_stars_count": 3, "path": "tests/test_input_parameter.py" }, { "content": "#\n# This file was autogenerated using schema-salad-tool --codegen=python\n#\nfrom __future__ import absolute_import\nimport ruamel.yaml\nfrom ruamel.yaml.comments import CommentedBase, CommentedMap, CommentedSeq\nimport re\nimport os\nimport traceback\n\nfrom typing import (Any, AnyStr, Callable, cast, Dict, List, Iterable, Tuple,\n TypeVar, Union, Text)\nimport six\n\nlineno_re = re.compile(u\"^(.*?:[0-9]+:[0-9]+: )(( *)(.*))\")\n\ndef _add_lc_filename(r, source): # type: (ruamel.yaml.comments.CommentedBase, AnyStr) -> None\n if isinstance(r, ruamel.yaml.comments.CommentedBase):\n r.lc.filename = source\n if isinstance(r, list):\n for d in r:\n _add_lc_filename(d, source)\n elif isinstance(r, dict):\n for d in six.itervalues(r):\n _add_lc_filename(d, source)\n\ndef relname(source): # type: (Text) -> Text\n if source.startswith(\"file://\"):\n source = source[7:]\n source = os.path.relpath(source)\n return source\n\ndef add_lc_filename(r, source): # type: (ruamel.yaml.comments.CommentedBase, Text) -> None\n _add_lc_filename(r, relname(source))\n\ndef reflow(text, maxline, shift=\"\"): # type: (Text, int, Text) -> Text\n if maxline < 20:\n maxline = 20\n if len(text) > maxline:\n sp = text.rfind(' ', 0, maxline)\n if sp < 1:\n sp = text.find(' ', sp+1)\n if sp == -1:\n sp = len(text)\n if sp < len(text):\n return \"%s\\n%s%s\" % (text[0:sp], shift, reflow(text[sp+1:], maxline, shift))\n return text\n\ndef indent(v, nolead=False, shift=u\" \", bullet=u\" \"): # type: (Text, bool, Text, Text) -> Text\n if nolead:\n return v.splitlines()[0] + u\"\\n\".join([shift + l for l in v.splitlines()[1:]])\n else:\n def lineno(i, l): # type: (int, Text) -> Text\n r = lineno_re.match(l)\n if r is not None:\n return r.group(1) + (bullet if i == 0 else shift) + r.group(2)\n else:\n return (bullet if i == 0 else shift) + l\n\n return u\"\\n\".join([lineno(i, l) for i, l in enumerate(v.splitlines())])\n\ndef bullets(textlist, bul): # type: (List[Text], Text) -> Text\n if len(textlist) == 1:\n return textlist[0]\n else:\n return \"\\n\".join(indent(t, bullet=bul) for t in textlist)\n\ndef strip_dup_lineno(text, maxline=None): # type: (Text, int) -> Text\n if maxline is None:\n maxline = int(os.environ.get(\"COLUMNS\", \"100\"))\n pre 
= None\n msg = []\n maxno = 0\n for l in text.splitlines():\n g = lineno_re.match(l)\n if not g:\n continue\n maxno = max(maxno, len(g.group(1)))\n\n for l in text.splitlines():\n g = lineno_re.match(l)\n if not g:\n msg.append(l)\n continue\n if g.group(1) != pre:\n shift = maxno + len(g.group(3))\n g2 = reflow(g.group(2), maxline-shift, \" \" * shift)\n pre = g.group(1)\n msg.append(pre + \" \" * (maxno-len(g.group(1))) + g2)\n else:\n g2 = reflow(g.group(2), maxline-maxno, \" \" * (maxno+len(g.group(3))))\n msg.append(\" \" * maxno + g2)\n return \"\\n\".join(msg)\n\ndef cmap(d, lc=None, fn=None): # type: (Union[int, float, str, Text, Dict, List], List[int], Text) -> Union[int, float, str, Text, CommentedMap, CommentedSeq]\n if lc is None:\n lc = [0, 0, 0, 0]\n if fn is None:\n fn = \"test\"\n\n if isinstance(d, CommentedMap):\n fn = d.lc.filename if hasattr(d.lc, \"filename\") else fn\n for k,v in six.iteritems(d):\n if k in d.lc.data:\n d[k] = cmap(v, lc=d.lc.data[k], fn=fn)\n else:\n d[k] = cmap(v, lc, fn=fn)\n return d\n if isinstance(d, CommentedSeq):\n fn = d.lc.filename if hasattr(d.lc, \"filename\") else fn\n for k,v in enumerate(d):\n if k in d.lc.data:\n d[k] = cmap(v, lc=d.lc.data[k], fn=fn)\n else:\n d[k] = cmap(v, lc, fn=fn)\n return d\n if isinstance(d, dict):\n cm = CommentedMap()\n for k in sorted(d.keys()):\n v = d[k]\n if isinstance(v, CommentedBase):\n uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]\n vfn = v.lc.filename if hasattr(v.lc, \"filename\") else fn\n else:\n uselc = lc\n vfn = fn\n cm[k] = cmap(v, lc=uselc, fn=vfn)\n cm.lc.add_kv_line_col(k, uselc)\n cm.lc.filename = fn\n return cm\n if isinstance(d, list):\n cs = CommentedSeq()\n for k,v in enumerate(d):\n if isinstance(v, CommentedBase):\n uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]\n vfn = v.lc.filename if hasattr(v.lc, \"filename\") else fn\n else:\n uselc = lc\n vfn = fn\n cs.append(cmap(v, lc=uselc, fn=vfn))\n cs.lc.add_kv_line_col(k, uselc)\n cs.lc.filename = fn\n return cs\n else:\n return d\n\nclass SourceLine(object):\n def __init__(self, item, key=None, raise_type=six.text_type, include_traceback=False): # type: (Any, Any, Callable, bool) -> None\n self.item = item\n self.key = key\n self.raise_type = raise_type\n self.include_traceback = include_traceback\n\n def __enter__(self): # type: () -> SourceLine\n return self\n\n def __exit__(self,\n exc_type, # type: Any\n exc_value, # type: Any\n tb # type: Any\n ): # -> Any\n if not exc_value:\n return\n if self.include_traceback:\n raise self.makeError(\"\\n\".join(traceback.format_exception(exc_type, exc_value, tb)))\n else:\n raise self.makeError(six.text_type(exc_value))\n\n def makeLead(self): # type: () -> Text\n if self.key is None or self.item.lc.data is None or self.key not in self.item.lc.data:\n return \"%s:%i:%i:\" % (self.item.lc.filename if hasattr(self.item.lc, \"filename\") else \"\",\n (self.item.lc.line or 0)+1,\n (self.item.lc.col or 0)+1)\n else:\n return \"%s:%i:%i:\" % (self.item.lc.filename if hasattr(self.item.lc, \"filename\") else \"\",\n (self.item.lc.data[self.key][0] or 0)+1,\n (self.item.lc.data[self.key][1] or 0)+1)\n\n def makeError(self, msg): # type: (Text) -> Any\n if not isinstance(self.item, ruamel.yaml.comments.CommentedBase):\n return self.raise_type(msg)\n errs = []\n lead = self.makeLead()\n for m in msg.splitlines():\n if bool(lineno_re.match(m)):\n errs.append(m)\n else:\n errs.append(\"%s %s\" % (lead, m))\n return self.raise_type(\"\\n\".join(errs))\n\n\nimport six\nfrom six.moves import urllib, 
StringIO\nimport ruamel.yaml as yaml\nimport copy\nimport re\nfrom typing import List, Text, Dict, Union, Any, Sequence\nimport uuid\n\nclass ValidationException(Exception):\n pass\n\nclass Savable(object):\n pass\n\nclass LoadingOptions(object):\n def __init__(self, fetcher=None, namespaces=None, fileuri=None, copyfrom=None, schemas=None):\n if copyfrom is not None:\n self.idx = copyfrom.idx\n if fetcher is None:\n fetcher = copyfrom.fetcher\n if fileuri is None:\n fileuri = copyfrom.fileuri\n if namespaces is None:\n namespaces = copyfrom.namespaces\n if namespaces is None:\n schemas = copyfrom.schemas\n else:\n self.idx = {}\n\n if fetcher is None:\n import os\n import requests\n from cachecontrol.wrapper import CacheControl\n from cachecontrol.caches import FileCache\n from schema_salad.ref_resolver import DefaultFetcher\n if \"HOME\" in os.environ:\n session = CacheControl(\n requests.Session(),\n cache=FileCache(os.path.join(os.environ[\"HOME\"], \".cache\", \"salad\")))\n elif \"TMP\" in os.environ:\n session = CacheControl(\n requests.Session(),\n cache=FileCache(os.path.join(os.environ[\"TMP\"], \".cache\", \"salad\")))\n else:\n session = CacheControl(\n requests.Session(),\n cache=FileCache(\"/tmp\", \".cache\", \"salad\"))\n self.fetcher = DefaultFetcher({}, session)\n else:\n self.fetcher = fetcher\n\n self.fileuri = fileuri\n\n self.vocab = _vocab\n self.rvocab = _rvocab\n self.namespaces = namespaces\n self.schemas = schemas\n\n if namespaces is not None:\n self.vocab = self.vocab.copy()\n self.rvocab = self.rvocab.copy()\n for k,v in six.iteritems(namespaces):\n self.vocab[k] = v\n self.rvocab[v] = k\n\n\n\ndef load_field(val, fieldtype, baseuri, loadingOptions):\n if isinstance(val, dict):\n if \"$import\" in val:\n return _document_load_by_url(fieldtype, loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val[\"$import\"]), loadingOptions)\n elif \"$include\" in val:\n val = loadingOptions.fetcher.fetch_text(loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val[\"$include\"]))\n return fieldtype.load(val, baseuri, loadingOptions)\n\n\ndef save(val, top=True):\n if isinstance(val, Savable):\n return val.save(top=top)\n if isinstance(val, list):\n return [save(v, top=False) for v in val]\n return val\n\ndef expand_url(url, # type: Union[str, Text]\n base_url, # type: Union[str, Text]\n loadingOptions, # type: LoadingOptions\n scoped_id=False, # type: bool\n vocab_term=False, # type: bool\n scoped_ref=None # type: int\n ):\n # type: (...) 
-> Text\n\n if not isinstance(url, six.string_types):\n return url\n\n url = Text(url)\n\n if url in (u\"@id\", u\"@type\"):\n return url\n\n if vocab_term and url in loadingOptions.vocab:\n return url\n\n if bool(loadingOptions.vocab) and u\":\" in url:\n prefix = url.split(u\":\")[0]\n if prefix in loadingOptions.vocab:\n url = loadingOptions.vocab[prefix] + url[len(prefix) + 1:]\n\n split = urllib.parse.urlsplit(url)\n\n if ((bool(split.scheme) and split.scheme in [u'http', u'https', u'file']) or url.startswith(u\"$(\")\n or url.startswith(u\"${\")):\n pass\n elif scoped_id and not bool(split.fragment):\n splitbase = urllib.parse.urlsplit(base_url)\n frg = u\"\"\n if bool(splitbase.fragment):\n frg = splitbase.fragment + u\"/\" + split.path\n else:\n frg = split.path\n pt = splitbase.path if splitbase.path != '' else \"/\"\n url = urllib.parse.urlunsplit(\n (splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))\n elif scoped_ref is not None and not bool(split.fragment):\n splitbase = urllib.parse.urlsplit(base_url)\n sp = splitbase.fragment.split(u\"/\")\n n = scoped_ref\n while n > 0 and len(sp) > 0:\n sp.pop()\n n -= 1\n sp.append(url)\n url = urllib.parse.urlunsplit((\n splitbase.scheme, splitbase.netloc, splitbase.path, splitbase.query,\n u\"/\".join(sp)))\n else:\n url = loadingOptions.fetcher.urljoin(base_url, url)\n\n if vocab_term:\n split = urllib.parse.urlsplit(url)\n if bool(split.scheme):\n if url in loadingOptions.rvocab:\n return loadingOptions.rvocab[url]\n else:\n raise ValidationException(\"Term '%s' not in vocabulary\" % url)\n\n return url\n\n\nclass _Loader(object):\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n # type: (Any, Text, LoadingOptions, Union[Text, None]) -> Any\n pass\n\nclass _AnyLoader(_Loader):\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if doc is not None:\n return doc\n raise ValidationException(\"Expected non-null\")\n\nclass _PrimitiveLoader(_Loader):\n def __init__(self, tp):\n # type: (Union[type, Sequence[type]]) -> None\n self.tp = tp\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if not isinstance(doc, self.tp):\n raise ValidationException(\"Expected a %s but got %s\" % (self.tp, type(doc)))\n return doc\n\n def __repr__(self):\n return str(self.tp)\n\nclass _ArrayLoader(_Loader):\n def __init__(self, items):\n # type: (_Loader) -> None\n self.items = items\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if not isinstance(doc, list):\n raise ValidationException(\"Expected a list\")\n r = []\n errors = []\n for i in range(0, len(doc)):\n try:\n lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)\n if isinstance(lf, list):\n r.extend(lf)\n else:\n r.append(lf)\n except ValidationException as e:\n errors.append(SourceLine(doc, i, str).makeError(six.text_type(e)))\n if errors:\n raise ValidationException(\"\\n\".join(errors))\n return r\n\n def __repr__(self):\n return \"array<%s>\" % self.items\n\nclass _EnumLoader(_Loader):\n def __init__(self, symbols):\n # type: (Sequence[Text]) -> None\n self.symbols = symbols\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if doc in self.symbols:\n return doc\n else:\n raise ValidationException(\"Expected one of %s\" % (self.symbols,))\n\n\nclass _RecordLoader(_Loader):\n def __init__(self, classtype):\n # type: (type) -> None\n self.classtype = classtype\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if not isinstance(doc, dict):\n raise 
ValidationException(\"Expected a dict\")\n return self.classtype(doc, baseuri, loadingOptions, docRoot=docRoot)\n\n def __repr__(self):\n return str(self.classtype)\n\n\nclass _UnionLoader(_Loader):\n def __init__(self, alternates):\n # type: (Sequence[_Loader]) -> None\n self.alternates = alternates\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n errors = []\n for t in self.alternates:\n try:\n return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)\n except ValidationException as e:\n errors.append(\"tried %s but\\n%s\" % (t, indent(str(e))))\n raise ValidationException(bullets(errors, \"- \"))\n\n def __repr__(self):\n return \" | \".join(str(a) for a in self.alternates)\n\nclass _URILoader(_Loader):\n def __init__(self, inner, scoped_id, vocab_term, scoped_ref):\n # type: (_Loader, bool, bool, Union[int, None]) -> None\n self.inner = inner\n self.scoped_id = scoped_id\n self.vocab_term = vocab_term\n self.scoped_ref = scoped_ref\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if isinstance(doc, list):\n doc = [expand_url(i, baseuri, loadingOptions,\n self.scoped_id, self.vocab_term, self.scoped_ref) for i in doc]\n if isinstance(doc, six.string_types):\n doc = expand_url(doc, baseuri, loadingOptions,\n self.scoped_id, self.vocab_term, self.scoped_ref)\n return self.inner.load(doc, baseuri, loadingOptions)\n\nclass _TypeDSLLoader(_Loader):\n typeDSLregex = re.compile(u\"^([^[?]+)(\\[\\])?(\\?)?$\")\n\n def __init__(self, inner, refScope):\n # type: (_Loader, Union[int, None]) -> None\n self.inner = inner\n self.refScope = refScope\n\n def resolve(self, doc, baseuri, loadingOptions):\n m = self.typeDSLregex.match(doc)\n if m:\n first = expand_url(m.group(1), baseuri, loadingOptions, False, True, self.refScope)\n second = third = None\n if bool(m.group(2)):\n second = {\"type\": \"array\", \"items\": first}\n #second = CommentedMap(((\"type\", \"array\"),\n # (\"items\", first)))\n #second.lc.add_kv_line_col(\"type\", lc)\n #second.lc.add_kv_line_col(\"items\", lc)\n #second.lc.filename = filename\n if bool(m.group(3)):\n third = [u\"null\", second or first]\n #third = CommentedSeq([u\"null\", second or first])\n #third.lc.add_kv_line_col(0, lc)\n #third.lc.add_kv_line_col(1, lc)\n #third.lc.filename = filename\n doc = third or second or first\n return doc\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if isinstance(doc, list):\n r = []\n for d in doc:\n if isinstance(d, six.string_types):\n resolved = self.resolve(d, baseuri, loadingOptions)\n if isinstance(resolved, list):\n for i in resolved:\n if i not in r:\n r.append(i)\n else:\n if resolved not in r:\n r.append(resolved)\n else:\n r.append(d)\n doc = r\n elif isinstance(doc, six.string_types):\n doc = self.resolve(doc, baseuri, loadingOptions)\n\n return self.inner.load(doc, baseuri, loadingOptions)\n\n\nclass _IdMapLoader(_Loader):\n def __init__(self, inner, mapSubject, mapPredicate):\n # type: (_Loader, Text, Union[Text, None]) -> None\n self.inner = inner\n self.mapSubject = mapSubject\n self.mapPredicate = mapPredicate\n\n def load(self, doc, baseuri, loadingOptions, docRoot=None):\n if isinstance(doc, dict):\n r = []\n for k in sorted(doc.keys()):\n val = doc[k]\n if isinstance(val, dict):\n v = copy.copy(val)\n if hasattr(val, 'lc'):\n v.lc.data = val.lc.data\n v.lc.filename = val.lc.filename\n else:\n if self.mapPredicate:\n v = {self.mapPredicate: val}\n else:\n raise ValidationException(\"No mapPredicate\")\n v[self.mapSubject] = k\n r.append(v)\n doc = r\n 
return self.inner.load(doc, baseuri, loadingOptions)\n\n\ndef _document_load(loader, doc, baseuri, loadingOptions):\n if isinstance(doc, six.string_types):\n return _document_load_by_url(loader, loadingOptions.fetcher.urljoin(baseuri, doc), loadingOptions)\n\n if isinstance(doc, dict):\n if \"$namespaces\" in doc:\n loadingOptions = LoadingOptions(copyfrom=loadingOptions, namespaces=doc[\"$namespaces\"])\n doc = {k: v for k,v in doc.items() if k != \"$namespaces\"}\n\n if \"$schemas\" in doc:\n loadingOptions = LoadingOptions(copyfrom=loadingOptions, schemas=doc[\"$schemas\"])\n doc = {k: v for k,v in doc.items() if k != \"$schemas\"}\n\n if \"$base\" in doc:\n baseuri = doc[\"$base\"]\n\n if \"$graph\" in doc:\n return loader.load(doc[\"$graph\"], baseuri, loadingOptions)\n else:\n return loader.load(doc, baseuri, loadingOptions, docRoot=baseuri)\n\n if isinstance(doc, list):\n return loader.load(doc, baseuri, loadingOptions)\n\n raise ValidationException()\n\n\ndef _document_load_by_url(loader, url, loadingOptions):\n if url in loadingOptions.idx:\n return _document_load(loader, loadingOptions.idx[url], url, loadingOptions)\n\n text = loadingOptions.fetcher.fetch_text(url)\n if isinstance(text, bytes):\n textIO = StringIO(text.decode('utf-8'))\n else:\n textIO = StringIO(text)\n textIO.name = url # type: ignore\n result = yaml.round_trip_load(textIO)\n add_lc_filename(result, url)\n\n loadingOptions.idx[url] = result\n\n loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=url)\n\n return _document_load(loader, result, url, loadingOptions)\n\ndef file_uri(path, split_frag=False): # type: (str, bool) -> str\n if path.startswith(\"file://\"):\n return path\n if split_frag:\n pathsp = path.split(\"#\", 2)\n frag = \"#\" + urllib.parse.quote(str(pathsp[1])) if len(pathsp) == 2 else \"\"\n urlpath = urllib.request.pathname2url(str(pathsp[0]))\n else:\n urlpath = urllib.request.pathname2url(path)\n frag = \"\"\n if urlpath.startswith(\"//\"):\n return \"file:%s%s\" % (urlpath, frag)\n else:\n return \"file://%s%s\" % (urlpath, frag)\n\ndef prefix_url(url, namespaces):\n for k,v in namespaces.items():\n if url.startswith(v):\n return k+\":\"+url[len(v):]\n return url\n\n\nclass RecordField(Savable):\n \"\"\"\nA field of a record.\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n raise ValidationException(\"Missing name\")\n baseuri = self.name\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2, baseuri, 
loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `name`, `doc`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'RecordField'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['name', 'doc', 'type'])\n\n\nclass RecordSchema(Savable):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'fields' in doc:\n try:\n self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'fields', str).makeError(\"the `fields` field is not valid because:\\n\"+str(e)))\n else:\n self.fields = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `fields`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'RecordSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.fields is not None:\n r['fields'] = save(self.fields, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['fields', 'type'])\n\n\nclass EnumSchema(Savable):\n \"\"\"\nDefine an enumerated type.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'symbols', str).makeError(\"the `symbols` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, 
baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `symbols`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'EnumSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.symbols is not None:\n r['symbols'] = save(self.symbols, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['symbols', 'type'])\n\n\nclass ArraySchema(Savable):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.items = load_field(doc.get('items'), uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'items', str).makeError(\"the `items` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `items`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'ArraySchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.items is not None:\n r['items'] = save(self.items, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['items', 'type'])\n\n\nclass File(Savable):\n \"\"\"\nRepresents a file (or group of files when `secondaryFiles` is provided) that\nwill be accessible by tools using standard POSIX file system call API such as\nopen(2) and read(2).\n\nFiles are represented as objects with `class` of `File`. File objects have\na number of properties that provide metadata about the file.\n\nThe `location` property of a File is a URI that uniquely identifies the\nfile. Implementations must support the file:// URI scheme and may support\nother schemes such as http://. 
The value of `location` may also be a\nrelative reference, in which case it must be resolved relative to the URI\nof the document it appears in. Alternately to `location`, implementations\nmust also accept the `path` property on File, which must be a filesystem\npath available on the same host as the CWL runner (for inputs) or the\nruntime environment of a command line tool execution (for command line tool\noutputs).\n\nIf no `location` or `path` is specified, a file object must specify\n`contents` with the UTF-8 text content of the file. This is a \"file\nliteral\". File literals do not correspond to external resources, but are\ncreated on disk with `contents` when needed for executing a tool.\nWhere appropriate, expressions can return file literals to define new files\nat runtime. The maximum size of `contents` is 64 kilobytes.\n\nThe `basename` property defines the filename on disk where the file is\nstaged. This may differ from the resource name. If not provided,\n`basename` must be computed from the last path part of `location` and made\navailable to expressions.\n\nThe `secondaryFiles` property is a list of File or Directory objects that\nmust be staged in the same directory as the primary file. It is an error\nfor file names to be duplicated in `secondaryFiles`.\n\nThe `size` property is the size in bytes of the File. It must be computed\nfrom the resource and made available to expressions. The `checksum` field\ncontains a cryptographic hash of the file content for use in verifying file\ncontents. Implementations may, at user option, enable or disable\ncomputation of the `checksum` field for performance or other reasons.\nHowever, the ability to compute output checksums is required to pass the\nCWL conformance test suite.\n\nWhen executing a CommandLineTool, the files and secondary files may be\nstaged to an arbitrary directory, but must use the value of `basename` for\nthe filename. The `path` property must be the file path in the context of the\ntool execution runtime (local to the compute node, or within the executing\ncontainer). All computed properties should be available to expressions.\nFile literals also must be staged and `path` must be set.\n\nWhen collecting CommandLineTool outputs, `glob` matching returns file paths\n(with the `path` property) and the derived properties. This can all be\nmodified by `outputEval`. Alternately, if the file `cwl.output.json` is\npresent in the output, `outputBinding` is ignored.\n\nFile objects in the output must provide either a `location` URI or a `path`\nproperty in the context of the tool execution runtime (local to the compute\nnode, or within the executing container).\n\nWhen evaluating an ExpressionTool, file objects must be referenced via\n`location` (the expression tool does not have access to files on disk so\n`path` is meaningless) or as file literals. It is legal to return a file\nobject with an existing `location` but a different `basename`. 
The\n`loadContents` field of ExpressionTool inputs behaves the same as on\nCommandLineTool inputs, however it is not meaningful on the outputs.\n\nAn ExpressionTool may forward file references from input to output by using\nthe same value for `location`.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'File':\n raise ValidationException(\"Not a File\")\n\n if 'location' in doc:\n try:\n self.location = load_field(doc.get('location'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'location', str).makeError(\"the `location` field is not valid because:\\n\"+str(e)))\n else:\n self.location = None\n\n if 'path' in doc:\n try:\n self.path = load_field(doc.get('path'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'path', str).makeError(\"the `path` field is not valid because:\\n\"+str(e)))\n else:\n self.path = None\n\n if 'basename' in doc:\n try:\n self.basename = load_field(doc.get('basename'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'basename', str).makeError(\"the `basename` field is not valid because:\\n\"+str(e)))\n else:\n self.basename = None\n\n if 'dirname' in doc:\n try:\n self.dirname = load_field(doc.get('dirname'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'dirname', str).makeError(\"the `dirname` field is not valid because:\\n\"+str(e)))\n else:\n self.dirname = None\n\n if 'nameroot' in doc:\n try:\n self.nameroot = load_field(doc.get('nameroot'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'nameroot', str).makeError(\"the `nameroot` field is not valid because:\\n\"+str(e)))\n else:\n self.nameroot = None\n\n if 'nameext' in doc:\n try:\n self.nameext = load_field(doc.get('nameext'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'nameext', str).makeError(\"the `nameext` field is not valid because:\\n\"+str(e)))\n else:\n self.nameext = None\n\n if 'checksum' in doc:\n try:\n self.checksum = load_field(doc.get('checksum'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'checksum', str).makeError(\"the `checksum` field is not valid because:\\n\"+str(e)))\n else:\n self.checksum = None\n\n if 'size' in doc:\n try:\n self.size = load_field(doc.get('size'), union_of_None_type_or_inttype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'size', str).makeError(\"the `size` field is not valid because:\\n\"+str(e)))\n else:\n self.size = None\n\n if 'secondaryFiles' in doc:\n try:\n self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_array_of_union_of_FileLoader_or_DirectoryLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'secondaryFiles', str).makeError(\"the `secondaryFiles` field is not valid because:\\n\"+str(e)))\n else:\n self.secondaryFiles = None\n\n if 
'format' in doc:\n try:\n self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'format', str).makeError(\"the `format` field is not valid because:\\n\"+str(e)))\n else:\n self.format = None\n\n if 'contents' in doc:\n try:\n self.contents = load_field(doc.get('contents'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'contents', str).makeError(\"the `contents` field is not valid because:\\n\"+str(e)))\n else:\n self.contents = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `location`, `path`, `basename`, `dirname`, `nameroot`, `nameext`, `checksum`, `size`, `secondaryFiles`, `format`, `contents`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'File'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'File'\n\n if self.location is not None:\n r['location'] = save(self.location, top=False)\n\n if self.path is not None:\n r['path'] = save(self.path, top=False)\n\n if self.basename is not None:\n r['basename'] = save(self.basename, top=False)\n\n if self.dirname is not None:\n r['dirname'] = save(self.dirname, top=False)\n\n if self.nameroot is not None:\n r['nameroot'] = save(self.nameroot, top=False)\n\n if self.nameext is not None:\n r['nameext'] = save(self.nameext, top=False)\n\n if self.checksum is not None:\n r['checksum'] = save(self.checksum, top=False)\n\n if self.size is not None:\n r['size'] = save(self.size, top=False)\n\n if self.secondaryFiles is not None:\n r['secondaryFiles'] = save(self.secondaryFiles, top=False)\n\n if self.format is not None:\n r['format'] = save(self.format, top=False)\n\n if self.contents is not None:\n r['contents'] = save(self.contents, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'location', 'path', 'basename', 'dirname', 'nameroot', 'nameext', 'checksum', 'size', 'secondaryFiles', 'format', 'contents'])\n\n\nclass Directory(Savable):\n \"\"\"\nRepresents a directory to present to a command line tool.\n\nDirectories are represented as objects with `class` of `Directory`. Directory objects have\na number of properties that provide metadata about the directory.\n\nThe `location` property of a Directory is a URI that uniquely identifies\nthe directory. Implementations must support the file:// URI scheme and may\nsupport other schemes such as http://. Alternately to `location`,\nimplementations must also accept the `path` property on Direcotry, which\nmust be a filesystem path available on the same host as the CWL runner (for\ninputs) or the runtime environment of a command line tool execution (for\ncommand line tool outputs).\n\nA Directory object may have a `listing` field. This is a list of File and\nDirectory objects that are contained in the Directory. For each entry in\n`listing`, the `basename` property defines the name of the File or\nSubdirectory when staged to disk. 
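As an\nillustrative, non-normative sketch (all names are hypothetical), a Directory\nthat stages a single file through `listing` could be written as:\n\n    {'class': 'Directory',\n     'basename': 'results',                      # hypothetical directory name\n     'listing': [{'class': 'File',\n                  'basename': 'log.txt'}]}       # hypothetical file name\n\n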
If `listing` is not provided, the\nimplementation must have some way of fetching the Directory listing at\nruntime based on the `location` field.\n\nIf a Directory does not have `location`, it is a Directory literal. A\nDirectory literal must provide `listing`. Directory literals must be\ncreated on disk at runtime as needed.\n\nThe resources in a Directory literal do not need to have any implied\nrelationship in their `location`. For example, a Directory listing may\ncontain two files located on different hosts. It is the responsibility of\nthe runtime to ensure that those files are staged to disk appropriately.\nSecondary files associated with files in `listing` must also be staged to\nthe same Directory.\n\nWhen executing a CommandLineTool, Directories must be recursively staged\nfirst and have local values of `path` assigend.\n\nDirectory objects in CommandLineTool output must provide either a\n`location` URI or a `path` property in the context of the tool execution\nruntime (local to the compute node, or within the executing container).\n\nAn ExpressionTool may forward file references from input to output by using\nthe same value for `location`.\n\nName conflicts (the same `basename` appearing multiple times in `listing`\nor in any entry in `secondaryFiles` in the listing) is a fatal error.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'Directory':\n raise ValidationException(\"Not a Directory\")\n\n if 'location' in doc:\n try:\n self.location = load_field(doc.get('location'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'location', str).makeError(\"the `location` field is not valid because:\\n\"+str(e)))\n else:\n self.location = None\n\n if 'path' in doc:\n try:\n self.path = load_field(doc.get('path'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'path', str).makeError(\"the `path` field is not valid because:\\n\"+str(e)))\n else:\n self.path = None\n\n if 'basename' in doc:\n try:\n self.basename = load_field(doc.get('basename'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'basename', str).makeError(\"the `basename` field is not valid because:\\n\"+str(e)))\n else:\n self.basename = None\n\n if 'listing' in doc:\n try:\n self.listing = load_field(doc.get('listing'), union_of_None_type_or_array_of_union_of_FileLoader_or_DirectoryLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'listing', str).makeError(\"the `listing` field is not valid because:\\n\"+str(e)))\n else:\n self.listing = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `location`, `path`, `basename`, `listing`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'Directory'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n 
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'Directory'\n\n if self.location is not None:\n r['location'] = save(self.location, top=False)\n\n if self.path is not None:\n r['path'] = save(self.path, top=False)\n\n if self.basename is not None:\n r['basename'] = save(self.basename, top=False)\n\n if self.listing is not None:\n r['listing'] = save(self.listing, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'location', 'path', 'basename', 'listing'])\n\n\nclass SchemaBase(Savable):\n pass\n\nclass Parameter(SchemaBase):\n \"\"\"\nDefine an input or output parameter to a process.\n\n \"\"\"\n pass\n\nclass InputBinding(Savable):\n pass\n\nclass OutputBinding(Savable):\n pass\n\nclass InputSchema(SchemaBase):\n pass\n\nclass OutputSchema(SchemaBase):\n pass\n\nclass InputRecordField(RecordField):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n raise ValidationException(\"Missing name\")\n baseuri = self.name\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `name`, `doc`, `type`, `inputBinding`, `label`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'InputRecordField'\\n\"+\"\\n\".join(errors))\n\n def 
save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['name', 'doc', 'type', 'inputBinding', 'label'])\n\n\nclass InputRecordSchema(RecordSchema, InputSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n self.name = \"_:\" + str(uuid.uuid4())\n baseuri = self.name\n if 'fields' in doc:\n try:\n self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_InputRecordFieldLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'fields', str).makeError(\"the `fields` field is not valid because:\\n\"+str(e)))\n else:\n self.fields = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `fields`, `type`, `label`, `name`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'InputRecordSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.fields is not None:\n r['fields'] = save(self.fields, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['fields', 'type', 'label', 'name'])\n\n\nclass InputEnumSchema(EnumSchema, InputSchema):\n def __init__(self, _doc, baseuri, loadingOptions, 
docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n self.name = \"_:\" + str(uuid.uuid4())\n baseuri = self.name\n try:\n self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'symbols', str).makeError(\"the `symbols` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `symbols`, `type`, `label`, `name`, `inputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'InputEnumSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.symbols is not None:\n r['symbols'] = save(self.symbols, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['symbols', 'type', 'label', 'name', 'inputBinding'])\n\n\nclass InputArraySchema(ArraySchema, InputSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.items = load_field(doc.get('items'), 
uri_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'items', str).makeError(\"the `items` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `items`, `type`, `label`, `inputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'InputArraySchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.items is not None:\n r['items'] = save(self.items, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['items', 'type', 'label', 'inputBinding'])\n\n\nclass OutputRecordField(RecordField):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n raise ValidationException(\"Missing name\")\n baseuri = self.name\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n try:\n self.type = load_field(doc.get('type'), 
typedsl_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `name`, `doc`, `type`, `outputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'OutputRecordField'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['name', 'doc', 'type', 'outputBinding'])\n\n\nclass OutputRecordSchema(RecordSchema, OutputSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'fields' in doc:\n try:\n self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_OutputRecordFieldLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'fields', str).makeError(\"the `fields` field is not valid because:\\n\"+str(e)))\n else:\n self.fields = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `fields`, `type`, `label`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 
'OutputRecordSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.fields is not None:\n r['fields'] = save(self.fields, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['fields', 'type', 'label'])\n\n\nclass OutputEnumSchema(EnumSchema, OutputSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'symbols', str).makeError(\"the `symbols` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `symbols`, `type`, `label`, `outputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'OutputEnumSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.symbols is not None:\n r['symbols'] = save(self.symbols, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['symbols', 'type', 'label', 'outputBinding'])\n\n\nclass OutputArraySchema(ArraySchema, OutputSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.items = load_field(doc.get('items'), 
uri_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'items', str).makeError(\"the `items` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `items`, `type`, `label`, `outputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'OutputArraySchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.items is not None:\n r['items'] = save(self.items, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['items', 'type', 'label', 'outputBinding'])\n\n\nclass InputParameter(Parameter):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'secondaryFiles' in doc:\n try:\n self.secondaryFiles = 
load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'secondaryFiles', str).makeError(\"the `secondaryFiles` field is not valid because:\\n\"+str(e)))\n else:\n self.secondaryFiles = None\n\n if 'streamable' in doc:\n try:\n self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'streamable', str).makeError(\"the `streamable` field is not valid because:\\n\"+str(e)))\n else:\n self.streamable = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'format' in doc:\n try:\n self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'format', str).makeError(\"the `format` field is not valid because:\\n\"+str(e)))\n else:\n self.format = None\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n if 'default' in doc:\n try:\n self.default = load_field(doc.get('default'), union_of_None_type_or_Any_type, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'default', str).makeError(\"the `default` field is not valid because:\\n\"+str(e)))\n else:\n self.default = None\n\n if 'type' in doc:\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n else:\n self.type = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `format`, `inputBinding`, `default`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'InputParameter'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.secondaryFiles is not None:\n r['secondaryFiles'] = save(self.secondaryFiles, top=False)\n\n if self.streamable is not None:\n 
r['streamable'] = save(self.streamable, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.format is not None:\n r['format'] = save(self.format, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if self.default is not None:\n r['default'] = save(self.default, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'format', 'inputBinding', 'default', 'type'])\n\n\nclass OutputParameter(Parameter):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'secondaryFiles' in doc:\n try:\n self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'secondaryFiles', str).makeError(\"the `secondaryFiles` field is not valid because:\\n\"+str(e)))\n else:\n self.secondaryFiles = None\n\n if 'streamable' in doc:\n try:\n self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'streamable', str).makeError(\"the `streamable` field is not valid because:\\n\"+str(e)))\n else:\n self.streamable = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n if 'format' in doc:\n try:\n self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'format', str).makeError(\"the `format` field is not valid because:\\n\"+str(e)))\n else:\n self.format = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in 
self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `outputBinding`, `format`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'OutputParameter'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.secondaryFiles is not None:\n r['secondaryFiles'] = save(self.secondaryFiles, top=False)\n\n if self.streamable is not None:\n r['streamable'] = save(self.streamable, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if self.format is not None:\n r['format'] = save(self.format, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format'])\n\n\nclass ProcessRequirement(Savable):\n \"\"\"\nA process requirement declares a prerequisite that may or must be fulfilled\nbefore executing a process. See [`Process.hints`](#process) and\n[`Process.requirements`](#process).\n\nProcess requirements are the primary mechanism for specifying extensions to\nthe CWL core specification.\n\n \"\"\"\n pass\n\nclass Process(Savable):\n \"\"\"\n\nThe base executable type in CWL is the `Process` object defined by the\ndocument. 
Note that the `Process` object is abstract and cannot be\ndirectly executed.\n\n \"\"\"\n pass\n\nclass InlineJavascriptRequirement(ProcessRequirement):\n \"\"\"\nIndicates that the workflow platform must support inline Javascript expressions.\nIf this requirement is not present, the workflow platform must not perform expression\ninterpolatation.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'InlineJavascriptRequirement':\n raise ValidationException(\"Not a InlineJavascriptRequirement\")\n\n if 'expressionLib' in doc:\n try:\n self.expressionLib = load_field(doc.get('expressionLib'), union_of_None_type_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'expressionLib', str).makeError(\"the `expressionLib` field is not valid because:\\n\"+str(e)))\n else:\n self.expressionLib = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `expressionLib`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'InlineJavascriptRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'InlineJavascriptRequirement'\n\n if self.expressionLib is not None:\n r['expressionLib'] = save(self.expressionLib, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'expressionLib'])\n\n\nclass SchemaDefRequirement(ProcessRequirement):\n \"\"\"\nThis field consists of an array of type definitions which must be used when\ninterpreting the `inputs` and `outputs` fields. When a `type` field\ncontain a IRI, the implementation must check if the type is defined in\n`schemaDefs` and use that definition. If the type is not found in\n`schemaDefs`, it is an error. 
The entries in `schemaDefs` must be\nprocessed in the order listed such that later schema definitions may refer\nto earlier schema definitions.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'SchemaDefRequirement':\n raise ValidationException(\"Not a SchemaDefRequirement\")\n\n try:\n self.types = load_field(doc.get('types'), array_of_union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'types', str).makeError(\"the `types` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `types`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'SchemaDefRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'SchemaDefRequirement'\n\n if self.types is not None:\n r['types'] = save(self.types, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'types'])\n\n\nclass EnvironmentDef(Savable):\n \"\"\"\nDefine an environment variable that will be set in the runtime environment\nby the workflow platform when executing the command line tool. 
May be the\nresult of executing an expression, such as getting a parameter from input.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.envName = load_field(doc.get('envName'), strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'envName', str).makeError(\"the `envName` field is not valid because:\\n\"+str(e)))\n\n try:\n self.envValue = load_field(doc.get('envValue'), union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'envValue', str).makeError(\"the `envValue` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `envName`, `envValue`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'EnvironmentDef'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.envName is not None:\n r['envName'] = save(self.envName, top=False)\n\n if self.envValue is not None:\n r['envValue'] = save(self.envValue, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['envName', 'envValue'])\n\n\nclass CommandLineBinding(InputBinding):\n \"\"\"\n\nWhen listed under `inputBinding` in the input schema, the term\n\"value\" refers to the the corresponding value in the input object. For\nbinding objects listed in `CommandLineTool.arguments`, the term \"value\"\nrefers to the effective value after evaluating `valueFrom`.\n\nThe binding behavior when building the command line depends on the data\ntype of the value. If there is a mismatch between the type described by\nthe input schema and the effective value, such as resulting from an\nexpression evaluation, an implementation must use the data type of the\neffective value.\n\n - **string**: Add `prefix` and the string to the command line.\n\n - **number**: Add `prefix` and decimal representation to command line.\n\n - **boolean**: If true, add `prefix` to the command line. If false, add\n nothing.\n\n - **File**: Add `prefix` and the value of\n [`File.path`](#File) to the command line.\n\n - **array**: If `itemSeparator` is specified, add `prefix` and the join\n the array into a single string with `itemSeparator` separating the\n items. 
Otherwise first add `prefix`, then recursively process\n individual elements.\n\n - **object**: Add `prefix` only, and recursively add object fields for\n which `inputBinding` is specified.\n\n - **null**: Add nothing.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'loadContents' in doc:\n try:\n self.loadContents = load_field(doc.get('loadContents'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'loadContents', str).makeError(\"the `loadContents` field is not valid because:\\n\"+str(e)))\n else:\n self.loadContents = None\n\n if 'position' in doc:\n try:\n self.position = load_field(doc.get('position'), union_of_None_type_or_inttype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'position', str).makeError(\"the `position` field is not valid because:\\n\"+str(e)))\n else:\n self.position = None\n\n if 'prefix' in doc:\n try:\n self.prefix = load_field(doc.get('prefix'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'prefix', str).makeError(\"the `prefix` field is not valid because:\\n\"+str(e)))\n else:\n self.prefix = None\n\n if 'separate' in doc:\n try:\n self.separate = load_field(doc.get('separate'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'separate', str).makeError(\"the `separate` field is not valid because:\\n\"+str(e)))\n else:\n self.separate = None\n\n if 'itemSeparator' in doc:\n try:\n self.itemSeparator = load_field(doc.get('itemSeparator'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'itemSeparator', str).makeError(\"the `itemSeparator` field is not valid because:\\n\"+str(e)))\n else:\n self.itemSeparator = None\n\n if 'valueFrom' in doc:\n try:\n self.valueFrom = load_field(doc.get('valueFrom'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'valueFrom', str).makeError(\"the `valueFrom` field is not valid because:\\n\"+str(e)))\n else:\n self.valueFrom = None\n\n if 'shellQuote' in doc:\n try:\n self.shellQuote = load_field(doc.get('shellQuote'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'shellQuote', str).makeError(\"the `shellQuote` field is not valid because:\\n\"+str(e)))\n else:\n self.shellQuote = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `loadContents`, `position`, `prefix`, `separate`, `itemSeparator`, `valueFrom`, `shellQuote`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandLineBinding'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.loadContents is not None:\n r['loadContents'] = 
save(self.loadContents, top=False)\n\n if self.position is not None:\n r['position'] = save(self.position, top=False)\n\n if self.prefix is not None:\n r['prefix'] = save(self.prefix, top=False)\n\n if self.separate is not None:\n r['separate'] = save(self.separate, top=False)\n\n if self.itemSeparator is not None:\n r['itemSeparator'] = save(self.itemSeparator, top=False)\n\n if self.valueFrom is not None:\n r['valueFrom'] = save(self.valueFrom, top=False)\n\n if self.shellQuote is not None:\n r['shellQuote'] = save(self.shellQuote, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['loadContents', 'position', 'prefix', 'separate', 'itemSeparator', 'valueFrom', 'shellQuote'])\n\n\nclass CommandOutputBinding(OutputBinding):\n \"\"\"\nDescribes how to generate an output parameter based on the files produced\nby a CommandLineTool.\n\nThe output parameter value is generated by applying these operations in the\nfollowing order:\n\n - glob\n - loadContents\n - outputEval\n - secondaryFiles\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'glob' in doc:\n try:\n self.glob = load_field(doc.get('glob'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'glob', str).makeError(\"the `glob` field is not valid because:\\n\"+str(e)))\n else:\n self.glob = None\n\n if 'loadContents' in doc:\n try:\n self.loadContents = load_field(doc.get('loadContents'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'loadContents', str).makeError(\"the `loadContents` field is not valid because:\\n\"+str(e)))\n else:\n self.loadContents = None\n\n if 'outputEval' in doc:\n try:\n self.outputEval = load_field(doc.get('outputEval'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputEval', str).makeError(\"the `outputEval` field is not valid because:\\n\"+str(e)))\n else:\n self.outputEval = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `glob`, `loadContents`, `outputEval`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandOutputBinding'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.glob is not None:\n r['glob'] = save(self.glob, top=False)\n\n if self.loadContents is not None:\n r['loadContents'] = save(self.loadContents, top=False)\n\n if self.outputEval is not None:\n r['outputEval'] = save(self.outputEval, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['glob', 'loadContents', 'outputEval'])\n\n\nclass CommandInputRecordField(InputRecordField):\n def __init__(self, _doc, baseuri, loadingOptions, 
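# --- Illustrative sketch (editor's addition, not generated code) ---
# The CommandOutputBinding docstring above fixes the order in which an output
# value is produced: glob, then loadContents, then outputEval, then
# secondaryFiles.  The outline below is hypothetical (the callables are
# placeholders and File objects are assumed to be plain dicts); it only makes
# the documented ordering explicit.
def _sketch_collect_output(binding, run_glob, read_contents, evaluate, attach_secondary):
    files = run_glob(binding["glob"]) if binding.get("glob") else []   # 1. glob
    if binding.get("loadContents"):                                    # 2. loadContents
        for f in files:
            f["contents"] = read_contents(f)
    if binding.get("outputEval"):                                      # 3. outputEval
        value = evaluate(binding["outputEval"], files)
    else:
        value = files
    return attach_secondary(value)                                     # 4. secondaryFiles

# Example binding, as it appears once parsed into a plain dict:
# {"glob": "*.txt", "loadContents": True, "outputEval": "$(self[0].contents.trim())"}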
docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n raise ValidationException(\"Missing name\")\n baseuri = self.name\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `name`, `doc`, `type`, `inputBinding`, `label`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandInputRecordField'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['name', 'doc', 'type', 'inputBinding', 'label'])\n\n\nclass CommandInputRecordSchema(InputRecordSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n 
self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n self.name = \"_:\" + str(uuid.uuid4())\n baseuri = self.name\n if 'fields' in doc:\n try:\n self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_CommandInputRecordFieldLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'fields', str).makeError(\"the `fields` field is not valid because:\\n\"+str(e)))\n else:\n self.fields = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `fields`, `type`, `label`, `name`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandInputRecordSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.fields is not None:\n r['fields'] = save(self.fields, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['fields', 'type', 'label', 'name'])\n\n\nclass CommandInputEnumSchema(InputEnumSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n self.name = \"_:\" + str(uuid.uuid4())\n baseuri = self.name\n try:\n self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'symbols', str).makeError(\"the `symbols` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = 
load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `symbols`, `type`, `label`, `name`, `inputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandInputEnumSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.symbols is not None:\n r['symbols'] = save(self.symbols, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['symbols', 'type', 'label', 'name', 'inputBinding'])\n\n\nclass CommandInputArraySchema(InputArraySchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.items = load_field(doc.get('items'), uri_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'items', str).makeError(\"the `items` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = 
load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `items`, `type`, `label`, `inputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandInputArraySchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.items is not None:\n r['items'] = save(self.items, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['items', 'type', 'label', 'inputBinding'])\n\n\nclass CommandOutputRecordField(OutputRecordField):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n raise ValidationException(\"Missing name\")\n baseuri = self.name\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = 
doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `name`, `doc`, `type`, `outputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandOutputRecordField'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['name', 'doc', 'type', 'outputBinding'])\n\n\nclass CommandOutputRecordSchema(OutputRecordSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'name' in doc:\n try:\n self.name = load_field(doc.get('name'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'name', str).makeError(\"the `name` field is not valid because:\\n\"+str(e)))\n else:\n self.name = None\n\n\n if self.name is None:\n if docRoot is not None:\n self.name = docRoot\n else:\n self.name = \"_:\" + str(uuid.uuid4())\n baseuri = self.name\n if 'fields' in doc:\n try:\n self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_CommandOutputRecordFieldLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'fields', str).makeError(\"the `fields` field is not valid because:\\n\"+str(e)))\n else:\n self.fields = None\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `fields`, `type`, `label`, `name`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandOutputRecordSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.name is not None:\n r['name'] = save(self.name, top=False)\n\n if self.fields is not None:\n r['fields'] = save(self.fields, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] 
= self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['fields', 'type', 'label', 'name'])\n\n\nclass CommandOutputEnumSchema(OutputEnumSchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'symbols', str).makeError(\"the `symbols` field is not valid because:\\n\"+str(e)))\n\n try:\n self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `symbols`, `type`, `label`, `outputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandOutputEnumSchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.symbols is not None:\n r['symbols'] = save(self.symbols, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['symbols', 'type', 'label', 'outputBinding'])\n\n\nclass CommandOutputArraySchema(OutputArraySchema):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.items = load_field(doc.get('items'), uri_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'items', str).makeError(\"the `items` field is not valid because:\\n\"+str(e)))\n\n try:\n 
self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `items`, `type`, `label`, `outputBinding`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandOutputArraySchema'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.items is not None:\n r['items'] = save(self.items, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['items', 'type', 'label', 'outputBinding'])\n\n\nclass CommandInputParameter(InputParameter):\n \"\"\"\nAn input parameter for a CommandLineTool.\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'secondaryFiles' in doc:\n try:\n self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'secondaryFiles', str).makeError(\"the `secondaryFiles` field is not valid because:\\n\"+str(e)))\n else:\n self.secondaryFiles = None\n\n if 
'streamable' in doc:\n try:\n self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'streamable', str).makeError(\"the `streamable` field is not valid because:\\n\"+str(e)))\n else:\n self.streamable = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'format' in doc:\n try:\n self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'format', str).makeError(\"the `format` field is not valid because:\\n\"+str(e)))\n else:\n self.format = None\n\n if 'inputBinding' in doc:\n try:\n self.inputBinding = load_field(doc.get('inputBinding'), union_of_None_type_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputBinding', str).makeError(\"the `inputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.inputBinding = None\n\n if 'default' in doc:\n try:\n self.default = load_field(doc.get('default'), union_of_None_type_or_Any_type, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'default', str).makeError(\"the `default` field is not valid because:\\n\"+str(e)))\n else:\n self.default = None\n\n if 'type' in doc:\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n else:\n self.type = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `format`, `inputBinding`, `default`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandInputParameter'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.secondaryFiles is not None:\n r['secondaryFiles'] = save(self.secondaryFiles, top=False)\n\n if self.streamable is not None:\n r['streamable'] = save(self.streamable, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.format is not None:\n r['format'] = save(self.format, top=False)\n\n if self.inputBinding is not None:\n r['inputBinding'] = save(self.inputBinding, top=False)\n\n if self.default is not None:\n 
r['default'] = save(self.default, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'format', 'inputBinding', 'default', 'type'])\n\n\nclass CommandOutputParameter(OutputParameter):\n \"\"\"\nAn output parameter for a CommandLineTool.\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'secondaryFiles' in doc:\n try:\n self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'secondaryFiles', str).makeError(\"the `secondaryFiles` field is not valid because:\\n\"+str(e)))\n else:\n self.secondaryFiles = None\n\n if 'streamable' in doc:\n try:\n self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'streamable', str).makeError(\"the `streamable` field is not valid because:\\n\"+str(e)))\n else:\n self.streamable = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n if 'format' in doc:\n try:\n self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'format', str).makeError(\"the `format` field is not valid because:\\n\"+str(e)))\n else:\n self.format = None\n\n if 'type' in doc:\n try:\n self.type = load_field(doc.get('type'), 
typedsl_union_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n else:\n self.type = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `outputBinding`, `format`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandOutputParameter'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.secondaryFiles is not None:\n r['secondaryFiles'] = save(self.secondaryFiles, top=False)\n\n if self.streamable is not None:\n r['streamable'] = save(self.streamable, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if self.format is not None:\n r['format'] = save(self.format, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format', 'type'])\n\n\nclass CommandLineTool(Process):\n \"\"\"\nThis defines the schema of the CWL Command Line Tool Description document.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'CommandLineTool':\n raise ValidationException(\"Not a CommandLineTool\")\n\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n self.id = \"_:\" + str(uuid.uuid4())\n baseuri = self.id\n try:\n self.inputs = load_field(doc.get('inputs'), idmap_inputs_array_of_CommandInputParameterLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputs', str).makeError(\"the `inputs` field is not valid because:\\n\"+str(e)))\n\n try:\n self.outputs = load_field(doc.get('outputs'), idmap_outputs_array_of_CommandOutputParameterLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputs', str).makeError(\"the `outputs` field is not valid 
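# --- Illustrative sketch (editor's addition, not generated code) ---
# A minimal CWL v1.0 CommandLineTool description, written as the plain dict
# shape this class is generated to load.  The tool itself (an `echo` wrapper
# with one string input) is only an example.
_example_tool_doc = {
    "cwlVersion": "v1.0",
    "class": "CommandLineTool",
    "baseCommand": "echo",
    "inputs": [
        {"id": "message", "type": "string", "inputBinding": {"position": 1}},
    ],
    "outputs": [],
}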
because:\\n\"+str(e)))\n\n if 'requirements' in doc:\n try:\n self.requirements = load_field(doc.get('requirements'), idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'requirements', str).makeError(\"the `requirements` field is not valid because:\\n\"+str(e)))\n else:\n self.requirements = None\n\n if 'hints' in doc:\n try:\n self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'hints', str).makeError(\"the `hints` field is not valid because:\\n\"+str(e)))\n else:\n self.hints = None\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'cwlVersion' in doc:\n try:\n self.cwlVersion = load_field(doc.get('cwlVersion'), uri_union_of_None_type_or_CWLVersionLoader_False_True_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'cwlVersion', str).makeError(\"the `cwlVersion` field is not valid because:\\n\"+str(e)))\n else:\n self.cwlVersion = None\n\n if 'baseCommand' in doc:\n try:\n self.baseCommand = load_field(doc.get('baseCommand'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'baseCommand', str).makeError(\"the `baseCommand` field is not valid because:\\n\"+str(e)))\n else:\n self.baseCommand = None\n\n if 'arguments' in doc:\n try:\n self.arguments = load_field(doc.get('arguments'), union_of_None_type_or_array_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'arguments', str).makeError(\"the `arguments` field is not valid because:\\n\"+str(e)))\n else:\n self.arguments = None\n\n if 'stdin' in doc:\n try:\n self.stdin = load_field(doc.get('stdin'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'stdin', str).makeError(\"the `stdin` field is not valid because:\\n\"+str(e)))\n else:\n self.stdin = None\n\n if 'stderr' in doc:\n try:\n self.stderr = load_field(doc.get('stderr'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'stderr', str).makeError(\"the `stderr` field is not valid because:\\n\"+str(e)))\n else:\n self.stderr = None\n\n if 'stdout' in doc:\n try:\n self.stdout = 
load_field(doc.get('stdout'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'stdout', str).makeError(\"the `stdout` field is not valid because:\\n\"+str(e)))\n else:\n self.stdout = None\n\n if 'successCodes' in doc:\n try:\n self.successCodes = load_field(doc.get('successCodes'), union_of_None_type_or_array_of_inttype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'successCodes', str).makeError(\"the `successCodes` field is not valid because:\\n\"+str(e)))\n else:\n self.successCodes = None\n\n if 'temporaryFailCodes' in doc:\n try:\n self.temporaryFailCodes = load_field(doc.get('temporaryFailCodes'), union_of_None_type_or_array_of_inttype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'temporaryFailCodes', str).makeError(\"the `temporaryFailCodes` field is not valid because:\\n\"+str(e)))\n else:\n self.temporaryFailCodes = None\n\n if 'permanentFailCodes' in doc:\n try:\n self.permanentFailCodes = load_field(doc.get('permanentFailCodes'), union_of_None_type_or_array_of_inttype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'permanentFailCodes', str).makeError(\"the `permanentFailCodes` field is not valid because:\\n\"+str(e)))\n else:\n self.permanentFailCodes = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `id`, `inputs`, `outputs`, `requirements`, `hints`, `label`, `doc`, `cwlVersion`, `class`, `baseCommand`, `arguments`, `stdin`, `stderr`, `stdout`, `successCodes`, `temporaryFailCodes`, `permanentFailCodes`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'CommandLineTool'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'CommandLineTool'\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.inputs is not None:\n r['inputs'] = save(self.inputs, top=False)\n\n if self.outputs is not None:\n r['outputs'] = save(self.outputs, top=False)\n\n if self.requirements is not None:\n r['requirements'] = save(self.requirements, top=False)\n\n if self.hints is not None:\n r['hints'] = save(self.hints, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.cwlVersion is not None:\n r['cwlVersion'] = save(self.cwlVersion, top=False)\n\n if self.baseCommand is not None:\n r['baseCommand'] = save(self.baseCommand, top=False)\n\n if self.arguments is not None:\n r['arguments'] = save(self.arguments, top=False)\n\n if self.stdin is not None:\n r['stdin'] = save(self.stdin, top=False)\n\n if self.stderr is not None:\n r['stderr'] = save(self.stderr, top=False)\n\n if self.stdout is not None:\n r['stdout'] = save(self.stdout, top=False)\n\n if self.successCodes is not None:\n r['successCodes'] = save(self.successCodes, top=False)\n\n if self.temporaryFailCodes is not None:\n r['temporaryFailCodes'] = save(self.temporaryFailCodes, top=False)\n\n if self.permanentFailCodes is not None:\n r['permanentFailCodes'] = 
save(self.permanentFailCodes, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['id', 'inputs', 'outputs', 'requirements', 'hints', 'label', 'doc', 'cwlVersion', 'class', 'baseCommand', 'arguments', 'stdin', 'stderr', 'stdout', 'successCodes', 'temporaryFailCodes', 'permanentFailCodes'])\n\n\nclass DockerRequirement(ProcessRequirement):\n \"\"\"\nIndicates that a workflow component should be run in a\n[Docker](http://docker.com) container, and specifies how to fetch or build\nthe image.\n\nIf a CommandLineTool lists `DockerRequirement` under\n`hints` (or `requirements`), it may (or must) be run in the specified Docker\ncontainer.\n\nThe platform must first acquire or install the correct Docker image as\nspecified by `dockerPull`, `dockerImport`, `dockerLoad` or `dockerFile`.\n\nThe platform must execute the tool in the container using `docker run` with\nthe appropriate Docker image and tool command line.\n\nThe workflow platform may provide input files and the designated output\ndirectory through the use of volume bind mounts. The platform may rewrite\nfile paths in the input object to correspond to the Docker bind mounted\nlocations.\n\nWhen running a tool contained in Docker, the workflow platform must not\nassume anything about the contents of the Docker container, such as the\npresence or absence of specific software, except to assume that the\ngenerated command line represents a valid command within the runtime\nenvironment of the container.\n\n## Interaction with other requirements\n\nIf [EnvVarRequirement](#EnvVarRequirement) is specified alongside a\nDockerRequirement, the environment variables must be provided to Docker\nusing `--env` or `--env-file` and interact with the container's preexisting\nenvironment as defined by Docker.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'DockerRequirement':\n raise ValidationException(\"Not a DockerRequirement\")\n\n if 'dockerPull' in doc:\n try:\n self.dockerPull = load_field(doc.get('dockerPull'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'dockerPull', str).makeError(\"the `dockerPull` field is not valid because:\\n\"+str(e)))\n else:\n self.dockerPull = None\n\n if 'dockerLoad' in doc:\n try:\n self.dockerLoad = load_field(doc.get('dockerLoad'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'dockerLoad', str).makeError(\"the `dockerLoad` field is not valid because:\\n\"+str(e)))\n else:\n self.dockerLoad = None\n\n if 'dockerFile' in doc:\n try:\n self.dockerFile = load_field(doc.get('dockerFile'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'dockerFile', str).makeError(\"the `dockerFile` field is not valid because:\\n\"+str(e)))\n else:\n self.dockerFile = None\n\n if 'dockerImport' in doc:\n try:\n self.dockerImport = load_field(doc.get('dockerImport'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'dockerImport', str).makeError(\"the `dockerImport` field is not valid because:\\n\"+str(e)))\n else:\n 
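# --- Illustrative sketch (editor's addition, not generated code) ---
# A DockerRequirement entry as it would appear under a tool's `requirements`
# (or `hints`), written as the plain dict this class parses.  The image name
# and output directory are arbitrary example values.  Per the docstring above,
# any EnvVarRequirement given alongside it must reach the container via
# `docker run --env` / `--env-file`.
_example_docker_requirement = {
    "class": "DockerRequirement",
    "dockerPull": "python:3.11-slim",
    "dockerOutputDirectory": "/var/spool/cwl",
}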
self.dockerImport = None\n\n if 'dockerImageId' in doc:\n try:\n self.dockerImageId = load_field(doc.get('dockerImageId'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'dockerImageId', str).makeError(\"the `dockerImageId` field is not valid because:\\n\"+str(e)))\n else:\n self.dockerImageId = None\n\n if 'dockerOutputDirectory' in doc:\n try:\n self.dockerOutputDirectory = load_field(doc.get('dockerOutputDirectory'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'dockerOutputDirectory', str).makeError(\"the `dockerOutputDirectory` field is not valid because:\\n\"+str(e)))\n else:\n self.dockerOutputDirectory = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `dockerPull`, `dockerLoad`, `dockerFile`, `dockerImport`, `dockerImageId`, `dockerOutputDirectory`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'DockerRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'DockerRequirement'\n\n if self.dockerPull is not None:\n r['dockerPull'] = save(self.dockerPull, top=False)\n\n if self.dockerLoad is not None:\n r['dockerLoad'] = save(self.dockerLoad, top=False)\n\n if self.dockerFile is not None:\n r['dockerFile'] = save(self.dockerFile, top=False)\n\n if self.dockerImport is not None:\n r['dockerImport'] = save(self.dockerImport, top=False)\n\n if self.dockerImageId is not None:\n r['dockerImageId'] = save(self.dockerImageId, top=False)\n\n if self.dockerOutputDirectory is not None:\n r['dockerOutputDirectory'] = save(self.dockerOutputDirectory, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'dockerPull', 'dockerLoad', 'dockerFile', 'dockerImport', 'dockerImageId', 'dockerOutputDirectory'])\n\n\nclass SoftwareRequirement(ProcessRequirement):\n \"\"\"\nA list of software packages that should be configured in the environment of\nthe defined process.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'SoftwareRequirement':\n raise ValidationException(\"Not a SoftwareRequirement\")\n\n try:\n self.packages = load_field(doc.get('packages'), idmap_packages_array_of_SoftwarePackageLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'packages', str).makeError(\"the `packages` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `packages`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 
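# --- Illustrative sketch (editor's addition, not generated code) ---
# A SoftwareRequirement containing one SoftwarePackage entry, as a plain dict.
# The package name, version and spec URI are placeholder example values.
_example_software_requirement = {
    "class": "SoftwareRequirement",
    "packages": [
        {"package": "samtools",
         "version": ["1.9"],
         "specs": ["https://example.org/specs/samtools"]},
    ],
}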
'SoftwareRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'SoftwareRequirement'\n\n if self.packages is not None:\n r['packages'] = save(self.packages, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'packages'])\n\n\nclass SoftwarePackage(Savable):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n try:\n self.package = load_field(doc.get('package'), strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'package', str).makeError(\"the `package` field is not valid because:\\n\"+str(e)))\n\n if 'version' in doc:\n try:\n self.version = load_field(doc.get('version'), union_of_None_type_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'version', str).makeError(\"the `version` field is not valid because:\\n\"+str(e)))\n else:\n self.version = None\n\n if 'specs' in doc:\n try:\n self.specs = load_field(doc.get('specs'), union_of_None_type_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'specs', str).makeError(\"the `specs` field is not valid because:\\n\"+str(e)))\n else:\n self.specs = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `package`, `version`, `specs`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'SoftwarePackage'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.package is not None:\n r['package'] = save(self.package, top=False)\n\n if self.version is not None:\n r['version'] = save(self.version, top=False)\n\n if self.specs is not None:\n r['specs'] = save(self.specs, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['package', 'version', 'specs'])\n\n\nclass Dirent(Savable):\n \"\"\"\nDefine a file or subdirectory that must be placed in the designated output\ndirectory prior to executing the command line tool. 
May be the result of\nexecuting an expression, such as building a configuration file from a\ntemplate.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'entryname' in doc:\n try:\n self.entryname = load_field(doc.get('entryname'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'entryname', str).makeError(\"the `entryname` field is not valid because:\\n\"+str(e)))\n else:\n self.entryname = None\n\n try:\n self.entry = load_field(doc.get('entry'), union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'entry', str).makeError(\"the `entry` field is not valid because:\\n\"+str(e)))\n\n if 'writable' in doc:\n try:\n self.writable = load_field(doc.get('writable'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'writable', str).makeError(\"the `writable` field is not valid because:\\n\"+str(e)))\n else:\n self.writable = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `entryname`, `entry`, `writable`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'Dirent'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.entryname is not None:\n r['entryname'] = save(self.entryname, top=False)\n\n if self.entry is not None:\n r['entry'] = save(self.entry, top=False)\n\n if self.writable is not None:\n r['writable'] = save(self.writable, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['entryname', 'entry', 'writable'])\n\n\nclass InitialWorkDirRequirement(ProcessRequirement):\n \"\"\"\nDefine a list of files and subdirectories that must be created by the workflow platform in the designated output directory prior to executing the command line tool.\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'InitialWorkDirRequirement':\n raise ValidationException(\"Not a InitialWorkDirRequirement\")\n\n try:\n self.listing = load_field(doc.get('listing'), union_of_array_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'listing', str).makeError(\"the `listing` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid 
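# --- Illustrative sketch (editor's addition, not generated code) ---
# An InitialWorkDirRequirement whose `listing` holds a single Dirent, staging a
# configuration file built from an expression (the scenario the Dirent
# docstring above describes).  The entry name and expression are example
# values, and the JavaScript expression would additionally need
# InlineJavascriptRequirement elsewhere in the tool.
_example_initial_workdir = {
    "class": "InitialWorkDirRequirement",
    "listing": [
        {"entryname": "settings.json",
         "entry": "$(JSON.stringify(inputs.settings))",
         "writable": False},
    ],
}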
field `%s`, expected one of: `class`, `listing`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'InitialWorkDirRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'InitialWorkDirRequirement'\n\n if self.listing is not None:\n r['listing'] = save(self.listing, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'listing'])\n\n\nclass EnvVarRequirement(ProcessRequirement):\n \"\"\"\nDefine a list of environment variables which will be set in the\nexecution environment of the tool. See `EnvironmentDef` for details.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'EnvVarRequirement':\n raise ValidationException(\"Not a EnvVarRequirement\")\n\n try:\n self.envDef = load_field(doc.get('envDef'), idmap_envDef_array_of_EnvironmentDefLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'envDef', str).makeError(\"the `envDef` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `envDef`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'EnvVarRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'EnvVarRequirement'\n\n if self.envDef is not None:\n r['envDef'] = save(self.envDef, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'envDef'])\n\n\nclass ShellCommandRequirement(ProcessRequirement):\n \"\"\"\nModify the behavior of CommandLineTool to generate a single string\ncontaining a shell command line. Each item in the argument list must be\njoined into a string separated by single spaces and quoted to prevent\nintepretation by the shell, unless `CommandLineBinding` for that argument\ncontains `shellQuote: false`. 
If `shellQuote: false` is specified, the\nargument is joined into the command string without quoting, which allows\nthe use of shell metacharacters such as `|` for pipes.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'ShellCommandRequirement':\n raise ValidationException(\"Not a ShellCommandRequirement\")\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'ShellCommandRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'ShellCommandRequirement'\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class'])\n\n\nclass ResourceRequirement(ProcessRequirement):\n \"\"\"\nSpecify basic hardware resource requirements.\n\n\"min\" is the minimum amount of a resource that must be reserved to schedule\na job. If \"min\" cannot be satisfied, the job should not be run.\n\n\"max\" is the maximum amount of a resource that the job shall be permitted\nto use. If a node has sufficient resources, multiple jobs may be scheduled\non a single node provided each job's \"max\" resource requirements are\nmet. 
If a job attempts to exceed its \"max\" resource allocation, an\nimplementation may deny additional resources, which may result in job\nfailure.\n\nIf \"min\" is specified but \"max\" is not, then \"max\" == \"min\"\nIf \"max\" is specified by \"min\" is not, then \"min\" == \"max\".\n\nIt is an error if max < min.\n\nIt is an error if the value of any of these fields is negative.\n\nIf neither \"min\" nor \"max\" is specified for a resource, an implementation may provide a default.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'ResourceRequirement':\n raise ValidationException(\"Not a ResourceRequirement\")\n\n if 'coresMin' in doc:\n try:\n self.coresMin = load_field(doc.get('coresMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'coresMin', str).makeError(\"the `coresMin` field is not valid because:\\n\"+str(e)))\n else:\n self.coresMin = None\n\n if 'coresMax' in doc:\n try:\n self.coresMax = load_field(doc.get('coresMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'coresMax', str).makeError(\"the `coresMax` field is not valid because:\\n\"+str(e)))\n else:\n self.coresMax = None\n\n if 'ramMin' in doc:\n try:\n self.ramMin = load_field(doc.get('ramMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'ramMin', str).makeError(\"the `ramMin` field is not valid because:\\n\"+str(e)))\n else:\n self.ramMin = None\n\n if 'ramMax' in doc:\n try:\n self.ramMax = load_field(doc.get('ramMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'ramMax', str).makeError(\"the `ramMax` field is not valid because:\\n\"+str(e)))\n else:\n self.ramMax = None\n\n if 'tmpdirMin' in doc:\n try:\n self.tmpdirMin = load_field(doc.get('tmpdirMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'tmpdirMin', str).makeError(\"the `tmpdirMin` field is not valid because:\\n\"+str(e)))\n else:\n self.tmpdirMin = None\n\n if 'tmpdirMax' in doc:\n try:\n self.tmpdirMax = load_field(doc.get('tmpdirMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'tmpdirMax', str).makeError(\"the `tmpdirMax` field is not valid because:\\n\"+str(e)))\n else:\n self.tmpdirMax = None\n\n if 'outdirMin' in doc:\n try:\n self.outdirMin = load_field(doc.get('outdirMin'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outdirMin', str).makeError(\"the `outdirMin` field is not valid because:\\n\"+str(e)))\n else:\n self.outdirMin = None\n\n if 'outdirMax' in doc:\n try:\n self.outdirMax = load_field(doc.get('outdirMax'), union_of_None_type_or_inttype_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n 
errors.append(SourceLine(doc, 'outdirMax', str).makeError(\"the `outdirMax` field is not valid because:\\n\"+str(e)))\n else:\n self.outdirMax = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`, `coresMin`, `coresMax`, `ramMin`, `ramMax`, `tmpdirMin`, `tmpdirMax`, `outdirMin`, `outdirMax`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'ResourceRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'ResourceRequirement'\n\n if self.coresMin is not None:\n r['coresMin'] = save(self.coresMin, top=False)\n\n if self.coresMax is not None:\n r['coresMax'] = save(self.coresMax, top=False)\n\n if self.ramMin is not None:\n r['ramMin'] = save(self.ramMin, top=False)\n\n if self.ramMax is not None:\n r['ramMax'] = save(self.ramMax, top=False)\n\n if self.tmpdirMin is not None:\n r['tmpdirMin'] = save(self.tmpdirMin, top=False)\n\n if self.tmpdirMax is not None:\n r['tmpdirMax'] = save(self.tmpdirMax, top=False)\n\n if self.outdirMin is not None:\n r['outdirMin'] = save(self.outdirMin, top=False)\n\n if self.outdirMax is not None:\n r['outdirMax'] = save(self.outdirMax, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class', 'coresMin', 'coresMax', 'ramMin', 'ramMax', 'tmpdirMin', 'tmpdirMax', 'outdirMin', 'outdirMax'])\n\n\nclass ExpressionToolOutputParameter(OutputParameter):\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'secondaryFiles' in doc:\n try:\n self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'secondaryFiles', str).makeError(\"the `secondaryFiles` field is not valid because:\\n\"+str(e)))\n else:\n self.secondaryFiles = None\n\n if 'streamable' in doc:\n try:\n self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'streamable', str).makeError(\"the `streamable` field is not valid because:\\n\"+str(e)))\n else:\n 
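            # Optional field not present in the source document; fall back to None.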
self.streamable = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n if 'format' in doc:\n try:\n self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'format', str).makeError(\"the `format` field is not valid because:\\n\"+str(e)))\n else:\n self.format = None\n\n if 'type' in doc:\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n else:\n self.type = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `outputBinding`, `format`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'ExpressionToolOutputParameter'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.secondaryFiles is not None:\n r['secondaryFiles'] = save(self.secondaryFiles, top=False)\n\n if self.streamable is not None:\n r['streamable'] = save(self.streamable, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if self.format is not None:\n r['format'] = save(self.format, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format', 'type'])\n\n\nclass ExpressionTool(Process):\n \"\"\"\nExecute an expression as a Workflow step.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'ExpressionTool':\n raise 
ValidationException(\"Not a ExpressionTool\")\n\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n self.id = \"_:\" + str(uuid.uuid4())\n baseuri = self.id\n try:\n self.inputs = load_field(doc.get('inputs'), idmap_inputs_array_of_InputParameterLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputs', str).makeError(\"the `inputs` field is not valid because:\\n\"+str(e)))\n\n try:\n self.outputs = load_field(doc.get('outputs'), idmap_outputs_array_of_ExpressionToolOutputParameterLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputs', str).makeError(\"the `outputs` field is not valid because:\\n\"+str(e)))\n\n if 'requirements' in doc:\n try:\n self.requirements = load_field(doc.get('requirements'), idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'requirements', str).makeError(\"the `requirements` field is not valid because:\\n\"+str(e)))\n else:\n self.requirements = None\n\n if 'hints' in doc:\n try:\n self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'hints', str).makeError(\"the `hints` field is not valid because:\\n\"+str(e)))\n else:\n self.hints = None\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'cwlVersion' in doc:\n try:\n self.cwlVersion = load_field(doc.get('cwlVersion'), uri_union_of_None_type_or_CWLVersionLoader_False_True_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'cwlVersion', str).makeError(\"the `cwlVersion` field is not valid because:\\n\"+str(e)))\n else:\n self.cwlVersion = None\n\n try:\n self.expression = load_field(doc.get('expression'), union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'expression', str).makeError(\"the `expression` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, 
scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `id`, `inputs`, `outputs`, `requirements`, `hints`, `label`, `doc`, `cwlVersion`, `class`, `expression`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'ExpressionTool'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'ExpressionTool'\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.inputs is not None:\n r['inputs'] = save(self.inputs, top=False)\n\n if self.outputs is not None:\n r['outputs'] = save(self.outputs, top=False)\n\n if self.requirements is not None:\n r['requirements'] = save(self.requirements, top=False)\n\n if self.hints is not None:\n r['hints'] = save(self.hints, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.cwlVersion is not None:\n r['cwlVersion'] = save(self.cwlVersion, top=False)\n\n if self.expression is not None:\n r['expression'] = save(self.expression, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['id', 'inputs', 'outputs', 'requirements', 'hints', 'label', 'doc', 'cwlVersion', 'class', 'expression'])\n\n\nclass WorkflowOutputParameter(OutputParameter):\n \"\"\"\nDescribe an output parameter of a workflow. The parameter must be\nconnected to one or more parameters defined in the workflow that will\nprovide the value of the output parameter.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'secondaryFiles' in doc:\n try:\n self.secondaryFiles = load_field(doc.get('secondaryFiles'), union_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'secondaryFiles', str).makeError(\"the `secondaryFiles` field is not valid because:\\n\"+str(e)))\n else:\n self.secondaryFiles = None\n\n if 'streamable' in doc:\n try:\n self.streamable = load_field(doc.get('streamable'), union_of_None_type_or_booltype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'streamable', str).makeError(\"the `streamable` field is not valid because:\\n\"+str(e)))\n else:\n self.streamable = None\n\n if 'doc' in doc:\n try:\n self.doc = 
load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'outputBinding' in doc:\n try:\n self.outputBinding = load_field(doc.get('outputBinding'), union_of_None_type_or_CommandOutputBindingLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputBinding', str).makeError(\"the `outputBinding` field is not valid because:\\n\"+str(e)))\n else:\n self.outputBinding = None\n\n if 'format' in doc:\n try:\n self.format = load_field(doc.get('format'), uri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'format', str).makeError(\"the `format` field is not valid because:\\n\"+str(e)))\n else:\n self.format = None\n\n if 'outputSource' in doc:\n try:\n self.outputSource = load_field(doc.get('outputSource'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_0, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputSource', str).makeError(\"the `outputSource` field is not valid because:\\n\"+str(e)))\n else:\n self.outputSource = None\n\n if 'linkMerge' in doc:\n try:\n self.linkMerge = load_field(doc.get('linkMerge'), union_of_None_type_or_LinkMergeMethodLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'linkMerge', str).makeError(\"the `linkMerge` field is not valid because:\\n\"+str(e)))\n else:\n self.linkMerge = None\n\n if 'type' in doc:\n try:\n self.type = load_field(doc.get('type'), typedsl_union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'type', str).makeError(\"the `type` field is not valid because:\\n\"+str(e)))\n else:\n self.type = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `label`, `secondaryFiles`, `streamable`, `doc`, `id`, `outputBinding`, `format`, `outputSource`, `linkMerge`, `type`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'WorkflowOutputParameter'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.secondaryFiles is not None:\n r['secondaryFiles'] = save(self.secondaryFiles, top=False)\n\n if self.streamable is not None:\n r['streamable'] = save(self.streamable, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.outputBinding is not None:\n r['outputBinding'] = save(self.outputBinding, top=False)\n\n if self.format is not None:\n r['format'] = save(self.format, top=False)\n\n if 
self.outputSource is not None:\n r['outputSource'] = save(self.outputSource, top=False)\n\n if self.linkMerge is not None:\n r['linkMerge'] = save(self.linkMerge, top=False)\n\n if self.type is not None:\n r['type'] = save(self.type, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['label', 'secondaryFiles', 'streamable', 'doc', 'id', 'outputBinding', 'format', 'outputSource', 'linkMerge', 'type'])\n\n\nclass Sink(Savable):\n pass\n\nclass WorkflowStepInput(Sink):\n \"\"\"\nThe input of a workflow step connects an upstream parameter (from the\nworkflow inputs, or the outputs of other workflows steps) with the input\nparameters of the underlying step.\n\n## Input object\n\nA WorkflowStepInput object must contain an `id` field in the form\n`#fieldname` or `#prefix/fieldname`. When the `id` field contains a slash\n`/` the field name consists of the characters following the final slash\n(the prefix portion may contain one or more slashes to indicate scope).\nThis defines a field of the workflow step input object with the value of\nthe `source` parameter(s).\n\n## Merging\n\nTo merge multiple inbound data links,\n[MultipleInputFeatureRequirement](#MultipleInputFeatureRequirement) must be specified\nin the workflow or workflow step requirements.\n\nIf the sink parameter is an array, or named in a [workflow\nscatter](#WorkflowStep) operation, there may be multiple inbound data links\nlisted in the `source` field. The values from the input links are merged\ndepending on the method specified in the `linkMerge` field. If not\nspecified, the default method is \"merge_nested\".\n\n* **merge_nested**\n\n The input must be an array consisting of exactly one entry for each\n input link. If \"merge_nested\" is specified with a single link, the value\n from the link must be wrapped in a single-item list.\n\n* **merge_flattened**\n\n 1. The source and sink parameters must be compatible types, or the source\n type must be compatible with single element from the \"items\" type of\n the destination array parameter.\n 2. 
Source parameters which are arrays are concatenated.\n Source parameters which are single element types are appended as\n single elements.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n if 'source' in doc:\n try:\n self.source = load_field(doc.get('source'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_2, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'source', str).makeError(\"the `source` field is not valid because:\\n\"+str(e)))\n else:\n self.source = None\n\n if 'linkMerge' in doc:\n try:\n self.linkMerge = load_field(doc.get('linkMerge'), union_of_None_type_or_LinkMergeMethodLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'linkMerge', str).makeError(\"the `linkMerge` field is not valid because:\\n\"+str(e)))\n else:\n self.linkMerge = None\n\n if 'default' in doc:\n try:\n self.default = load_field(doc.get('default'), union_of_None_type_or_Any_type, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'default', str).makeError(\"the `default` field is not valid because:\\n\"+str(e)))\n else:\n self.default = None\n\n if 'valueFrom' in doc:\n try:\n self.valueFrom = load_field(doc.get('valueFrom'), union_of_None_type_or_strtype_or_ExpressionLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'valueFrom', str).makeError(\"the `valueFrom` field is not valid because:\\n\"+str(e)))\n else:\n self.valueFrom = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `source`, `linkMerge`, `id`, `default`, `valueFrom`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'WorkflowStepInput'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.source is not None:\n r['source'] = save(self.source, top=False)\n\n if self.linkMerge is not None:\n r['linkMerge'] = save(self.linkMerge, top=False)\n\n if self.default is not None:\n r['default'] = save(self.default, top=False)\n\n if self.valueFrom is not None:\n r['valueFrom'] = save(self.valueFrom, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['source', 'linkMerge', 'id', 'default', 'valueFrom'])\n\n\nclass WorkflowStepOutput(Savable):\n \"\"\"\nAssociate an output parameter of the underlying process with a workflow\nparameter. 
The workflow parameter (given in the `id` field) be may be used\nas a `source` to connect with input parameters of other workflow steps, or\nwith an output parameter of the process.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `id`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'WorkflowStepOutput'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['id'])\n\n\nclass WorkflowStep(Savable):\n \"\"\"\nA workflow step is an executable element of a workflow. It specifies the\nunderlying process implementation (such as `CommandLineTool` or another\n`Workflow`) in the `run` field and connects the input and output parameters\nof the underlying process to workflow parameters.\n\n# Scatter/gather\n\nTo use scatter/gather,\n[ScatterFeatureRequirement](#ScatterFeatureRequirement) must be specified\nin the workflow or workflow step requirements.\n\nA \"scatter\" operation specifies that the associated workflow step or\nsubworkflow should execute separately over a list of input elements. Each\njob making up a scatter operation is independent and may be executed\nconcurrently.\n\nThe `scatter` field specifies one or more input parameters which will be\nscattered. An input parameter may be listed more than once. The declared\ntype of each input parameter is implicitly becomes an array of items of the\ninput parameter type. If a parameter is listed more than once, it becomes\na nested array. As a result, upstream parameters which are connected to\nscattered parameters must be arrays.\n\nAll output parameter types are also implicitly wrapped in arrays. Each job\nin the scatter results in an entry in the output array.\n\nIf any scattered parameter runtime value is an empty array, all outputs are\nset to empty arrays and no work is done for the step, according to\napplicable scattering rules.\n\nIf `scatter` declares more than one input parameter, `scatterMethod`\ndescribes how to decompose the input into a discrete set of jobs.\n\n * **dotproduct** specifies that each of the input arrays are aligned and one\n element taken from each array to construct each job. 
It is an error\n if all input arrays are not the same length.\n\n * **nested_crossproduct** specifies the Cartesian product of the inputs,\n producing a job for every combination of the scattered inputs. The\n output must be nested arrays for each level of scattering, in the\n order that the input arrays are listed in the `scatter` field.\n\n * **flat_crossproduct** specifies the Cartesian product of the inputs,\n producing a job for every combination of the scattered inputs. The\n output arrays must be flattened to a single level, but otherwise listed in the\n order that the input arrays are listed in the `scatter` field.\n\n# Subworkflows\n\nTo specify a nested workflow as part of a workflow step,\n[SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) must be\nspecified in the workflow or workflow step requirements.\n\nIt is a fatal error if a workflow directly or indirectly invokes itself as\na subworkflow (recursive workflows are not allowed).\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n raise ValidationException(\"Missing id\")\n baseuri = self.id\n try:\n self.in_ = load_field(doc.get('in'), idmap_in__array_of_WorkflowStepInputLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'in', str).makeError(\"the `in` field is not valid because:\\n\"+str(e)))\n\n try:\n self.out = load_field(doc.get('out'), uri_union_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'out', str).makeError(\"the `out` field is not valid because:\\n\"+str(e)))\n\n if 'requirements' in doc:\n try:\n self.requirements = load_field(doc.get('requirements'), idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'requirements', str).makeError(\"the `requirements` field is not valid because:\\n\"+str(e)))\n else:\n self.requirements = None\n\n if 'hints' in doc:\n try:\n self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'hints', str).makeError(\"the `hints` field is not valid because:\\n\"+str(e)))\n else:\n self.hints = None\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not 
valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n try:\n self.run = load_field(doc.get('run'), uri_union_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_False_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'run', str).makeError(\"the `run` field is not valid because:\\n\"+str(e)))\n\n if 'scatter' in doc:\n try:\n self.scatter = load_field(doc.get('scatter'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_0, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'scatter', str).makeError(\"the `scatter` field is not valid because:\\n\"+str(e)))\n else:\n self.scatter = None\n\n if 'scatterMethod' in doc:\n try:\n self.scatterMethod = load_field(doc.get('scatterMethod'), uri_union_of_None_type_or_ScatterMethodLoader_False_True_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'scatterMethod', str).makeError(\"the `scatterMethod` field is not valid because:\\n\"+str(e)))\n else:\n self.scatterMethod = None\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `id`, `in`, `out`, `requirements`, `hints`, `label`, `doc`, `run`, `scatter`, `scatterMethod`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'WorkflowStep'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.in_ is not None:\n r['in'] = save(self.in_, top=False)\n\n if self.out is not None:\n r['out'] = save(self.out, top=False)\n\n if self.requirements is not None:\n r['requirements'] = save(self.requirements, top=False)\n\n if self.hints is not None:\n r['hints'] = save(self.hints, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.run is not None:\n r['run'] = save(self.run, top=False)\n\n if self.scatter is not None:\n r['scatter'] = save(self.scatter, top=False)\n\n if self.scatterMethod is not None:\n r['scatterMethod'] = save(self.scatterMethod, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['id', 'in', 'out', 'requirements', 'hints', 'label', 'doc', 'run', 'scatter', 'scatterMethod'])\n\n\nclass Workflow(Process):\n \"\"\"\nA workflow describes a set of **steps** and the **dependencies** between\nthose steps. When a step produces output that will be consumed by a\nsecond step, the first step is a dependency of the second step.\n\nWhen there is a dependency, the workflow engine must execute the preceeding\nstep and wait for it to successfully produce output before executing the\ndependent step. 
If two steps are defined in the workflow graph that\nare not directly or indirectly dependent, these steps are **independent**,\nand may execute in any order or execute concurrently. A workflow is\ncomplete when all steps have been executed.\n\nDependencies between parameters are expressed using the `source` field on\n[workflow step input parameters](#WorkflowStepInput) and [workflow output\nparameters](#WorkflowOutputParameter).\n\nThe `source` field expresses the dependency of one parameter on another\nsuch that when a value is associated with the parameter specified by\n`source`, that value is propagated to the destination parameter. When all\ndata links inbound to a given step are fufilled, the step is ready to\nexecute.\n\n## Workflow success and failure\n\nA completed step must result in one of `success`, `temporaryFailure` or\n`permanentFailure` states. An implementation may choose to retry a step\nexecution which resulted in `temporaryFailure`. An implementation may\nchoose to either continue running other steps of a workflow, or terminate\nimmediately upon `permanentFailure`.\n\n* If any step of a workflow execution results in `permanentFailure`, then\nthe workflow status is `permanentFailure`.\n\n* If one or more steps result in `temporaryFailure` and all other steps\ncomplete `success` or are not executed, then the workflow status is\n`temporaryFailure`.\n\n* If all workflow steps are executed and complete with `success`, then the\nworkflow status is `success`.\n\n# Extensions\n\n[ScatterFeatureRequirement](#ScatterFeatureRequirement) and\n[SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) are\navailable as standard [extensions](#Extensions_and_Metadata) to core\nworkflow semantics.\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'Workflow':\n raise ValidationException(\"Not a Workflow\")\n\n if 'id' in doc:\n try:\n self.id = load_field(doc.get('id'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'id', str).makeError(\"the `id` field is not valid because:\\n\"+str(e)))\n else:\n self.id = None\n\n\n if self.id is None:\n if docRoot is not None:\n self.id = docRoot\n else:\n self.id = \"_:\" + str(uuid.uuid4())\n baseuri = self.id\n try:\n self.inputs = load_field(doc.get('inputs'), idmap_inputs_array_of_InputParameterLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'inputs', str).makeError(\"the `inputs` field is not valid because:\\n\"+str(e)))\n\n try:\n self.outputs = load_field(doc.get('outputs'), idmap_outputs_array_of_WorkflowOutputParameterLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'outputs', str).makeError(\"the `outputs` field is not valid because:\\n\"+str(e)))\n\n if 'requirements' in doc:\n try:\n self.requirements = load_field(doc.get('requirements'), 
idmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'requirements', str).makeError(\"the `requirements` field is not valid because:\\n\"+str(e)))\n else:\n self.requirements = None\n\n if 'hints' in doc:\n try:\n self.hints = load_field(doc.get('hints'), idmap_hints_union_of_None_type_or_array_of_Any_type, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'hints', str).makeError(\"the `hints` field is not valid because:\\n\"+str(e)))\n else:\n self.hints = None\n\n if 'label' in doc:\n try:\n self.label = load_field(doc.get('label'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'label', str).makeError(\"the `label` field is not valid because:\\n\"+str(e)))\n else:\n self.label = None\n\n if 'doc' in doc:\n try:\n self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'doc', str).makeError(\"the `doc` field is not valid because:\\n\"+str(e)))\n else:\n self.doc = None\n\n if 'cwlVersion' in doc:\n try:\n self.cwlVersion = load_field(doc.get('cwlVersion'), uri_union_of_None_type_or_CWLVersionLoader_False_True_None, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'cwlVersion', str).makeError(\"the `cwlVersion` field is not valid because:\\n\"+str(e)))\n else:\n self.cwlVersion = None\n\n try:\n self.steps = load_field(doc.get('steps'), idmap_steps_union_of_array_of_WorkflowStepLoader, baseuri, loadingOptions)\n except ValidationException as e:\n errors.append(SourceLine(doc, 'steps', str).makeError(\"the `steps` field is not valid because:\\n\"+str(e)))\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `id`, `inputs`, `outputs`, `requirements`, `hints`, `label`, `doc`, `cwlVersion`, `class`, `steps`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'Workflow'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'Workflow'\n\n if self.id is not None:\n r['id'] = save(self.id, top=False)\n\n if self.inputs is not None:\n r['inputs'] = save(self.inputs, top=False)\n\n if self.outputs is not None:\n r['outputs'] = save(self.outputs, top=False)\n\n if self.requirements is not None:\n r['requirements'] = save(self.requirements, top=False)\n\n if self.hints is not None:\n r['hints'] = save(self.hints, top=False)\n\n if self.label is not None:\n r['label'] = save(self.label, top=False)\n\n if self.doc is not None:\n r['doc'] = save(self.doc, top=False)\n\n if self.cwlVersion is not None:\n r['cwlVersion'] = save(self.cwlVersion, 
top=False)\n\n if self.steps is not None:\n r['steps'] = save(self.steps, top=False)\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['id', 'inputs', 'outputs', 'requirements', 'hints', 'label', 'doc', 'cwlVersion', 'class', 'steps'])\n\n\nclass SubworkflowFeatureRequirement(ProcessRequirement):\n \"\"\"\nIndicates that the workflow platform must support nested workflows in\nthe `run` field of [WorkflowStep](#WorkflowStep).\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'SubworkflowFeatureRequirement':\n raise ValidationException(\"Not a SubworkflowFeatureRequirement\")\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'SubworkflowFeatureRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'SubworkflowFeatureRequirement'\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class'])\n\n\nclass ScatterFeatureRequirement(ProcessRequirement):\n \"\"\"\nIndicates that the workflow platform must support the `scatter` and\n`scatterMethod` fields of [WorkflowStep](#WorkflowStep).\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'ScatterFeatureRequirement':\n raise ValidationException(\"Not a ScatterFeatureRequirement\")\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'ScatterFeatureRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'ScatterFeatureRequirement'\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class'])\n\n\nclass MultipleInputFeatureRequirement(ProcessRequirement):\n \"\"\"\nIndicates that the workflow platform must support multiple inbound data links\nlisted in the `source` field of [WorkflowStepInput](#WorkflowStepInput).\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 
'MultipleInputFeatureRequirement':\n raise ValidationException(\"Not a MultipleInputFeatureRequirement\")\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'MultipleInputFeatureRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'MultipleInputFeatureRequirement'\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class'])\n\n\nclass StepInputExpressionRequirement(ProcessRequirement):\n \"\"\"\nIndicate that the workflow platform must support the `valueFrom` field\nof [WorkflowStepInput](#WorkflowStepInput).\n\n \"\"\"\n def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):\n doc = copy.copy(_doc)\n if hasattr(_doc, 'lc'):\n doc.lc.data = _doc.lc.data\n doc.lc.filename = _doc.lc.filename\n errors = []\n self.loadingOptions = loadingOptions\n\n if doc.get('class') != 'StepInputExpressionRequirement':\n raise ValidationException(\"Not a StepInputExpressionRequirement\")\n\n\n self.extension_fields = {}\n for k in doc.keys():\n if k not in self.attrs:\n if \":\" in k:\n ex = expand_url(k, u\"\", loadingOptions, scoped_id=False, vocab_term=False)\n self.extension_fields[ex] = doc[k]\n else:\n errors.append(SourceLine(doc, k, str).makeError(\"invalid field `%s`, expected one of: `class`\" % (k)))\n break\n\n if errors:\n raise ValidationException(\"Trying 'StepInputExpressionRequirement'\\n\"+\"\\n\".join(errors))\n\n def save(self, top=False):\n r = {}\n for ef in self.extension_fields:\n r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]\n\n r['class'] = 'StepInputExpressionRequirement'\n\n if top and self.loadingOptions.namespaces:\n r[\"$namespaces\"] = self.loadingOptions.namespaces\n\n return r\n\n attrs = frozenset(['class'])\n\n\n_vocab = {\n \"Any\": \"https://w3id.org/cwl/salad#Any\",\n \"ArraySchema\": \"https://w3id.org/cwl/salad#ArraySchema\",\n \"CWLType\": \"https://w3id.org/cwl/cwl#CWLType\",\n \"CWLVersion\": \"https://w3id.org/cwl/cwl#CWLVersion\",\n \"CommandInputArraySchema\": \"https://w3id.org/cwl/cwl#CommandInputArraySchema\",\n \"CommandInputEnumSchema\": \"https://w3id.org/cwl/cwl#CommandInputEnumSchema\",\n \"CommandInputParameter\": \"https://w3id.org/cwl/cwl#CommandInputParameter\",\n \"CommandInputRecordField\": \"https://w3id.org/cwl/cwl#CommandInputRecordField\",\n \"CommandInputRecordSchema\": \"https://w3id.org/cwl/cwl#CommandInputRecordSchema\",\n \"CommandLineBinding\": \"https://w3id.org/cwl/cwl#CommandLineBinding\",\n \"CommandLineTool\": \"https://w3id.org/cwl/cwl#CommandLineTool\",\n \"CommandOutputArraySchema\": \"https://w3id.org/cwl/cwl#CommandOutputArraySchema\",\n \"CommandOutputBinding\": \"https://w3id.org/cwl/cwl#CommandOutputBinding\",\n \"CommandOutputEnumSchema\": \"https://w3id.org/cwl/cwl#CommandOutputEnumSchema\",\n \"CommandOutputParameter\": \"https://w3id.org/cwl/cwl#CommandOutputParameter\",\n \"CommandOutputRecordField\": \"https://w3id.org/cwl/cwl#CommandOutputRecordField\",\n \"CommandOutputRecordSchema\": 
\"https://w3id.org/cwl/cwl#CommandOutputRecordSchema\",\n \"Directory\": \"https://w3id.org/cwl/cwl#Directory\",\n \"Dirent\": \"https://w3id.org/cwl/cwl#Dirent\",\n \"DockerRequirement\": \"https://w3id.org/cwl/cwl#DockerRequirement\",\n \"EnumSchema\": \"https://w3id.org/cwl/salad#EnumSchema\",\n \"EnvVarRequirement\": \"https://w3id.org/cwl/cwl#EnvVarRequirement\",\n \"EnvironmentDef\": \"https://w3id.org/cwl/cwl#EnvironmentDef\",\n \"Expression\": \"https://w3id.org/cwl/cwl#Expression\",\n \"ExpressionPlaceholder\": \"https://w3id.org/cwl/cwl#ExpressionPlaceholder\",\n \"ExpressionTool\": \"https://w3id.org/cwl/cwl#ExpressionTool\",\n \"ExpressionToolOutputParameter\": \"https://w3id.org/cwl/cwl#ExpressionToolOutputParameter\",\n \"File\": \"https://w3id.org/cwl/cwl#File\",\n \"InitialWorkDirRequirement\": \"https://w3id.org/cwl/cwl#InitialWorkDirRequirement\",\n \"InlineJavascriptRequirement\": \"https://w3id.org/cwl/cwl#InlineJavascriptRequirement\",\n \"InputArraySchema\": \"https://w3id.org/cwl/cwl#InputArraySchema\",\n \"InputBinding\": \"https://w3id.org/cwl/cwl#InputBinding\",\n \"InputEnumSchema\": \"https://w3id.org/cwl/cwl#InputEnumSchema\",\n \"InputParameter\": \"https://w3id.org/cwl/cwl#InputParameter\",\n \"InputRecordField\": \"https://w3id.org/cwl/cwl#InputRecordField\",\n \"InputRecordSchema\": \"https://w3id.org/cwl/cwl#InputRecordSchema\",\n \"InputSchema\": \"https://w3id.org/cwl/cwl#InputSchema\",\n \"LinkMergeMethod\": \"https://w3id.org/cwl/cwl#LinkMergeMethod\",\n \"MultipleInputFeatureRequirement\": \"https://w3id.org/cwl/cwl#MultipleInputFeatureRequirement\",\n \"OutputArraySchema\": \"https://w3id.org/cwl/cwl#OutputArraySchema\",\n \"OutputBinding\": \"https://w3id.org/cwl/cwl#OutputBinding\",\n \"OutputEnumSchema\": \"https://w3id.org/cwl/cwl#OutputEnumSchema\",\n \"OutputParameter\": \"https://w3id.org/cwl/cwl#OutputParameter\",\n \"OutputRecordField\": \"https://w3id.org/cwl/cwl#OutputRecordField\",\n \"OutputRecordSchema\": \"https://w3id.org/cwl/cwl#OutputRecordSchema\",\n \"OutputSchema\": \"https://w3id.org/cwl/cwl#OutputSchema\",\n \"Parameter\": \"https://w3id.org/cwl/cwl#Parameter\",\n \"PrimitiveType\": \"https://w3id.org/cwl/salad#PrimitiveType\",\n \"Process\": \"https://w3id.org/cwl/cwl#Process\",\n \"ProcessRequirement\": \"https://w3id.org/cwl/cwl#ProcessRequirement\",\n \"RecordField\": \"https://w3id.org/cwl/salad#RecordField\",\n \"RecordSchema\": \"https://w3id.org/cwl/salad#RecordSchema\",\n \"ResourceRequirement\": \"https://w3id.org/cwl/cwl#ResourceRequirement\",\n \"ScatterFeatureRequirement\": \"https://w3id.org/cwl/cwl#ScatterFeatureRequirement\",\n \"ScatterMethod\": \"https://w3id.org/cwl/cwl#ScatterMethod\",\n \"SchemaBase\": \"https://w3id.org/cwl/cwl#SchemaBase\",\n \"SchemaDefRequirement\": \"https://w3id.org/cwl/cwl#SchemaDefRequirement\",\n \"ShellCommandRequirement\": \"https://w3id.org/cwl/cwl#ShellCommandRequirement\",\n \"Sink\": \"https://w3id.org/cwl/cwl#Sink\",\n \"SoftwarePackage\": \"https://w3id.org/cwl/cwl#SoftwarePackage\",\n \"SoftwareRequirement\": \"https://w3id.org/cwl/cwl#SoftwareRequirement\",\n \"StepInputExpressionRequirement\": \"https://w3id.org/cwl/cwl#StepInputExpressionRequirement\",\n \"SubworkflowFeatureRequirement\": \"https://w3id.org/cwl/cwl#SubworkflowFeatureRequirement\",\n \"Workflow\": \"https://w3id.org/cwl/cwl#Workflow\",\n \"WorkflowOutputParameter\": \"https://w3id.org/cwl/cwl#WorkflowOutputParameter\",\n \"WorkflowStep\": \"https://w3id.org/cwl/cwl#WorkflowStep\",\n 
\"WorkflowStepInput\": \"https://w3id.org/cwl/cwl#WorkflowStepInput\",\n \"WorkflowStepOutput\": \"https://w3id.org/cwl/cwl#WorkflowStepOutput\",\n \"array\": \"https://w3id.org/cwl/salad#array\",\n \"boolean\": \"http://www.w3.org/2001/XMLSchema#boolean\",\n \"dotproduct\": \"https://w3id.org/cwl/cwl#ScatterMethod/dotproduct\",\n \"double\": \"http://www.w3.org/2001/XMLSchema#double\",\n \"draft-2\": \"https://w3id.org/cwl/cwl#draft-2\",\n \"draft-3\": \"https://w3id.org/cwl/cwl#draft-3\",\n \"draft-3.dev1\": \"https://w3id.org/cwl/cwl#draft-3.dev1\",\n \"draft-3.dev2\": \"https://w3id.org/cwl/cwl#draft-3.dev2\",\n \"draft-3.dev3\": \"https://w3id.org/cwl/cwl#draft-3.dev3\",\n \"draft-3.dev4\": \"https://w3id.org/cwl/cwl#draft-3.dev4\",\n \"draft-3.dev5\": \"https://w3id.org/cwl/cwl#draft-3.dev5\",\n \"draft-4.dev1\": \"https://w3id.org/cwl/cwl#draft-4.dev1\",\n \"draft-4.dev2\": \"https://w3id.org/cwl/cwl#draft-4.dev2\",\n \"draft-4.dev3\": \"https://w3id.org/cwl/cwl#draft-4.dev3\",\n \"enum\": \"https://w3id.org/cwl/salad#enum\",\n \"flat_crossproduct\": \"https://w3id.org/cwl/cwl#ScatterMethod/flat_crossproduct\",\n \"float\": \"http://www.w3.org/2001/XMLSchema#float\",\n \"int\": \"http://www.w3.org/2001/XMLSchema#int\",\n \"long\": \"http://www.w3.org/2001/XMLSchema#long\",\n \"merge_flattened\": \"https://w3id.org/cwl/cwl#LinkMergeMethod/merge_flattened\",\n \"merge_nested\": \"https://w3id.org/cwl/cwl#LinkMergeMethod/merge_nested\",\n \"nested_crossproduct\": \"https://w3id.org/cwl/cwl#ScatterMethod/nested_crossproduct\",\n \"null\": \"https://w3id.org/cwl/salad#null\",\n \"record\": \"https://w3id.org/cwl/salad#record\",\n \"stderr\": \"https://w3id.org/cwl/cwl#stderr\",\n \"stdout\": \"https://w3id.org/cwl/cwl#stdout\",\n \"string\": \"http://www.w3.org/2001/XMLSchema#string\",\n \"v1.0\": \"https://w3id.org/cwl/cwl#v1.0\",\n \"v1.0.dev4\": \"https://w3id.org/cwl/cwl#v1.0.dev4\",\n}\n_rvocab = {\n \"https://w3id.org/cwl/salad#Any\": \"Any\",\n \"https://w3id.org/cwl/salad#ArraySchema\": \"ArraySchema\",\n \"https://w3id.org/cwl/cwl#CWLType\": \"CWLType\",\n \"https://w3id.org/cwl/cwl#CWLVersion\": \"CWLVersion\",\n \"https://w3id.org/cwl/cwl#CommandInputArraySchema\": \"CommandInputArraySchema\",\n \"https://w3id.org/cwl/cwl#CommandInputEnumSchema\": \"CommandInputEnumSchema\",\n \"https://w3id.org/cwl/cwl#CommandInputParameter\": \"CommandInputParameter\",\n \"https://w3id.org/cwl/cwl#CommandInputRecordField\": \"CommandInputRecordField\",\n \"https://w3id.org/cwl/cwl#CommandInputRecordSchema\": \"CommandInputRecordSchema\",\n \"https://w3id.org/cwl/cwl#CommandLineBinding\": \"CommandLineBinding\",\n \"https://w3id.org/cwl/cwl#CommandLineTool\": \"CommandLineTool\",\n \"https://w3id.org/cwl/cwl#CommandOutputArraySchema\": \"CommandOutputArraySchema\",\n \"https://w3id.org/cwl/cwl#CommandOutputBinding\": \"CommandOutputBinding\",\n \"https://w3id.org/cwl/cwl#CommandOutputEnumSchema\": \"CommandOutputEnumSchema\",\n \"https://w3id.org/cwl/cwl#CommandOutputParameter\": \"CommandOutputParameter\",\n \"https://w3id.org/cwl/cwl#CommandOutputRecordField\": \"CommandOutputRecordField\",\n \"https://w3id.org/cwl/cwl#CommandOutputRecordSchema\": \"CommandOutputRecordSchema\",\n \"https://w3id.org/cwl/cwl#Directory\": \"Directory\",\n \"https://w3id.org/cwl/cwl#Dirent\": \"Dirent\",\n \"https://w3id.org/cwl/cwl#DockerRequirement\": \"DockerRequirement\",\n \"https://w3id.org/cwl/salad#EnumSchema\": \"EnumSchema\",\n \"https://w3id.org/cwl/cwl#EnvVarRequirement\": \"EnvVarRequirement\",\n 
\"https://w3id.org/cwl/cwl#EnvironmentDef\": \"EnvironmentDef\",\n \"https://w3id.org/cwl/cwl#Expression\": \"Expression\",\n \"https://w3id.org/cwl/cwl#ExpressionPlaceholder\": \"ExpressionPlaceholder\",\n \"https://w3id.org/cwl/cwl#ExpressionTool\": \"ExpressionTool\",\n \"https://w3id.org/cwl/cwl#ExpressionToolOutputParameter\": \"ExpressionToolOutputParameter\",\n \"https://w3id.org/cwl/cwl#File\": \"File\",\n \"https://w3id.org/cwl/cwl#InitialWorkDirRequirement\": \"InitialWorkDirRequirement\",\n \"https://w3id.org/cwl/cwl#InlineJavascriptRequirement\": \"InlineJavascriptRequirement\",\n \"https://w3id.org/cwl/cwl#InputArraySchema\": \"InputArraySchema\",\n \"https://w3id.org/cwl/cwl#InputBinding\": \"InputBinding\",\n \"https://w3id.org/cwl/cwl#InputEnumSchema\": \"InputEnumSchema\",\n \"https://w3id.org/cwl/cwl#InputParameter\": \"InputParameter\",\n \"https://w3id.org/cwl/cwl#InputRecordField\": \"InputRecordField\",\n \"https://w3id.org/cwl/cwl#InputRecordSchema\": \"InputRecordSchema\",\n \"https://w3id.org/cwl/cwl#InputSchema\": \"InputSchema\",\n \"https://w3id.org/cwl/cwl#LinkMergeMethod\": \"LinkMergeMethod\",\n \"https://w3id.org/cwl/cwl#MultipleInputFeatureRequirement\": \"MultipleInputFeatureRequirement\",\n \"https://w3id.org/cwl/cwl#OutputArraySchema\": \"OutputArraySchema\",\n \"https://w3id.org/cwl/cwl#OutputBinding\": \"OutputBinding\",\n \"https://w3id.org/cwl/cwl#OutputEnumSchema\": \"OutputEnumSchema\",\n \"https://w3id.org/cwl/cwl#OutputParameter\": \"OutputParameter\",\n \"https://w3id.org/cwl/cwl#OutputRecordField\": \"OutputRecordField\",\n \"https://w3id.org/cwl/cwl#OutputRecordSchema\": \"OutputRecordSchema\",\n \"https://w3id.org/cwl/cwl#OutputSchema\": \"OutputSchema\",\n \"https://w3id.org/cwl/cwl#Parameter\": \"Parameter\",\n \"https://w3id.org/cwl/salad#PrimitiveType\": \"PrimitiveType\",\n \"https://w3id.org/cwl/cwl#Process\": \"Process\",\n \"https://w3id.org/cwl/cwl#ProcessRequirement\": \"ProcessRequirement\",\n \"https://w3id.org/cwl/salad#RecordField\": \"RecordField\",\n \"https://w3id.org/cwl/salad#RecordSchema\": \"RecordSchema\",\n \"https://w3id.org/cwl/cwl#ResourceRequirement\": \"ResourceRequirement\",\n \"https://w3id.org/cwl/cwl#ScatterFeatureRequirement\": \"ScatterFeatureRequirement\",\n \"https://w3id.org/cwl/cwl#ScatterMethod\": \"ScatterMethod\",\n \"https://w3id.org/cwl/cwl#SchemaBase\": \"SchemaBase\",\n \"https://w3id.org/cwl/cwl#SchemaDefRequirement\": \"SchemaDefRequirement\",\n \"https://w3id.org/cwl/cwl#ShellCommandRequirement\": \"ShellCommandRequirement\",\n \"https://w3id.org/cwl/cwl#Sink\": \"Sink\",\n \"https://w3id.org/cwl/cwl#SoftwarePackage\": \"SoftwarePackage\",\n \"https://w3id.org/cwl/cwl#SoftwareRequirement\": \"SoftwareRequirement\",\n \"https://w3id.org/cwl/cwl#StepInputExpressionRequirement\": \"StepInputExpressionRequirement\",\n \"https://w3id.org/cwl/cwl#SubworkflowFeatureRequirement\": \"SubworkflowFeatureRequirement\",\n \"https://w3id.org/cwl/cwl#Workflow\": \"Workflow\",\n \"https://w3id.org/cwl/cwl#WorkflowOutputParameter\": \"WorkflowOutputParameter\",\n \"https://w3id.org/cwl/cwl#WorkflowStep\": \"WorkflowStep\",\n \"https://w3id.org/cwl/cwl#WorkflowStepInput\": \"WorkflowStepInput\",\n \"https://w3id.org/cwl/cwl#WorkflowStepOutput\": \"WorkflowStepOutput\",\n \"https://w3id.org/cwl/salad#array\": \"array\",\n \"http://www.w3.org/2001/XMLSchema#boolean\": \"boolean\",\n \"https://w3id.org/cwl/cwl#ScatterMethod/dotproduct\": \"dotproduct\",\n \"http://www.w3.org/2001/XMLSchema#double\": \"double\",\n 
\"https://w3id.org/cwl/cwl#draft-2\": \"draft-2\",\n \"https://w3id.org/cwl/cwl#draft-3\": \"draft-3\",\n \"https://w3id.org/cwl/cwl#draft-3.dev1\": \"draft-3.dev1\",\n \"https://w3id.org/cwl/cwl#draft-3.dev2\": \"draft-3.dev2\",\n \"https://w3id.org/cwl/cwl#draft-3.dev3\": \"draft-3.dev3\",\n \"https://w3id.org/cwl/cwl#draft-3.dev4\": \"draft-3.dev4\",\n \"https://w3id.org/cwl/cwl#draft-3.dev5\": \"draft-3.dev5\",\n \"https://w3id.org/cwl/cwl#draft-4.dev1\": \"draft-4.dev1\",\n \"https://w3id.org/cwl/cwl#draft-4.dev2\": \"draft-4.dev2\",\n \"https://w3id.org/cwl/cwl#draft-4.dev3\": \"draft-4.dev3\",\n \"https://w3id.org/cwl/salad#enum\": \"enum\",\n \"https://w3id.org/cwl/cwl#ScatterMethod/flat_crossproduct\": \"flat_crossproduct\",\n \"http://www.w3.org/2001/XMLSchema#float\": \"float\",\n \"http://www.w3.org/2001/XMLSchema#int\": \"int\",\n \"http://www.w3.org/2001/XMLSchema#long\": \"long\",\n \"https://w3id.org/cwl/cwl#LinkMergeMethod/merge_flattened\": \"merge_flattened\",\n \"https://w3id.org/cwl/cwl#LinkMergeMethod/merge_nested\": \"merge_nested\",\n \"https://w3id.org/cwl/cwl#ScatterMethod/nested_crossproduct\": \"nested_crossproduct\",\n \"https://w3id.org/cwl/salad#null\": \"null\",\n \"https://w3id.org/cwl/salad#record\": \"record\",\n \"https://w3id.org/cwl/cwl#stderr\": \"stderr\",\n \"https://w3id.org/cwl/cwl#stdout\": \"stdout\",\n \"http://www.w3.org/2001/XMLSchema#string\": \"string\",\n \"https://w3id.org/cwl/cwl#v1.0\": \"v1.0\",\n \"https://w3id.org/cwl/cwl#v1.0.dev4\": \"v1.0.dev4\",\n}\n\nstrtype = _PrimitiveLoader((str, six.text_type))\ninttype = _PrimitiveLoader(int)\nfloattype = _PrimitiveLoader(float)\nbooltype = _PrimitiveLoader(bool)\nNone_type = _PrimitiveLoader(type(None))\nAny_type = _AnyLoader()\nPrimitiveTypeLoader = _EnumLoader((\"null\", \"boolean\", \"int\", \"long\", \"float\", \"double\", \"string\",))\nAnyLoader = _EnumLoader((\"Any\",))\nRecordFieldLoader = _RecordLoader(RecordField)\nRecordSchemaLoader = _RecordLoader(RecordSchema)\nEnumSchemaLoader = _RecordLoader(EnumSchema)\nArraySchemaLoader = _RecordLoader(ArraySchema)\nCWLVersionLoader = _EnumLoader((\"draft-2\", \"draft-3.dev1\", \"draft-3.dev2\", \"draft-3.dev3\", \"draft-3.dev4\", \"draft-3.dev5\", \"draft-3\", \"draft-4.dev1\", \"draft-4.dev2\", \"draft-4.dev3\", \"v1.0.dev4\", \"v1.0\",))\nCWLTypeLoader = _EnumLoader((\"File\", \"Directory\",))\nFileLoader = _RecordLoader(File)\nDirectoryLoader = _RecordLoader(Directory)\nSchemaBaseLoader = _RecordLoader(SchemaBase)\nParameterLoader = _RecordLoader(Parameter)\nExpressionLoader = _EnumLoader((\"ExpressionPlaceholder\",))\nInputBindingLoader = _RecordLoader(InputBinding)\nOutputBindingLoader = _RecordLoader(OutputBinding)\nInputSchemaLoader = _RecordLoader(InputSchema)\nOutputSchemaLoader = _RecordLoader(OutputSchema)\nInputRecordFieldLoader = _RecordLoader(InputRecordField)\nInputRecordSchemaLoader = _RecordLoader(InputRecordSchema)\nInputEnumSchemaLoader = _RecordLoader(InputEnumSchema)\nInputArraySchemaLoader = _RecordLoader(InputArraySchema)\nOutputRecordFieldLoader = _RecordLoader(OutputRecordField)\nOutputRecordSchemaLoader = _RecordLoader(OutputRecordSchema)\nOutputEnumSchemaLoader = _RecordLoader(OutputEnumSchema)\nOutputArraySchemaLoader = _RecordLoader(OutputArraySchema)\nInputParameterLoader = _RecordLoader(InputParameter)\nOutputParameterLoader = _RecordLoader(OutputParameter)\nProcessRequirementLoader = _RecordLoader(ProcessRequirement)\nProcessLoader = _RecordLoader(Process)\nInlineJavascriptRequirementLoader = 
_RecordLoader(InlineJavascriptRequirement)\nSchemaDefRequirementLoader = _RecordLoader(SchemaDefRequirement)\nEnvironmentDefLoader = _RecordLoader(EnvironmentDef)\nCommandLineBindingLoader = _RecordLoader(CommandLineBinding)\nCommandOutputBindingLoader = _RecordLoader(CommandOutputBinding)\nCommandInputRecordFieldLoader = _RecordLoader(CommandInputRecordField)\nCommandInputRecordSchemaLoader = _RecordLoader(CommandInputRecordSchema)\nCommandInputEnumSchemaLoader = _RecordLoader(CommandInputEnumSchema)\nCommandInputArraySchemaLoader = _RecordLoader(CommandInputArraySchema)\nCommandOutputRecordFieldLoader = _RecordLoader(CommandOutputRecordField)\nCommandOutputRecordSchemaLoader = _RecordLoader(CommandOutputRecordSchema)\nCommandOutputEnumSchemaLoader = _RecordLoader(CommandOutputEnumSchema)\nCommandOutputArraySchemaLoader = _RecordLoader(CommandOutputArraySchema)\nCommandInputParameterLoader = _RecordLoader(CommandInputParameter)\nCommandOutputParameterLoader = _RecordLoader(CommandOutputParameter)\nstdoutLoader = _EnumLoader((\"stdout\",))\nstderrLoader = _EnumLoader((\"stderr\",))\nCommandLineToolLoader = _RecordLoader(CommandLineTool)\nDockerRequirementLoader = _RecordLoader(DockerRequirement)\nSoftwareRequirementLoader = _RecordLoader(SoftwareRequirement)\nSoftwarePackageLoader = _RecordLoader(SoftwarePackage)\nDirentLoader = _RecordLoader(Dirent)\nInitialWorkDirRequirementLoader = _RecordLoader(InitialWorkDirRequirement)\nEnvVarRequirementLoader = _RecordLoader(EnvVarRequirement)\nShellCommandRequirementLoader = _RecordLoader(ShellCommandRequirement)\nResourceRequirementLoader = _RecordLoader(ResourceRequirement)\nExpressionToolOutputParameterLoader = _RecordLoader(ExpressionToolOutputParameter)\nExpressionToolLoader = _RecordLoader(ExpressionTool)\nLinkMergeMethodLoader = _EnumLoader((\"merge_nested\", \"merge_flattened\",))\nWorkflowOutputParameterLoader = _RecordLoader(WorkflowOutputParameter)\nSinkLoader = _RecordLoader(Sink)\nWorkflowStepInputLoader = _RecordLoader(WorkflowStepInput)\nWorkflowStepOutputLoader = _RecordLoader(WorkflowStepOutput)\nScatterMethodLoader = _EnumLoader((\"dotproduct\", \"nested_crossproduct\", \"flat_crossproduct\",))\nWorkflowStepLoader = _RecordLoader(WorkflowStep)\nWorkflowLoader = _RecordLoader(Workflow)\nSubworkflowFeatureRequirementLoader = _RecordLoader(SubworkflowFeatureRequirement)\nScatterFeatureRequirementLoader = _RecordLoader(ScatterFeatureRequirement)\nMultipleInputFeatureRequirementLoader = _RecordLoader(MultipleInputFeatureRequirement)\nStepInputExpressionRequirementLoader = _RecordLoader(StepInputExpressionRequirement)\nuri_strtype_True_False_None = _URILoader(strtype, True, False, None)\nunion_of_None_type_or_strtype = _UnionLoader((None_type, strtype,))\nunion_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype,))\narray_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _ArrayLoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype)\nunion_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype, 
array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,))\ntypedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype, 2)\narray_of_RecordFieldLoader = _ArrayLoader(RecordFieldLoader)\nunion_of_None_type_or_array_of_RecordFieldLoader = _UnionLoader((None_type, array_of_RecordFieldLoader,))\nidmap_fields_union_of_None_type_or_array_of_RecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_RecordFieldLoader, 'name', 'type')\nRecord_symbolLoader = _EnumLoader((\"record\",))\ntypedsl_Record_symbolLoader_2 = _TypeDSLLoader(Record_symbolLoader, 2)\narray_of_strtype = _ArrayLoader(strtype)\nuri_array_of_strtype_True_False_None = _URILoader(array_of_strtype, True, False, None)\nEnum_symbolLoader = _EnumLoader((\"enum\",))\ntypedsl_Enum_symbolLoader_2 = _TypeDSLLoader(Enum_symbolLoader, 2)\nuri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype, False, True, 2)\nArray_symbolLoader = _EnumLoader((\"array\",))\ntypedsl_Array_symbolLoader_2 = _TypeDSLLoader(Array_symbolLoader, 2)\nFile_classLoader = _EnumLoader((\"File\",))\nuri_File_classLoader_False_True_None = _URILoader(File_classLoader, False, True, None)\nuri_union_of_None_type_or_strtype_False_False_None = _URILoader(union_of_None_type_or_strtype, False, False, None)\nunion_of_None_type_or_inttype = _UnionLoader((None_type, inttype,))\nunion_of_FileLoader_or_DirectoryLoader = _UnionLoader((FileLoader, DirectoryLoader,))\narray_of_union_of_FileLoader_or_DirectoryLoader = _ArrayLoader(union_of_FileLoader_or_DirectoryLoader)\nunion_of_None_type_or_array_of_union_of_FileLoader_or_DirectoryLoader = _UnionLoader((None_type, array_of_union_of_FileLoader_or_DirectoryLoader,))\nuri_union_of_None_type_or_strtype_True_False_None = _URILoader(union_of_None_type_or_strtype, True, False, None)\nDirectory_classLoader = _EnumLoader((\"Directory\",))\nuri_Directory_classLoader_False_True_None = _URILoader(Directory_classLoader, False, True, None)\nunion_of_strtype_or_ExpressionLoader = _UnionLoader((strtype, ExpressionLoader,))\narray_of_union_of_strtype_or_ExpressionLoader = _ArrayLoader(union_of_strtype_or_ExpressionLoader)\nunion_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_union_of_strtype_or_ExpressionLoader = _UnionLoader((None_type, strtype, ExpressionLoader, array_of_union_of_strtype_or_ExpressionLoader,))\nunion_of_None_type_or_booltype = _UnionLoader((None_type, booltype,))\nunion_of_None_type_or_strtype_or_array_of_strtype = _UnionLoader((None_type, strtype, array_of_strtype,))\nunion_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, InputRecordSchemaLoader, InputEnumSchemaLoader, 
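# The generated loader names in this section spell out how each loader is composed:
# `union_of_A_or_B` wraps `_UnionLoader((A, B))`, `array_of_X` wraps `_ArrayLoader(X)`,
# `typedsl_X_2` wraps `_TypeDSLLoader(X, 2)` (the `Type?` / `Type[]` shorthand of the
# CWL type DSL), `idmap_<field>_X` wraps `_IdMapLoader`, and `uri_X_<flags>` wraps
# `_URILoader(X, ...)` with its scoped-id/vocab flags encoded in the name. These
# definitions appear to be machine-generated and are not meant to be hand-edited.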
InputArraySchemaLoader, strtype,))\narray_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype)\nunion_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, InputRecordSchemaLoader, InputEnumSchemaLoader, InputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype, 2)\nunion_of_None_type_or_CommandLineBindingLoader = _UnionLoader((None_type, CommandLineBindingLoader,))\narray_of_InputRecordFieldLoader = _ArrayLoader(InputRecordFieldLoader)\nunion_of_None_type_or_array_of_InputRecordFieldLoader = _UnionLoader((None_type, array_of_InputRecordFieldLoader,))\nidmap_fields_union_of_None_type_or_array_of_InputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_InputRecordFieldLoader, 'name', 'type')\nuri_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype, False, True, 2)\nunion_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, OutputRecordSchemaLoader, OutputEnumSchemaLoader, OutputArraySchemaLoader, strtype,))\narray_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype)\nunion_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, OutputRecordSchemaLoader, OutputEnumSchemaLoader, OutputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2 = 
_TypeDSLLoader(union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype, 2)\nunion_of_None_type_or_CommandOutputBindingLoader = _UnionLoader((None_type, CommandOutputBindingLoader,))\narray_of_OutputRecordFieldLoader = _ArrayLoader(OutputRecordFieldLoader)\nunion_of_None_type_or_array_of_OutputRecordFieldLoader = _UnionLoader((None_type, array_of_OutputRecordFieldLoader,))\nidmap_fields_union_of_None_type_or_array_of_OutputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_OutputRecordFieldLoader, 'name', 'type')\nuri_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype, False, True, 2)\nunion_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader = _UnionLoader((None_type, strtype, array_of_strtype, ExpressionLoader,))\nuri_union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader_True_False_None = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype_or_ExpressionLoader, True, False, None)\nunion_of_None_type_or_Any_type = _UnionLoader((None_type, Any_type,))\nunion_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, InputRecordSchemaLoader, InputEnumSchemaLoader, InputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader_or_strtype, 2)\nunion_of_None_type_or_strtype_or_ExpressionLoader = _UnionLoader((None_type, strtype, ExpressionLoader,))\nuri_union_of_None_type_or_strtype_or_ExpressionLoader_True_False_None = _URILoader(union_of_None_type_or_strtype_or_ExpressionLoader, True, False, None)\narray_of_InputParameterLoader = _ArrayLoader(InputParameterLoader)\nidmap_inputs_array_of_InputParameterLoader = _IdMapLoader(array_of_InputParameterLoader, 'id', 'type')\narray_of_OutputParameterLoader = _ArrayLoader(OutputParameterLoader)\nidmap_outputs_array_of_OutputParameterLoader = _IdMapLoader(array_of_OutputParameterLoader, 'id', 
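# `_IdMapLoader(loader, 'id', 'type')` accepts a field in either list form or map
# form and normalises the map form to a list, turning each map key into the 'id'
# field and a bare value into 'type'. For example, this CWL fragment
#
#   outputs:
#     out: File
#
# loads the same way as
#
#   outputs:
#     - id: out
#       type: File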
'type')\nunion_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = _UnionLoader((InlineJavascriptRequirementLoader, SchemaDefRequirementLoader, DockerRequirementLoader, SoftwareRequirementLoader, InitialWorkDirRequirementLoader, EnvVarRequirementLoader, ShellCommandRequirementLoader, ResourceRequirementLoader, SubworkflowFeatureRequirementLoader, ScatterFeatureRequirementLoader, MultipleInputFeatureRequirementLoader, StepInputExpressionRequirementLoader,))\narray_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = _ArrayLoader(union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader)\nunion_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = _UnionLoader((None_type, array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader,))\nidmap_requirements_union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader = _IdMapLoader(union_of_None_type_or_array_of_union_of_InlineJavascriptRequirementLoader_or_SchemaDefRequirementLoader_or_DockerRequirementLoader_or_SoftwareRequirementLoader_or_InitialWorkDirRequirementLoader_or_EnvVarRequirementLoader_or_ShellCommandRequirementLoader_or_ResourceRequirementLoader_or_SubworkflowFeatureRequirementLoader_or_ScatterFeatureRequirementLoader_or_MultipleInputFeatureRequirementLoader_or_StepInputExpressionRequirementLoader, 'class', 'None')\narray_of_Any_type = 
_ArrayLoader(Any_type)\nunion_of_None_type_or_array_of_Any_type = _UnionLoader((None_type, array_of_Any_type,))\nidmap_hints_union_of_None_type_or_array_of_Any_type = _IdMapLoader(union_of_None_type_or_array_of_Any_type, 'class', 'None')\nunion_of_None_type_or_CWLVersionLoader = _UnionLoader((None_type, CWLVersionLoader,))\nuri_union_of_None_type_or_CWLVersionLoader_False_True_None = _URILoader(union_of_None_type_or_CWLVersionLoader, False, True, None)\nuri_strtype_False_True_None = _URILoader(strtype, False, True, None)\nunion_of_None_type_or_array_of_strtype = _UnionLoader((None_type, array_of_strtype,))\nunion_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader = _UnionLoader((InputRecordSchemaLoader, InputEnumSchemaLoader, InputArraySchemaLoader,))\narray_of_union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader = _ArrayLoader(union_of_InputRecordSchemaLoader_or_InputEnumSchemaLoader_or_InputArraySchemaLoader)\nunion_of_None_type_or_strtype_or_ExpressionLoader_or_array_of_strtype = _UnionLoader((None_type, strtype, ExpressionLoader, array_of_strtype,))\nunion_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, CommandInputRecordSchemaLoader, CommandInputEnumSchemaLoader, CommandInputArraySchemaLoader, strtype,))\narray_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype)\nunion_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, CommandInputRecordSchemaLoader, CommandInputEnumSchemaLoader, CommandInputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype, 2)\narray_of_CommandInputRecordFieldLoader = _ArrayLoader(CommandInputRecordFieldLoader)\nunion_of_None_type_or_array_of_CommandInputRecordFieldLoader = _UnionLoader((None_type, array_of_CommandInputRecordFieldLoader,))\nidmap_fields_union_of_None_type_or_array_of_CommandInputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_CommandInputRecordFieldLoader, 'name', 'type')\nuri_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_False_True_2 = 
_URILoader(union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype, False, True, 2)\nunion_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, CommandOutputRecordSchemaLoader, CommandOutputEnumSchemaLoader, CommandOutputArraySchemaLoader, strtype,))\narray_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _ArrayLoader(union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype)\nunion_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _UnionLoader((CWLTypeLoader, CommandOutputRecordSchemaLoader, CommandOutputEnumSchemaLoader, CommandOutputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype, 2)\narray_of_CommandOutputRecordFieldLoader = _ArrayLoader(CommandOutputRecordFieldLoader)\nunion_of_None_type_or_array_of_CommandOutputRecordFieldLoader = _UnionLoader((None_type, array_of_CommandOutputRecordFieldLoader,))\nidmap_fields_union_of_None_type_or_array_of_CommandOutputRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_CommandOutputRecordFieldLoader, 'name', 'type')\nuri_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype, False, True, 2)\nunion_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, CommandInputRecordSchemaLoader, CommandInputEnumSchemaLoader, CommandInputArraySchemaLoader, strtype, 
array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandInputRecordSchemaLoader_or_CommandInputEnumSchemaLoader_or_CommandInputArraySchemaLoader_or_strtype, 2)\nunion_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, stdoutLoader, stderrLoader, CommandOutputRecordSchemaLoader, CommandOutputEnumSchemaLoader, CommandOutputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_stdoutLoader_or_stderrLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_CommandOutputRecordSchemaLoader_or_CommandOutputEnumSchemaLoader_or_CommandOutputArraySchemaLoader_or_strtype, 2)\narray_of_CommandInputParameterLoader = _ArrayLoader(CommandInputParameterLoader)\nidmap_inputs_array_of_CommandInputParameterLoader = _IdMapLoader(array_of_CommandInputParameterLoader, 'id', 'type')\narray_of_CommandOutputParameterLoader = _ArrayLoader(CommandOutputParameterLoader)\nidmap_outputs_array_of_CommandOutputParameterLoader = _IdMapLoader(array_of_CommandOutputParameterLoader, 'id', 'type')\nunion_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader = _UnionLoader((strtype, ExpressionLoader, CommandLineBindingLoader,))\narray_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader = _ArrayLoader(union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader)\nunion_of_None_type_or_array_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader = _UnionLoader((None_type, array_of_union_of_strtype_or_ExpressionLoader_or_CommandLineBindingLoader,))\narray_of_inttype = _ArrayLoader(inttype)\nunion_of_None_type_or_array_of_inttype = _UnionLoader((None_type, array_of_inttype,))\narray_of_SoftwarePackageLoader = _ArrayLoader(SoftwarePackageLoader)\nidmap_packages_array_of_SoftwarePackageLoader = _IdMapLoader(array_of_SoftwarePackageLoader, 'package', 'specs')\nunion_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader = _UnionLoader((FileLoader, DirectoryLoader, DirentLoader, strtype, 
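# The union opened just above collects the entry types allowed in an
# InitialWorkDirRequirement `listing` (File, Directory, Dirent, plain string, or
# Expression); the array and union wrappers that follow build the full `listing`
# field type from it.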
ExpressionLoader,))\narray_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader = _ArrayLoader(union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader)\nunion_of_array_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader_or_strtype_or_ExpressionLoader = _UnionLoader((array_of_union_of_FileLoader_or_DirectoryLoader_or_DirentLoader_or_strtype_or_ExpressionLoader, strtype, ExpressionLoader,))\narray_of_EnvironmentDefLoader = _ArrayLoader(EnvironmentDefLoader)\nidmap_envDef_array_of_EnvironmentDefLoader = _IdMapLoader(array_of_EnvironmentDefLoader, 'envName', 'envValue')\nunion_of_None_type_or_inttype_or_strtype_or_ExpressionLoader = _UnionLoader((None_type, inttype, strtype, ExpressionLoader,))\nunion_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype = _UnionLoader((None_type, CWLTypeLoader, OutputRecordSchemaLoader, OutputEnumSchemaLoader, OutputArraySchemaLoader, strtype, array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype,))\ntypedsl_union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_None_type_or_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype_or_array_of_union_of_CWLTypeLoader_or_OutputRecordSchemaLoader_or_OutputEnumSchemaLoader_or_OutputArraySchemaLoader_or_strtype, 2)\narray_of_ExpressionToolOutputParameterLoader = _ArrayLoader(ExpressionToolOutputParameterLoader)\nidmap_outputs_array_of_ExpressionToolOutputParameterLoader = _IdMapLoader(array_of_ExpressionToolOutputParameterLoader, 'id', 'type')\nuri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_0 = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, False, False, 0)\nunion_of_None_type_or_LinkMergeMethodLoader = _UnionLoader((None_type, LinkMergeMethodLoader,))\nuri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_2 = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, False, False, 2)\narray_of_WorkflowStepInputLoader = _ArrayLoader(WorkflowStepInputLoader)\nidmap_in__array_of_WorkflowStepInputLoader = _IdMapLoader(array_of_WorkflowStepInputLoader, 'id', 'source')\nunion_of_strtype_or_WorkflowStepOutputLoader = _UnionLoader((strtype, WorkflowStepOutputLoader,))\narray_of_union_of_strtype_or_WorkflowStepOutputLoader = _ArrayLoader(union_of_strtype_or_WorkflowStepOutputLoader)\nunion_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader = _UnionLoader((array_of_union_of_strtype_or_WorkflowStepOutputLoader,))\nuri_union_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader_True_False_None = _URILoader(union_of_array_of_union_of_strtype_or_WorkflowStepOutputLoader, True, False, None)\nunion_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _UnionLoader((strtype, CommandLineToolLoader, ExpressionToolLoader, WorkflowLoader,))\nuri_union_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_False_False_None = 
_URILoader(union_of_strtype_or_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader, False, False, None)\nunion_of_None_type_or_ScatterMethodLoader = _UnionLoader((None_type, ScatterMethodLoader,))\nuri_union_of_None_type_or_ScatterMethodLoader_False_True_None = _URILoader(union_of_None_type_or_ScatterMethodLoader, False, True, None)\narray_of_WorkflowOutputParameterLoader = _ArrayLoader(WorkflowOutputParameterLoader)\nidmap_outputs_array_of_WorkflowOutputParameterLoader = _IdMapLoader(array_of_WorkflowOutputParameterLoader, 'id', 'type')\narray_of_WorkflowStepLoader = _ArrayLoader(WorkflowStepLoader)\nunion_of_array_of_WorkflowStepLoader = _UnionLoader((array_of_WorkflowStepLoader,))\nidmap_steps_union_of_array_of_WorkflowStepLoader = _IdMapLoader(union_of_array_of_WorkflowStepLoader, 'id', 'None')\nunion_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _UnionLoader((CommandLineToolLoader, ExpressionToolLoader, WorkflowLoader,))\narray_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _ArrayLoader(union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader)\nunion_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_or_array_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader = _UnionLoader((CommandLineToolLoader, ExpressionToolLoader, WorkflowLoader, array_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader,))\n\n\n\ndef load_document(doc, baseuri=None, loadingOptions=None):\n if baseuri is None:\n baseuri = file_uri(os.getcwd()) + \"/\"\n if loadingOptions is None:\n loadingOptions = LoadingOptions()\n return _document_load(union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader_or_array_of_union_of_CommandLineToolLoader_or_ExpressionToolLoader_or_WorkflowLoader, doc, baseuri, loadingOptions)\n", "id": "10910423", "language": "Python", "matching_score": 3.1738157272338867, "max_stars_count": 3, "path": "cwl_schema.py" }, { "content": "\"\"\"Dataverse API wrapper for all it's API's.\"\"\"\nimport json\nimport subprocess as sp\n\nfrom requests import ConnectionError, Response, delete, get, post, put\n\nfrom pyDataverse.exceptions import (\n ApiAuthorizationError,\n ApiUrlError,\n DatasetNotFoundError,\n DataverseNotEmptyError,\n DataverseNotFoundError,\n OperationFailedError,\n)\n\n\nclass Api:\n \"\"\"Base class.\n\n Parameters\n ----------\n base_url : str\n Base URL of Dataverse instance. Without trailing `/` at the end.\n e.g. 
`http://demo.dataverse.org`\n api_token : str\n Authenication token for the api.\n\n Attributes\n ----------\n base_url\n api_token\n dataverse_version\n\n \"\"\"\n\n def __init__(\n self, base_url: str, api_token: str = None, api_version: str = \"latest\"\n ):\n \"\"\"Init an Api() class.\n\n Scheme, host and path combined create the base-url for the api.\n See more about URL at `Wikipedia <https://en.wikipedia.org/wiki/URL>`_.\n\n Parameters\n ----------\n base_url : str\n Base url for Dataverse api.\n api_token : str\n Api token for Dataverse api.\n\n Examples\n -------\n Create an Api connection::\n\n >>> from pyDataverse.api import Api\n >>> base_url = 'http://demo.dataverse.org'\n >>> api = Api(base_url)\n\n \"\"\"\n if not isinstance(base_url, str):\n raise ApiUrlError(\"base_url {0} is not a string.\".format(base_url))\n\n self.base_url = base_url\n\n if not isinstance(api_version, (\"\".__class__, \"\".__class__)):\n raise ApiUrlError(\"api_version {0} is not a string.\".format(api_version))\n self.api_version = api_version\n\n if api_token:\n if not isinstance(api_token, (\"\".__class__, \"\".__class__)):\n raise ApiAuthorizationError(\"Api token passed is not a string.\")\n self.api_token = api_token\n\n if self.base_url:\n if self.api_version == \"latest\":\n self.base_url_api = \"{0}/api\".format(self.base_url)\n else:\n self.base_url_api = \"{0}/api/{1}\".format(\n self.base_url, self.api_version\n )\n else:\n self.base_url_api = None\n self.timeout = 500\n\n def __str__(self):\n \"\"\"Return name of Api() class for users.\n\n Returns\n -------\n str\n Naming of the API class.\n\n \"\"\"\n return \"API: {0}\".format(self.base_url_api)\n\n def get_request(self, url, params=None, auth=False):\n \"\"\"Make a GET request.\n\n Parameters\n ----------\n url : str\n Full URL.\n params : dict\n Dictionary of parameters to be passed with the request.\n Defaults to `None`.\n auth : bool\n Should an api token be sent in the request. Defaults to `False`.\n\n Returns\n -------\n class:`requests.Response`\n Response object of requests library.\n\n \"\"\"\n params = {}\n params[\"User-Agent\"] = \"pydataverse\"\n if self.api_token:\n params[\"key\"] = str(self.api_token)\n\n try:\n resp = get(url, params=params)\n if resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: GET - Authorization invalid {0}. MSG: {1}.\".format(\n url, error_msg\n )\n )\n elif resp.status_code >= 300:\n if resp.text:\n error_msg = resp.text\n raise OperationFailedError(\n \"ERROR: GET HTTP {0} - {1}. MSG: {2}\".format(\n resp.status_code, url, error_msg\n )\n )\n return resp\n except ConnectionError:\n raise ConnectionError(\n \"ERROR: GET - Could not establish connection to api {0}.\".format(url)\n )\n\n def post_request(self, url, data=None, auth=False, params=None, files=None):\n \"\"\"Make a POST request.\n\n params will be added as key-value pairs to the URL.\n\n Parameters\n ----------\n url : str\n Full URL.\n data : str\n Metadata as a json-formatted string. Defaults to `None`.\n auth : bool\n Should an api token be sent in the request. Defaults to `False`.\n files: dict\n e. g. 
files = {'file': open('sample_file.txt','rb')}\n params : dict\n Dictionary of parameters to be passed with the request.\n Defaults to `None`.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n params = {}\n params[\"User-Agent\"] = \"pydataverse\"\n if self.api_token:\n params[\"key\"] = self.api_token\n\n try:\n resp = post(url, data=data, params=params, files=files)\n if resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: POST HTTP 401 - Authorization error {0}. MSG: {1}\".format(\n url, error_msg\n )\n )\n return resp\n except ConnectionError:\n raise ConnectionError(\n \"ERROR: POST - Could not establish connection to API: {0}\".format(url)\n )\n\n def put_request(self, url, data=None, auth=False, params=None):\n \"\"\"Make a PUT request.\n\n Parameters\n ----------\n url : str\n Full URL.\n data : str\n Metadata as a json-formatted string. Defaults to `None`.\n auth : bool\n Should an api token be sent in the request. Defaults to `False`.\n params : dict\n Dictionary of parameters to be passed with the request.\n Defaults to `None`.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n params = {}\n params[\"User-Agent\"] = \"pydataverse\"\n if self.api_token:\n params[\"key\"] = self.api_token\n\n try:\n resp = put(url, data=data, params=params)\n if resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: PUT HTTP 401 - Authorization error {0}. MSG: {1}\".format(\n url, error_msg\n )\n )\n return resp\n except ConnectionError:\n raise ConnectionError(\n \"ERROR: PUT - Could not establish connection to api '{0}'.\".format(url)\n )\n\n def delete_request(self, url, auth=False, params=None):\n \"\"\"Make a Delete request.\n\n Parameters\n ----------\n url : str\n Full URL.\n auth : bool\n Should an api token be sent in the request. 
Defaults to `False`.\n params : dict\n Dictionary of parameters to be passed with the request.\n Defaults to `None`.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n params = {}\n params[\"User-Agent\"] = \"pydataverse\"\n if self.api_token:\n params[\"key\"] = self.api_token\n\n try:\n return delete(url, params=params)\n except ConnectionError:\n raise ConnectionError(\n \"ERROR: DELETE could not establish connection to api {}.\".format(url)\n )\n\n\nclass DataAccessApi(Api):\n \"\"\"Class to access Dataverse's Data Access API.\n\n Examples\n -------\n Examples should be written in doctest format, and\n should illustrate how to use the function/class.\n >>>\n\n Attributes\n ----------\n base_url_api_data_access : type\n Description of attribute `base_url_api_data_access`.\n base_url : type\n Description of attribute `base_url`.\n\n \"\"\"\n\n def __init__(self, base_url, api_token=None):\n \"\"\"Init an DataAccessApi() class.\"\"\"\n super().__init__(base_url, api_token)\n if base_url:\n self.base_url_api_data_access = \"{0}/access\".format(self.base_url_api)\n else:\n self.base_url_api_data_access = self.base_url_api\n\n def __str__(self):\n \"\"\"Return name of DataAccessApi() class for users.\n\n Returns\n -------\n str\n Naming of the DataAccess API class.\n\n \"\"\"\n return \"Data Access API: {0}\".format(self.base_url_api_data_access)\n\n def get_datafile(\n self,\n identifier,\n data_format=None,\n no_var_header=None,\n image_thumb=None,\n is_pid=True,\n auth=False,\n ):\n \"\"\"Download a datafile via the Dataverse Data Access API.\n\n Get by file id (HTTP Request).\n\n .. code-block:: bash\n\n GET /api/access/datafile/$id\n\n Get by persistent identifier (HTTP Request).\n\n .. code-block:: bash\n\n GET http://$SERVER/api/access/datafile/:persistentId/?persistentId=doi:10.5072/FK2/J8SJZB\n\n Parameters\n ----------\n identifier : str\n Identifier of the datafile. Can be datafile id or persistent\n identifier of the datafile (e. g. doi).\n is_pid : bool\n ``True`` to use persistent identifier. ``False``, if not.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n is_first_param = True\n if is_pid:\n url = \"{0}/datafile/{1}\".format(self.base_url_api_data_access, identifier)\n if data_format or no_var_header or image_thumb:\n url += \"?\"\n else:\n url = \"{0}/datafile/:persistentId/?persistentId={1}\".format(\n self.base_url_api_data_access, identifier\n )\n if data_format:\n url += \"format={0}\".format(data_format)\n is_first_param = False\n if no_var_header:\n if not is_first_param:\n url += \"&\"\n url += \"noVarHeader={0}\".format(no_var_header)\n is_first_param = False\n if image_thumb:\n if not is_first_param:\n url += \"&\"\n url += \"imageThumb={0}\".format(image_thumb)\n return self.get_request(url, auth=auth)\n\n def get_datafiles(self, identifier, data_format=None, auth=False):\n \"\"\"Download a datafile via the Dataverse Data Access API.\n\n Get by file id (HTTP Request).\n\n .. code-block:: bash\n\n GET /api/access/datafiles/$id1,$id2,...$idN\n\n Get by persistent identifier (HTTP Request).\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset. Can be datafile id or persistent\n identifier of the datafile (e. g. 
doi).\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/datafiles/{1}\".format(self.base_url_api_data_access, identifier)\n if data_format:\n url += \"?format={0}\".format(data_format)\n return self.get_request(url, auth=auth)\n\n def get_datafile_bundle(self, identifier, file_metadata_id=None, auth=False):\n \"\"\"Download a datafile in all its formats.\n\n HTTP Request:\n\n .. code-block:: bash\n\n GET /api/access/datafile/bundle/$id\n\n Data Access API calls can now be made using persistent identifiers (in\n addition to database ids). This is done by passing the constant\n :persistentId where the numeric id of the file is expected, and then\n passing the actual persistent id as a query parameter with the name\n persistentId.\n\n This is a convenience packaging method available for tabular data\n files. It returns a zipped bundle that contains the data in the\n following formats:\n - Tab-delimited;\n - “Saved Original”, the proprietary (SPSS, Stata, R, etc.) file\n from which the tabular data was ingested;\n - Generated R Data frame (unless the “original” above was in R);\n - Data (Variable) metadata record, in DDI XML;\n - File citation, in Endnote and RIS formats.\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/datafile/bundle/{1}\".format(\n self.base_url_api_data_access, identifier\n )\n if file_metadata_id:\n url += \"?fileMetadataId={0}\".format(file_metadata_id)\n return self.get_request(url, auth=auth)\n\n def request_access(self, identifier, auth=True, is_filepid=False):\n \"\"\"Request datafile access.\n\n This method requests access to the datafile whose id is passed on the behalf of an authenticated user whose key is passed. 
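
        A minimal sketch (hypothetical DOI and token, shown only for illustration)::

            >>> data_api = DataAccessApi('https://demo.dataverse.org', api_token='xxxx')
            >>> resp = data_api.request_access('doi:10.5072/FK2/EXAMPLE', is_filepid=True)
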
Note that not all datasets allow access requests to restricted files.\n\n https://guides.dataverse.org/en/4.18.1/api/dataaccess.html#request-access\n\n /api/access/datafile/$id/requestAccess\n\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X PUT http://$SERVER/api/access/datafile/{id}/requestAccess\n \"\"\"\n if is_filepid:\n url = \"{0}/datafile/:persistentId/requestAccess?persistentId={1}\".format(\n self.base_url_api_data_access, identifier\n )\n else:\n url = \"{0}/datafile/{1}/requestAccess\".format(\n self.base_url_api_data_access, identifier\n )\n return self.put_request(url, auth=auth)\n\n def allow_access_request(self, identifier, do_allow=True, auth=True, is_pid=True):\n \"\"\"Allow access request for datafiles.\n\n https://guides.dataverse.org/en/latest/api/dataaccess.html#allow-access-requests\n\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X PUT -d true http://$SERVER/api/access/{id}/allowAccessRequest\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X PUT -d true http://$SERVER/api/access/:persistentId/allowAccessRequest?persistentId={pid}\n \"\"\"\n if is_pid:\n url = \"{0}/:persistentId/allowAccessRequest?persistentId={1}\".format(\n self.base_url_api_data_access, identifier\n )\n else:\n url = \"{0}/{1}/allowAccessRequest\".format(\n self.base_url_api_data_access, identifier\n )\n\n if do_allow:\n data = \"true\"\n else:\n data = \"false\"\n return self.put_request(url, data=data, auth=auth)\n\n def grant_file_access(self, identifier, user, auth=False):\n \"\"\"Grant datafile access.\n\n https://guides.dataverse.org/en/4.18.1/api/dataaccess.html#grant-file-access\n\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X PUT http://$SERVER/api/access/datafile/{id}/grantAccess/{@userIdentifier}\n \"\"\"\n url = \"{0}/datafile/{1}/grantAccess/{2}\".format(\n self.base_url_api_data_access, identifier, user\n )\n return self.put_request(url, auth=auth)\n\n def list_file_access_requests(self, identifier, auth=False):\n \"\"\"Liste datafile access requests.\n\n https://guides.dataverse.org/en/4.18.1/api/dataaccess.html#list-file-access-requests\n\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X GET http://$SERVER/api/access/datafile/{id}/listRequests\n \"\"\"\n url = \"{0}/datafile/{1}/listRequests\".format(\n self.base_url_api_data_access, identifier\n )\n return self.get_request(url, auth=auth)\n\n\nclass MetricsApi(Api):\n \"\"\"Class to access Dataverse's Metrics API.\n\n Attributes\n ----------\n base_url_api_metrics : type\n Description of attribute `base_url_api_metrics`.\n base_url : type\n Description of attribute `base_url`.\n\n \"\"\"\n\n def __init__(self, base_url, api_token=None, api_version=\"latest\"):\n \"\"\"Init an MetricsApi() class.\"\"\"\n super().__init__(base_url, api_token, api_version)\n if base_url:\n self.base_url_api_metrics = \"{0}/api/info/metrics\".format(self.base_url)\n else:\n self.base_url_api_metrics = None\n\n def __str__(self):\n \"\"\"Return name of MetricsApi() class for users.\n\n Returns\n -------\n str\n Naming of the MetricsApi() class.\n\n \"\"\"\n return \"Metrics API: {0}\".format(self.base_url_api_metrics)\n\n def total(self, data_type, date_str=None, auth=False):\n \"\"\"\n GET https://$SERVER/api/info/metrics/$type\n GET https://$SERVER/api/info/metrics/$type/toMonth/$YYYY-DD\n\n $type can be set to dataverses, datasets, files or downloads.\n\n \"\"\"\n url = \"{0}/{1}\".format(self.base_url_api_metrics, data_type)\n if date_str:\n url += \"/toMonth/{0}\".format(date_str)\n return self.get_request(url, auth=auth)\n\n def past_days(self, data_type, days_str, 
auth=False):\n \"\"\"\n\n http://guides.dataverse.org/en/4.18.1/api/metrics.html\n GET https://$SERVER/api/info/metrics/$type/pastDays/$days\n\n $type can be set to dataverses, datasets, files or downloads.\n \"\"\"\n # TODO: check if date-string has proper format\n url = \"{0}/{1}/pastDays/{2}\".format(\n self.base_url_api_metrics, data_type, days_str\n )\n return self.get_request(url, auth=auth)\n\n def get_dataverses_by_subject(self, auth=False):\n \"\"\"\n GET https://$SERVER/api/info/metrics/dataverses/bySubject\n\n $type can be set to dataverses, datasets, files or downloads.\n \"\"\"\n # TODO: check if date-string has proper format\n url = \"{0}/dataverses/bySubject\".format(self.base_url_api_metrics)\n return self.get_request(url, auth=auth)\n\n def get_dataverses_by_category(self, auth=False):\n \"\"\"\n GET https://$SERVER/api/info/metrics/dataverses/byCategory\n\n $type can be set to dataverses, datasets, files or downloads.\n \"\"\"\n # TODO: check if date-string has proper format\n url = \"{0}/dataverses/byCategory\".format(self.base_url_api_metrics)\n return self.get_request(url, auth=auth)\n\n def get_datasets_by_subject(self, date_str=None, auth=False):\n \"\"\"\n GET https://$SERVER/api/info/metrics/datasets/bySubject\n\n $type can be set to dataverses, datasets, files or downloads.\n \"\"\"\n # TODO: check if date-string has proper format\n url = \"{0}/datasets/bySubject\".format(self.base_url_api_metrics)\n if date_str:\n url += \"/toMonth/{0}\".format(date_str)\n return self.get_request(url, auth=auth)\n\n def get_datasets_by_data_location(self, data_location, auth=False):\n \"\"\"\n GET https://$SERVER/api/info/metrics/datasets/bySubject\n\n $type can be set to dataverses, datasets, files or downloads.\n \"\"\"\n # TODO: check if date-string has proper format\n url = \"{0}/datasets/?dataLocation={1}\".format(\n self.base_url_api_metrics, data_location\n )\n return self.get_request(url, auth=auth)\n\n\nclass NativeApi(Api):\n \"\"\"Class to access Dataverse's Native API.\n\n Parameters\n ----------\n base_url : type\n Description of parameter `base_url`.\n api_token : type\n Description of parameter `api_token`.\n api_version : type\n Description of parameter `api_version`.\n\n Attributes\n ----------\n base_url_api_native : type\n Description of attribute `base_url_api_native`.\n base_url_api : type\n Description of attribute `base_url_api`.\n\n \"\"\"\n\n def __init__(self, base_url: str, api_token=None, api_version=\"v1\"):\n \"\"\"Init an Api() class.\n\n Scheme, host and path combined create the base-url for the api.\n See more about URL at `Wikipedia <https://en.wikipedia.org/wiki/URL>`_.\n\n Parameters\n ----------\n native_api_version : str\n Api version of Dataverse native api. Default is `v1`.\n\n \"\"\"\n super().__init__(base_url, api_token, api_version)\n self.base_url_api_native = self.base_url_api\n\n def __str__(self):\n \"\"\"Return name of NativeApi() class for users.\n\n Returns\n -------\n str\n Naming of the NativeApi() class.\n\n \"\"\"\n return \"Native API: {0}\".format(self.base_url_api_native)\n\n def get_dataverse(self, identifier, auth=False):\n \"\"\"Get dataverse metadata by alias or id.\n\n View metadata about a dataverse.\n\n .. 
code-block:: bash\n\n GET http://$SERVER/api/dataverses/$id\n\n Parameters\n ----------\n identifier : str\n Can either be a dataverse id (long), a dataverse alias (more\n robust), or the special value ``:root``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/dataverses/{1}\".format(self.base_url_api_native, identifier)\n return self.get_request(url, auth=auth)\n\n def create_dataverse(\n self, parent: str, metadata: str, auth: bool = True\n ) -> Response:\n \"\"\"Create a dataverse.\n\n Generates a new dataverse under identifier. Expects a JSON content\n describing the dataverse.\n\n HTTP Request:\n\n .. code-block:: bash\n\n POST http://$SERVER/api/dataverses/$id\n\n Download the `dataverse.json <http://guides.dataverse.org/en/latest/\n _downloads/dataverse-complete.json>`_ example file and modify to create\n dataverses to suit your needs. The fields name, alias, and\n dataverseContacts are required.\n\n Status Codes:\n 200: dataverse created\n 201: dataverse created\n\n Parameters\n ----------\n parent : str\n Parent dataverse, to which the Dataverse gets attached to.\n metadata : str\n Metadata of the Dataverse.\n auth : bool\n True if api authorization is necessary. Defaults to ``True``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n metadata_dict = json.loads(metadata)\n identifier = metadata_dict[\"alias\"]\n url = \"{0}/dataverses/{1}\".format(self.base_url_api_native, parent)\n resp = self.post_request(url, metadata, auth)\n\n if resp.status_code == 404:\n error_msg = resp.json()[\"message\"]\n raise DataverseNotFoundError(\n \"ERROR: HTTP 404 - Dataverse {0} was not found. MSG: {1}\".format(\n parent, error_msg\n )\n )\n elif resp.status_code != 200 and resp.status_code != 201:\n error_msg = resp.json()[\"message\"]\n raise OperationFailedError(\n \"ERROR: HTTP {0} - Dataverse {1} could not be created. MSG: {2}\".format(\n resp.status_code, identifier, error_msg\n )\n )\n else:\n print(\"Dataverse {0} created.\".format(identifier))\n return resp\n\n def publish_dataverse(self, identifier, auth=True):\n \"\"\"Publish a dataverse.\n\n Publish the Dataverse pointed by identifier, which can either by the\n dataverse alias or its numerical id.\n\n HTTP Request:\n\n .. code-block:: bash\n\n POST http://$SERVER/api/dataverses/$identifier/actions/:publish\n\n Status Code:\n 200: Dataverse published\n\n Parameters\n ----------\n identifier : str\n Can either be a dataverse id (long) or a dataverse alias (more\n robust).\n auth : bool\n True if api authorization is necessary. Defaults to ``False``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/dataverses/{1}/actions/:publish\".format(\n self.base_url_api_native, identifier\n )\n resp = self.post_request(url, auth=auth)\n\n if resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: HTTP 401 - Publish Dataverse {0} unauthorized. MSG: {1}\".format(\n identifier, error_msg\n )\n )\n elif resp.status_code == 404:\n error_msg = resp.json()[\"message\"]\n raise DataverseNotFoundError(\n \"ERROR: HTTP 404 - Dataverse {0} was not found. MSG: {1}\".format(\n identifier, error_msg\n )\n )\n elif resp.status_code != 200:\n error_msg = resp.json()[\"message\"]\n raise OperationFailedError(\n \"ERROR: HTTP {0} - Dataverse {1} could not be published. 
MSG: {2}\".format(\n resp.status_code, identifier, error_msg\n )\n )\n elif resp.status_code == 200:\n print(\"Dataverse {0} published.\".format(identifier))\n return resp\n\n def delete_dataverse(self, identifier, auth=True):\n \"\"\"Delete dataverse by alias or id.\n\n Status Code:\n 200: Dataverse deleted\n\n Parameters\n ----------\n identifier : str\n Can either be a dataverse id (long) or a dataverse alias (more\n robust).\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/dataverses/{1}\".format(self.base_url_api_native, identifier)\n resp = self.delete_request(url, auth)\n\n if resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: HTTP 401 - Delete Dataverse {0} unauthorized. MSG: {1}\".format(\n identifier, error_msg\n )\n )\n elif resp.status_code == 404:\n error_msg = resp.json()[\"message\"]\n raise DataverseNotFoundError(\n \"ERROR: HTTP 404 - Dataverse {0} was not found. MSG: {1}\".format(\n identifier, error_msg\n )\n )\n elif resp.status_code == 403:\n error_msg = resp.json()[\"message\"]\n raise DataverseNotEmptyError(\n \"ERROR: HTTP 403 - Dataverse {0} not empty. MSG: {1}\".format(\n identifier, error_msg\n )\n )\n elif resp.status_code != 200:\n error_msg = resp.json()[\"message\"]\n raise OperationFailedError(\n \"ERROR: HTTP {0} - Dataverse {1} could not be deleted. MSG: {2}\".format(\n resp.status_code, identifier, error_msg\n )\n )\n elif resp.status_code == 200:\n print(\"Dataverse {0} deleted.\".format(identifier))\n return resp\n\n def get_dataverse_roles(self, identifier: str, auth: bool = False) -> Response:\n \"\"\"All the roles defined directly in the dataverse by identifier.\n\n `Docs <https://guides.dataverse.org/en/latest/api/native-api.html#list-roles-defined-in-a-dataverse>`_\n\n .. code-block:: bash\n\n GET http://$SERVER/api/dataverses/$id/roles\n\n Parameters\n ----------\n identifier : str\n Can either be a dataverse id (long), a dataverse alias (more\n robust), or the special value ``:root``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/dataverses/{1}/roles\".format(self.base_url_api_native, identifier)\n return self.get_request(url, auth=auth)\n\n def get_dataverse_contents(self, identifier, auth=True):\n \"\"\"Gets contents of Dataverse.\n\n Parameters\n ----------\n identifier : str\n Can either be a dataverse id (long), a dataverse alias (more\n robust), or the special value ``:root``.\n auth : bool\n Description of parameter `auth` (the default is False).\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/dataverses/{1}/contents\".format(self.base_url_api_native, identifier)\n return self.get_request(url, auth=auth)\n\n def get_dataverse_assignments(self, identifier, auth=False):\n \"\"\"Get dataverse assignments by alias or id.\n\n View assignments of a dataverse.\n\n .. 
code-block:: bash\n\n GET http://$SERVER/api/dataverses/$id/assignments\n\n Parameters\n ----------\n identifier : str\n Can either be a dataverse id (long), a dataverse alias (more\n robust), or the special value ``:root``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/dataverses/{1}/assignments\".format(\n self.base_url_api_native, identifier\n )\n return self.get_request(url, auth=auth)\n\n def get_dataverse_facets(self, identifier, auth=False):\n \"\"\"Get dataverse facets by alias or id.\n\n View facets of a dataverse.\n\n .. code-block:: bash\n\n GET http://$SERVER/api/dataverses/$id/facets\n\n Parameters\n ----------\n identifier : str\n Can either be a dataverse id (long), a dataverse alias (more\n robust), or the special value ``:root``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/dataverses/{1}/facets\".format(self.base_url_api_native, identifier)\n return self.get_request(url, auth=auth)\n\n def dataverse_id2alias(self, dataverse_id, auth=False):\n \"\"\"Converts a Dataverse ID to an alias.\n\n Parameters\n ----------\n dataverse_id : str\n Dataverse ID.\n\n Returns\n -------\n str\n Dataverse alias\n\n \"\"\"\n resp = self.get_dataverse(dataverse_id, auth=auth)\n if \"data\" in resp.json():\n if \"alias\" in resp.json()[\"data\"]:\n return resp.json()[\"data\"][\"alias\"]\n print(\"ERROR: Can not resolve Dataverse ID to alias.\")\n return False\n\n def get_dataset(self, identifier, version=\":latest\", auth=True, is_pid=True):\n \"\"\"Get metadata of a Dataset.\n\n With Dataverse identifier:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/$identifier\n\n With persistent identifier:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/:persistentId/?persistentId=$id\n GET http://$SERVER/api/datasets/:persistentId/\n ?persistentId=$pid\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset. Can be a Dataverse identifier or a\n persistent identifier (e.g. ``doi:10.11587/8H3N93``).\n is_pid : bool\n True, if identifier is a persistent identifier.\n version : str\n Version to be retrieved:\n ``:latest-published``: the latest published version\n ``:latest``: either a draft (if exists) or the latest published version.\n ``:draft``: the draft version, if any\n ``x.y``: x.y a specific version, where x is the major version number and y is the minor version number.\n ``x``: same as x.0\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n if is_pid:\n # TODO: Add version to query http://guides.dataverse.org/en/4.18.1/api/native-api.html#get-json-representation-of-a-dataset\n url = \"{0}/datasets/:persistentId/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}\".format(self.base_url_api_native, identifier)\n # CHECK: Its not really clear, if the version query can also be done via ID.\n return self.get_request(url, auth=auth)\n\n def get_dataset_versions(self, identifier, auth=True, is_pid=True):\n \"\"\"Get versions of a Dataset.\n\n With Dataverse identifier:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/$identifier/versions\n\n With persistent identifier:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/:persistentId/versions?persistentId=$id\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset. Can be a Dataverse identifier or a\n persistent identifier (e.g. 
``doi:10.11587/8H3N93``).\n is_pid : bool\n True, if identifier is a persistent identifier.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/versions?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}/versions\".format(\n self.base_url_api_native, identifier\n )\n return self.get_request(url, auth=auth)\n\n def get_dataset_version(self, identifier, version, auth=True, is_pid=True):\n \"\"\"Get version of a Dataset.\n\n With Dataverse identifier:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/$identifier/versions/$versionNumber\n\n With persistent identifier:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/:persistentId/versions/$versionNumber?persistentId=$id\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset. Can be a Dataverse identifier or a\n persistent identifier (e.g. ``doi:10.11587/8H3N93``).\n version : str\n Version string of the Dataset.\n is_pid : bool\n True, if identifier is a persistent identifier.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/versions/{1}?persistentId={2}\".format(\n self.base_url_api_native, version, identifier\n )\n else:\n url = \"{0}/datasets/{1}/versions/{2}\".format(\n self.base_url_api_native, identifier, version\n )\n return self.get_request(url, auth=auth)\n\n def get_dataset_export(self, pid, export_format, auth=False):\n \"\"\"Get metadata of dataset exported in different formats.\n\n Export the metadata of the current published version of a dataset\n in various formats by its persistend identifier.\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/export?exporter=$exportformat&persistentId=$pid\n\n Parameters\n ----------\n pid : str\n Persistent identifier of the dataset. (e.g. ``doi:10.11587/8H3N93``).\n export_format : str\n Export format as a string. Formats: ``ddi``, ``oai_ddi``,\n ``dcterms``, ``oai_dc``, ``schema.org``, ``dataverse_json``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/datasets/export?exporter={1}&persistentId={2}\".format(\n self.base_url_api_native, export_format, pid\n )\n return self.get_request(url, auth=auth)\n\n def create_dataset(self, dataverse, metadata, pid=None, publish=False, auth=True):\n \"\"\"Add dataset to a dataverse.\n\n `Dataverse Documentation\n <http://guides.dataverse.org/en/latest/api/native-api.html#create-a-dataset-in-a-dataverse>`_\n\n HTTP Request:\n\n .. code-block:: bash\n\n POST http://$SERVER/api/dataverses/$dataverse/datasets --upload-file FILENAME\n\n Add new dataset with curl:\n\n .. code-block:: bash\n\n curl -H \"X-Dataverse-key: $API_TOKEN\" -X POST $SERVER_URL/api/dataverses/$DV_ALIAS/datasets --upload-file tests/data/dataset_min.json\n\n Import dataset with existing persistend identifier with curl:\n\n .. 
code-block:: bash\n\n curl -H \"X-Dataverse-key: $API_TOKEN\" -X POST $SERVER_URL/api/dataverses/$DV_ALIAS/datasets/:import?pid=$PERSISTENT_IDENTIFIER&release=yes --upload-file tests/data/dataset_min.json\n\n To create a dataset, you must create a JSON file containing all the\n metadata you want such as example file: `dataset-finch1.json\n <http://guides.dataverse.org/en/latest/_downloads/dataset-finch1.json>`_.\n Then, you must decide which dataverse to create the dataset in and\n target that datavese with either the \"alias\" of the dataverse (e.g.\n \"root\") or the database id of the dataverse (e.g. \"1\"). The initial\n version state will be set to \"DRAFT\":\n\n Status Code:\n 201: dataset created\n\n Import Dataset with existing PID:\n `<http://guides.dataverse.org/en/latest/api/native-api.html#import-a-dataset-into-a-dataverse>`_\n To import a dataset with an existing persistent identifier (PID), the\n dataset’s metadata should be prepared in Dataverse’s native JSON format. The\n PID is provided as a parameter at the URL. The following line imports a\n dataset with the PID PERSISTENT_IDENTIFIER to Dataverse, and then releases it:\n\n The pid parameter holds a persistent identifier (such as a DOI or Handle). The import will fail if no PID is provided, or if the provided PID fails validation.\n\n The optional release parameter tells Dataverse to immediately publish the\n dataset. If the parameter is changed to no, the imported dataset will\n remain in DRAFT status.\n\n Parameters\n ----------\n dataverse : str\n \"alias\" of the dataverse (e.g. ``root``) or the database id of the\n dataverse (e.g. ``1``)\n pid : str\n PID of existing Dataset.\n publish : bool\n Publish only works when a Dataset with an existing PID is created. If it\n is ``True``, Dataset should be instantly published, ``False``\n if a Draft should be created.\n metadata : str\n Metadata of the Dataset as a json-formatted string (e. g.\n `dataset-finch1.json <http://guides.dataverse.org/en/latest/_downloads/dataset-finch1.json>`_)\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n if pid:\n assert isinstance(pid, str)\n url = \"{0}/dataverses/{1}/datasets/:import?pid={2}\".format(\n self.base_url_api_native, dataverse, pid\n )\n if publish:\n url += \"&release=yes\"\n else:\n url += \"&release=no\"\n else:\n url = \"{0}/dataverses/{1}/datasets\".format(\n self.base_url_api_native, dataverse\n )\n resp = self.post_request(url, metadata, auth)\n\n if resp.status_code == 404:\n error_msg = resp.json()[\"message\"]\n raise DataverseNotFoundError(\n \"ERROR: HTTP 404 - Dataverse {0} was not found. MSG: {1}\".format(\n dataverse, error_msg\n )\n )\n elif resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: HTTP 401 - Create Dataset unauthorized. 
MSG: {0}\".format(\n error_msg\n )\n )\n elif resp.status_code == 201:\n if \"data\" in resp.json():\n if \"persistentId\" in resp.json()[\"data\"]:\n identifier = resp.json()[\"data\"][\"persistentId\"]\n print(\"Dataset with pid '{0}' created.\".format(identifier))\n elif \"id\" in resp.json()[\"data\"]:\n identifier = resp.json()[\"data\"][\"id\"]\n print(\"Dataset with id '{0}' created.\".format(identifier))\n else:\n print(\"ERROR: No identifier returned for created Dataset.\")\n return resp\n\n def edit_dataset_metadata(\n self, identifier, metadata, is_pid=True, replace=False, auth=True\n ):\n \"\"\"Edit metadata of a given dataset.\n\n `edit-dataset-metadata <http://guides.dataverse.org/en/latest/api/native-api.html#edit-dataset-metadata>`_.\n\n HTTP Request:\n\n .. code-block:: bash\n\n PUT http://$SERVER/api/datasets/editMetadata/$id --upload-file FILENAME\n\n Add data to dataset fields that are blank or accept multiple values with\n the following\n\n CURL Request:\n\n .. code-block:: bash\n\n curl -H \"X-Dataverse-key: $API_TOKEN\" -X PUT $SERVER_URL/api/datasets/:persistentId/editMetadata/?persistentId=$pid --upload-file dataset-add-metadata.json\n\n For these edits your JSON file need only include those dataset fields\n which you would like to edit. A sample JSON file may be downloaded\n here: `dataset-edit-metadata-sample.json\n <http://guides.dataverse.org/en/latest/_downloads/dataset-finch1.json>`_\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset. Can be a Dataverse identifier or a\n persistent identifier (e.g. ``doi:10.11587/8H3N93``).\n metadata : str\n Metadata of the Dataset as a json-formatted string.\n is_pid : bool\n ``True`` to use persistent identifier. ``False``, if not.\n replace : bool\n ``True`` to replace already existing metadata. ``False``, if not.\n auth : bool\n ``True``, if an api token should be sent. Defaults to ``False``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n Examples\n -------\n Get dataset metadata::\n\n >>> data = api.get_dataset(doi).json()[\"data\"][\"latestVersion\"][\"metadataBlocks\"][\"citation\"]\n >>> resp = api.edit_dataset_metadata(doi, data, is_replace=True, auth=True)\n >>> resp.status_code\n 200: metadata updated\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/editMetadata/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/editMetadata/{1}\".format(\n self.base_url_api_native, identifier\n )\n params = {\"replace\": True} if replace else {}\n resp = self.put_request(url, metadata, auth, params)\n\n if resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: HTTP 401 - Updating metadata unauthorized. MSG: {0}\".format(\n error_msg\n )\n )\n elif resp.status_code == 400:\n if \"Error parsing\" in resp.json()[\"message\"]:\n print(\"Wrong passed data format.\")\n else:\n print(\n \"You may not add data to a field that already has data and does not\"\n \" allow multiples. 
Use is_replace=true to replace existing data.\"\n )\n elif resp.status_code == 200:\n print(\"Dataset '{0}' updated\".format(identifier))\n return resp\n\n def create_dataset_private_url(self, identifier, is_pid=True, auth=True):\n \"\"\"Create private Dataset URL.\n\n POST http://$SERVER/api/datasets/$id/privateUrl?key=$apiKey\n\n\n http://guides.dataverse.org/en/4.16/api/native-api.html#create-a-private-url-for-a-dataset\n 'MSG: {1}'.format(pid, error_msg))\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/privateUrl/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}/privateUrl\".format(\n self.base_url_api_native, identifier\n )\n\n resp = self.post_request(url, auth=auth)\n\n if resp.status_code == 200:\n print(\n \"Dataset private URL created: {0}\".format(resp.json()[\"data\"][\"link\"])\n )\n return resp\n\n def get_dataset_private_url(self, identifier, is_pid=True, auth=True):\n \"\"\"Get private Dataset URL.\n\n GET http://$SERVER/api/datasets/$id/privateUrl?key=$apiKey\n\n http://guides.dataverse.org/en/4.16/api/native-api.html#get-the-private-url-for-a-dataset\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/privateUrl/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}/privateUrl\".format(\n self.base_url_api_native, identifier\n )\n\n resp = self.get_request(url, auth=auth)\n\n if resp.status_code == 200:\n print(\"Got Dataset private URL: {0}\".format(resp.json()[\"data\"][\"link\"]))\n return resp\n\n def delete_dataset_private_url(self, identifier, is_pid=True, auth=True):\n \"\"\"Get private Dataset URL.\n\n DELETE http://$SERVER/api/datasets/$id/privateUrl?key=$apiKey\n\n http://guides.dataverse.org/en/4.16/api/native-api.html#delete-the-private-url-from-a-dataset\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/privateUrl/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}/privateUrl\".format(\n self.base_url_api_native, identifier\n )\n\n resp = self.delete_request(url, auth=auth)\n\n if resp.status_code == 200:\n print(\"Got Dataset private URL: {0}\".format(resp.json()[\"data\"][\"link\"]))\n return resp\n\n def publish_dataset(self, pid, release_type=\"minor\", auth=True):\n \"\"\"Publish dataset.\n\n Publishes the dataset whose id is passed. If this is the first version\n of the dataset, its version number will be set to 1.0. Otherwise, the\n new dataset version number is determined by the most recent version\n number and the type parameter. Passing type=minor increases the minor\n version number (2.3 is updated to 2.4). Passing type=major increases\n the major version number (2.3 is updated to 3.0). Superusers can pass\n type=updatecurrent to update metadata without changing the version\n number.\n\n HTTP Request:\n\n .. code-block:: bash\n\n POST http://$SERVER/api/datasets/$id/actions/:publish?type=$type\n\n When there are no default workflows, a successful publication process\n will result in 200 OK response. When there are workflows, it is\n impossible for Dataverse to know how long they are going to take and\n whether they will succeed or not (recall that some stages might require\n human intervention). Thus, a 202 ACCEPTED is returned immediately. 
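A minimal sketch of the simple (no-workflow) case, assuming a ``NativeApi`` instance from this module; the server URL, token and DOI are purely illustrative::\n\n >>> from pyDataverse.api import NativeApi\n >>> api = NativeApi(\"https://demo.dataverse.org\", \"MY_API_TOKEN\")\n >>> resp = api.publish_dataset(\"doi:10.5072/FK2/AAA000\", release_type=\"major\")\n >>> resp.status_code # doctest: +SKIP\n\n 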
To\n know whether the publication process succeeded or not, the client code\n has to check the status of the dataset periodically, or perform some\n push request in the post-publish workflow.\n\n Status Code:\n 200: dataset published\n\n Parameters\n ----------\n pid : str\n Persistent identifier of the dataset (e.g.\n ``doi:10.11587/8H3N93``).\n release_type : str\n Passing ``minor`` increases the minor version number (2.3 is\n updated to 2.4). Passing ``major`` increases the major version\n number (2.3 is updated to 3.0). Superusers can pass\n ``updatecurrent`` to update metadata without changing the version\n number.\n auth : bool\n ``True`` if api authorization is necessary. Defaults to ``False``.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/datasets/:persistentId/actions/:publish\".format(\n self.base_url_api_native\n )\n url += \"?persistentId={0}&type={1}\".format(pid, release_type)\n resp = self.post_request(url, auth=auth)\n\n if resp.status_code == 404:\n error_msg = resp.json()[\"message\"]\n raise DatasetNotFoundError(\n \"ERROR: HTTP 404 - Dataset {0} was not found. MSG: {1}\".format(\n pid, error_msg\n )\n )\n elif resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: HTTP 401 - User not allowed to publish dataset {0}. \"\n \"MSG: {1}\".format(pid, error_msg)\n )\n elif resp.status_code == 200:\n print(\"Dataset {0} published\".format(pid))\n return resp\n\n def get_dataset_lock(self, pid):\n \"\"\"Get if dataset is locked.\n\n The lock API endpoint was introduced in Dataverse 4.9.3.\n\n Parameters\n ----------\n pid : str\n Persistent identifier of the Dataset (e.g.\n ``doi:10.11587/8H3N93``).\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/datasets/:persistentId/locks/?persistentId={1}\".format(\n self.base_url_api_native, pid\n )\n return self.get_request(url, auth=True)\n\n def get_dataset_assignments(self, identifier, is_pid=True, auth=True):\n \"\"\"Get Dataset assignments.\n\n GET http://$SERVER/api/datasets/$id/assignments?key=$apiKey\n\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/assignments/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}/assignments\".format(\n self.base_url_api_native, identifier\n )\n return self.get_request(url, auth=auth)\n\n def delete_dataset(self, identifier, is_pid=True, auth=True):\n \"\"\"Delete a dataset.\n\n Delete the dataset whose id is passed\n\n Status Code:\n 200: dataset deleted\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset. Can be a Dataverse identifier or a\n persistent identifier (e.g. ``doi:10.11587/8H3N93``).\n is_pid : bool\n True, if identifier is a persistent identifier.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}\".format(self.base_url_api_native, identifier)\n resp = self.delete_request(url, auth=auth)\n\n if resp.status_code == 404:\n error_msg = resp.json()[\"message\"]\n raise DatasetNotFoundError(\n \"ERROR: HTTP 404 - Dataset '{0}' was not found. 
MSG: {1}\".format(\n identifier, error_msg\n )\n )\n elif resp.status_code == 405:\n error_msg = resp.json()[\"message\"]\n raise OperationFailedError(\n \"ERROR: HTTP 405 - \"\n \"Published datasets can only be deleted from the GUI. For \"\n \"more information, please refer to \"\n \"https://github.com/IQSS/dataverse/issues/778\"\n \" MSG: {0}\".format(error_msg)\n )\n elif resp.status_code == 401:\n error_msg = resp.json()[\"message\"]\n raise ApiAuthorizationError(\n \"ERROR: HTTP 401 - User not allowed to delete dataset '{0}'. \"\n \"MSG: {1}\".format(identifier, error_msg)\n )\n elif resp.status_code == 200:\n print(\"Dataset '{0}' deleted.\".format(identifier))\n return resp\n\n def destroy_dataset(self, identifier, is_pid=True, auth=True):\n \"\"\"Destroy Dataset.\n\n http://guides.dataverse.org/en/4.16/api/native-api.html#delete-published-dataset\n\n Normally published datasets should not be deleted, but there exists a\n “destroy” API endpoint for superusers which will act on a dataset given\n a persistent ID or dataset database ID:\n\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X DELETE http://$SERVER/api/datasets/:persistentId/destroy/?persistentId=doi:10.5072/FK2/AAA000\n\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X DELETE http://$SERVER/api/datasets/999/destroy\n\n Calling the destroy endpoint is permanent and irreversible. It will\n remove the dataset and its datafiles, then re-index the parent\n dataverse in Solr. This endpoint requires the API token of a\n superuser.\n\n \"\"\"\n if is_pid:\n url = \"{0}/datasets/:persistentId/destroy/?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n url = \"{0}/datasets/{1}/destroy\".format(\n self.base_url_api_native, identifier\n )\n\n resp = self.delete_request(url, auth=auth)\n\n if resp.status_code == 200:\n print(\"Dataset {0} destroyed\".format(resp.json()))\n return resp\n\n def get_datafiles_metadata(self, pid, version=\":latest\", auth=True):\n \"\"\"List metadata of all datafiles of a dataset.\n\n `Documentation <http://guides.dataverse.org/en/latest/api/native-api.html#list-files-in-a-dataset>`_\n\n HTTP Request:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/datasets/$id/versions/$versionId/files\n\n Parameters\n ----------\n pid : str\n Persistent identifier of the dataset. e.g. ``doi:10.11587/8H3N93``.\n version : str\n Version of dataset. 
Defaults to `1`.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n base_str = \"{0}/datasets/:persistentId/versions/\".format(\n self.base_url_api_native\n )\n url = base_str + \"{0}/files?persistentId={1}\".format(version, pid)\n return self.get_request(url, auth=auth)\n\n def get_datafile_metadata(\n self, identifier, is_filepid=False, is_draft=False, auth=True\n ):\n \"\"\"\n GET http://$SERVER/api/files/{id}/metadata\n\n curl $SERVER_URL/api/files/$ID/metadata\n curl \"$SERVER_URL/api/files/:persistentId/metadata?persistentId=$PERSISTENT_ID\"\n curl \"https://demo.dataverse.org/api/files/:persistentId/metadata?persistentId=doi:10.5072/FK2/AAA000\"\n curl -H \"X-Dataverse-key:$API_TOKEN\" $SERVER_URL/api/files/$ID/metadata/draft\n\n \"\"\"\n if is_filepid:\n url = \"{0}/files/:persistentId/metadata\".format(self.base_url_api_native)\n if is_draft:\n url += \"/draft\"\n url += \"?persistentId={0}\".format(identifier)\n else:\n url = \"{0}/files/{1}/metadata\".format(self.base_url_api_native, identifier)\n if is_draft:\n url += \"/draft\"\n # CHECK: Its not really clear, if the version query can also be done via ID.\n return self.get_request(url, auth=auth)\n\n def upload_datafile(self, identifier, filename, json_str=None, is_pid=True):\n \"\"\"Add file to a dataset.\n\n Add a file to an existing Dataset. Description and tags are optional:\n\n HTTP Request:\n\n .. code-block:: bash\n\n POST http://$SERVER/api/datasets/$id/add\n\n The upload endpoint checks the content of the file, compares it with\n existing files and tells if already in the database (most likely via\n hashing).\n\n `adding-files <http://guides.dataverse.org/en/latest/api/native-api.html#adding-files>`_.\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset.\n filename : str\n Full filename with path.\n json_str : str\n Metadata as JSON string.\n is_pid : bool\n ``True`` to use persistent identifier. ``False``, if not.\n\n Returns\n -------\n dict\n The json string responded by the CURL request, converted to a\n dict().\n\n \"\"\"\n url = self.base_url_api_native\n if is_pid:\n url += \"/datasets/:persistentId/add?persistentId={0}\".format(identifier)\n else:\n url += \"/datasets/{0}/add\".format(identifier)\n\n files = {\"file\": open(filename, \"rb\")}\n return self.post_request(\n url, data={\"jsonData\": json_str}, files=files, auth=True\n )\n\n def update_datafile_metadata(self, identifier, json_str=None, is_filepid=False):\n \"\"\"Update datafile metadata.\n\n metadata such as description, directoryLabel (File Path) and tags are not carried over from the file being replaced:\n Updates the file metadata for an existing file where ID is the\n database id of the file to update or PERSISTENT_ID is the persistent id\n (DOI or Handle) of the file. Requires a jsonString expressing the new\n metadata. No metadata from the previous version of this file will be\n persisted, so if you want to update a specific field first get the\n json with the above command and alter the fields you want.\n\n\n Also note that dataFileTags are not versioned and changes to these will update the published version of the file.\n\n This functions needs CURL to work!\n\n HTTP Request:\n\n .. 
code-block:: bash\n\n POST -F '[email protected]' -F 'jsonData={json}' http://$SERVER/api/files/{id}/metadata?key={apiKey}\n curl -H \"X-Dataverse-key:$API_TOKEN\" -X POST -F 'jsonData={\"description\":\"My description bbb.\",\"provFreeform\":\"Test prov freeform\",\"categories\":[\"Data\"],\"restrict\":false}' $SERVER_URL/api/files/$ID/metadata\n curl -H \"X-Dataverse-key:<KEY>\" -X POST -F 'jsonData={\"description\":\"My description bbb.\",\"provFreeform\":\"Test prov freeform\",\"categories\":[\"Data\"],\"restrict\":false}' \"https://demo.dataverse.org/api/files/:persistentId/metadata?persistentId=doi:10.5072/FK2/AAA000\"\n\n `Docs <http://guides.dataverse.org/en/latest/api/native-api.html#updating-file-metadata>`_.\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset.\n json_str : str\n Metadata as JSON string.\n is_filepid : bool\n ``True`` to use persistent identifier for datafile. ``False``, if\n not.\n\n Returns\n -------\n dict\n The json string responded by the CURL request, converted to a\n dict().\n\n \"\"\"\n # if is_filepid:\n # url = '{0}/files/:persistentId/metadata?persistentId={1}'.format(\n # self.base_url_api_native, identifier)\n # else:\n # url = '{0}/files/{1}/metadata'.format(self.base_url_api_native, identifier)\n #\n # data = {'jsonData': json_str}\n # resp = self.post_request(\n # url,\n # data=data,\n # auth=True\n # )\n query_str = self.base_url_api_native\n if is_filepid:\n query_str = \"{0}/files/:persistentId/metadata?persistentId={1}\".format(\n self.base_url_api_native, identifier\n )\n else:\n query_str = \"{0}/files/{1}/metadata\".format(\n self.base_url_api_native, identifier\n )\n shell_command = 'curl -H \"X-Dataverse-key: {0}\"'.format(self.api_token)\n shell_command += \" -X POST -F 'jsonData={0}' {1}\".format(json_str, query_str)\n # TODO(Shell): is shell=True necessary?\n return sp.run(shell_command, shell=True, stdout=sp.PIPE)\n\n def replace_datafile(self, identifier, filename, json_str, is_filepid=True):\n \"\"\"Replace datafile.\n\n HTTP Request:\n\n .. code-block:: bash\n\n POST -F 'file=<EMAIL>' -F 'jsonData={json}' http://$SERVER/api/files/{id}/replace?key={apiKey}\n\n `replacing-files <http://guides.dataverse.org/en/latest/api/native-api.html#replacing-files>`_.\n\n Parameters\n ----------\n identifier : str\n Identifier of the dataset.\n filename : str\n Full filename with path.\n json_str : str\n Metadata as JSON string.\n is_filepid : bool\n ``True`` to use persistent identifier for datafile. ``False``, if\n not.\n\n Returns\n -------\n dict\n The json string responded by the CURL request, converted to a\n dict().\n\n \"\"\"\n url = self.base_url_api_native\n files = {\"file\": open(filename, \"rb\")}\n data = {\"jsonData\": json_str}\n\n if is_filepid:\n url += \"/files/:persistentId/replace?persistentId={0}\".format(identifier)\n else:\n url += \"/files/{0}/replace\".format(identifier)\n return self.post_request(url, data=data, files=files, auth=True)\n\n def get_info_version(self, auth=False):\n \"\"\"Get the Dataverse version and build number.\n\n The response contains the version and build numbers. Requires no api\n token.\n\n HTTP Request:\n\n .. 
code-block:: bash\n\n GET http://$SERVER/api/info/version\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/info/version\".format(self.base_url_api_native)\n return self.get_request(url, auth=auth)\n\n def get_info_server(self, auth=False):\n \"\"\"Get dataverse server name.\n\n This is useful when a Dataverse system is composed of multiple Java EE\n servers behind a load balancer.\n\n HTTP Request:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/info/server\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/info/server\".format(self.base_url_api_native)\n return self.get_request(url, auth=auth)\n\n def get_info_api_terms_of_use(self, auth=False):\n \"\"\"Get API Terms of Use url.\n\n The response contains the text value inserted as API Terms of use which\n uses the database setting :ApiTermsOfUse.\n\n HTTP Request:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/info/apiTermsOfUse\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/info/apiTermsOfUse\".format(self.base_url_api_native)\n return self.get_request(url, auth=auth)\n\n def get_metadatablocks(self, auth=False):\n \"\"\"Get info about all metadata blocks.\n\n Lists brief info about all metadata blocks registered in the system.\n\n HTTP Request:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/metadatablocks\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/metadatablocks\".format(self.base_url_api_native)\n return self.get_request(url, auth=auth)\n\n def get_metadatablock(self, identifier, auth=False):\n \"\"\"Get info about single metadata block.\n\n Returns data about the block whose identifier is passed. identifier can\n either be the block’s id, or its name.\n\n HTTP Request:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/metadatablocks/$identifier\n\n Parameters\n ----------\n identifier : str\n Can be block's id, or it's name.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/metadatablocks/{1}\".format(self.base_url_api_native, identifier)\n return self.get_request(url, auth=auth)\n\n def get_user_api_token_expiration_date(self, auth=False):\n \"\"\"Get the expiration date of an Users's API token.\n\n HTTP Request:\n\n .. code-block:: bash\n\n curl -H X-Dataverse-key:$API_TOKEN -X GET $SERVER_URL/api/users/token\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/users/token\".format(self.base_url_api_native)\n return self.get_request(url, auth=auth)\n\n def recreate_user_api_token(self):\n \"\"\"Recreate an Users API token.\n\n HTTP Request:\n\n .. code-block:: bash\n\n curl -H X-Dataverse-key:$API_TOKEN -X POST $SERVER_URL/api/users/token/recreate\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/users/token/recreate\".format(self.base_url_api_native)\n return self.post_request(url)\n\n def delete_user_api_token(self):\n \"\"\"Delete an Users API token.\n\n HTTP Request:\n\n .. 
code-block:: bash\n\n curl -H X-Dataverse-key:$API_TOKEN -X POST $SERVER_URL/api/users/token/recreate\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/users/token\".format(self.base_url_api_native)\n return self.delete_request(url)\n\n def create_role(self, dataverse_id):\n \"\"\"Create a new role.\n\n `Docs <https://guides.dataverse.org/en/latest/api/native-api.html#id2>`_\n\n HTTP Request:\n\n .. code-block:: bash\n\n POST http://$SERVER/api/roles?dvo=$dataverseIdtf&key=$apiKey\n\n Parameters\n ----------\n dataverse_id : str\n Can be alias or id of a Dataverse.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/roles?dvo={1}\".format(self.base_url_api_native, dataverse_id)\n return self.post_request(url)\n\n def show_role(self, role_id, auth=False):\n \"\"\"Show role.\n\n `Docs <https://guides.dataverse.org/en/latest/api/native-api.html#show-role>`_\n\n HTTP Request:\n\n .. code-block:: bash\n\n GET http://$SERVER/api/roles/$id\n\n Parameters\n ----------\n identifier : str\n Can be alias or id of a Dataverse.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/roles/{1}\".format(self.base_url_api_native, role_id)\n return self.get_request(url, auth=auth)\n\n def delete_role(self, role_id):\n \"\"\"Delete role.\n\n `Docs <https://guides.dataverse.org/en/latest/api/native-api.html#delete-role>`_\n\n Parameters\n ----------\n identifier : str\n Can be alias or id of a Dataverse.\n\n Returns\n -------\n requests.Response\n Response object of requests library.\n\n \"\"\"\n url = \"{0}/roles/{1}\".format(self.base_url_api_native, role_id)\n return self.delete_request(url)\n\n def get_children(\n self, parent=\":root\", parent_type=\"dataverse\", children_types=None, auth=True\n ):\n \"\"\"Walk through children of parent element in Dataverse tree.\n\n Default: gets all child dataverses if parent = dataverse or all\n\n Example Dataverse Tree:\n\n .. code-block:: bash\n\n data = {\n 'type': 'dataverse',\n 'dataverse_id': 1,\n 'dataverse_alias': ':root',\n 'children': [\n {\n 'type': 'datasets',\n 'dataset_id': 231,\n 'pid': 'doi:10.11587/LYFDYC',\n 'children': [\n {\n 'type': 'datafile'\n 'datafile_id': 532,\n 'pid': 'doi:10.11587/LYFDYC/C2WTRN',\n 'filename': '10082_curation.pdf '\n }\n ]\n }\n ]\n }\n\n Parameters\n ----------\n parent : str\n Description of parameter `parent`.\n parent_type : str\n Description of parameter `parent_type`.\n children_types : list\n Types of children to be collected. 'dataverses', 'datasets' and 'datafiles' are valid list items.\n auth : bool\n Authentication needed\n\n Returns\n -------\n list\n List of Dataverse data type dictionaries. 
Different ones for\n Dataverses, Datasets and Datafiles.\n\n # TODO\n - differentiate between published and unpublished data types\n - util function to read out all dataverses into a list\n - util function to read out all datasets into a list\n - util function to read out all datafiles into a list\n - Unify tree and models\n\n \"\"\"\n children = []\n\n if children_types is None:\n children_types = []\n\n if len(children_types) == 0:\n if parent_type == \"dataverse\":\n children_types = [\"dataverses\"]\n elif parent_type == \"dataset\":\n children_types = [\"datafiles\"]\n\n if (\n \"dataverses\" in children_types\n and \"datafiles\" in children_types\n and \"datasets\" not in children_types\n ):\n print(\n \"ERROR: Wrong children_types passed: 'dataverses' and 'datafiles'\"\n \" passed, 'datasets' missing.\"\n )\n return False\n\n if parent_type == \"dataverse\":\n # check for dataverses and datasets as children and get their ID\n parent_alias = parent\n resp = self.get_dataverse_contents(parent_alias, auth=auth)\n if \"data\" in resp.json():\n contents = resp.json()[\"data\"]\n for content in contents:\n if (\n content[\"type\"] == \"dataverse\"\n and \"dataverses\" in children_types\n ):\n dataverse_id = content[\"id\"]\n child_alias = self.dataverse_id2alias(dataverse_id, auth=auth)\n children.append(\n {\n \"dataverse_id\": dataverse_id,\n \"title\": content[\"title\"],\n \"dataverse_alias\": child_alias,\n \"type\": \"dataverse\",\n \"children\": self.get_children(\n parent=child_alias,\n parent_type=\"dataverse\",\n children_types=children_types,\n auth=auth,\n ),\n }\n )\n elif content[\"type\"] == \"dataset\" and \"datasets\" in children_types:\n pid = (\n content[\"protocol\"]\n + \":\"\n + content[\"authority\"]\n + \"/\"\n + content[\"identifier\"]\n )\n children.append(\n {\n \"dataset_id\": content[\"id\"],\n \"pid\": pid,\n \"type\": \"dataset\",\n \"children\": self.get_children(\n parent=pid,\n parent_type=\"dataset\",\n children_types=children_types,\n auth=auth,\n ),\n }\n )\n else:\n print(\"ERROR: 'get_dataverse_contents()' API request not working.\")\n elif parent_type == \"dataset\" and \"datafiles\" in children_types:\n # check for datafiles as children and get their ID\n pid = parent\n resp = self.get_datafiles_metadata(parent, version=\":latest\")\n if \"data\" in resp.json():\n for datafile in resp.json()[\"data\"]:\n children.append(\n {\n \"datafile_id\": datafile[\"dataFile\"][\"id\"],\n \"filename\": datafile[\"dataFile\"][\"filename\"],\n \"label\": datafile[\"label\"],\n \"pid\": datafile[\"dataFile\"][\"persistentId\"],\n \"type\": \"datafile\",\n }\n )\n else:\n print(\"ERROR: 'get_datafiles()' API request not working.\")\n return children\n\n def get_user(self):\n \"\"\"Get details of the current authenticated user.\n\n Auth must be ``true`` for this to work. 
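A minimal doctest-style sketch, assuming a token-backed ``NativeApi`` instance; the URL and token are placeholders::\n\n >>> from pyDataverse.api import NativeApi\n >>> api = NativeApi(\"https://demo.dataverse.org\", \"MY_API_TOKEN\")\n >>> resp = api.get_user()\n >>> resp.json() # doctest: +SKIP\n\n The 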
API endpoint is available for Dataverse >= 5.3.\n\n https://guides.dataverse.org/en/latest/api/native-api.html#get-user-information-in-json-format\n \"\"\"\n url = f\"{self.base_url}/users/:me\"\n return self.get_request(url, auth=True)\n\n def redetect_file_type(\n self, identifier: str, is_pid: bool = False, dry_run: bool = False\n ) -> Response:\n \"\"\"Redetect file type.\n\n https://guides.dataverse.org/en/latest/api/native-api.html#redetect-file-type\n\n Parameters\n ----------\n identifier : str\n Datafile id (fileid) or file PID.\n is_pid : bool\n Is the identifier a PID, by default False.\n dry_run : bool, optional\n [description], by default False\n\n Returns\n -------\n Response\n Request Response() object.\n \"\"\"\n if dry_run is True:\n dry_run_str = \"true\"\n elif dry_run is False:\n dry_run_str = \"false\"\n if is_pid:\n url = f\"{self.base_url_api_native}/files/:persistentId/redetect?persistentId={identifier}&dryRun={dry_run_str}\"\n else:\n url = f\"{self.base_url_api_native}/files/{identifier}/redetect?dryRun={dry_run_str}\"\n return self.post_request(url, auth=True)\n\n def reingest_datafile(self, identifier: str, is_pid: bool = False) -> Response:\n \"\"\"Reingest datafile.\n\n https://guides.dataverse.org/en/latest/api/native-api.html#reingest-a-file\n\n Parameters\n ----------\n identifier : str\n Datafile id (fileid) or file PID.\n is_pid : bool\n Is the identifier a PID, by default False.\n\n Returns\n -------\n Response\n Request Response() object.\n \"\"\"\n if is_pid:\n url = f\"{self.base_url_api_native}/files/:persistentId/reingest?persistentId={identifier}\"\n else:\n url = f\"{self.base_url_api_native}/files/{identifier}/reingest\"\n return self.post_request(url, auth=True)\n\n def uningest_datafile(self, identifier: str, is_pid: bool = False) -> Response:\n \"\"\"Uningest datafile.\n\n https://guides.dataverse.org/en/latest/api/native-api.html#uningest-a-file\n\n Parameters\n ----------\n identifier : str\n Datafile id (fileid) or file PID.\n is_pid : bool\n Is the identifier a PID, by default False.\n\n Returns\n -------\n Response\n Request Response() object.\n \"\"\"\n if is_pid:\n url = f\"{self.base_url_api_native}/files/:persistentId/uningest?persistentId={identifier}\"\n else:\n url = f\"{self.base_url_api_native}/files/{identifier}/uningest\"\n return self.post_request(url, auth=True)\n\n def restrict_datafile(self, identifier: str, is_pid: bool = False) -> Response:\n \"\"\"Uningest datafile.\n\n https://guides.dataverse.org/en/latest/api/native-api.html#restrict-files\n\n Parameters\n ----------\n identifier : str\n Datafile id (fileid) or file PID.\n is_pid : bool\n Is the identifier a PID, by default False.\n\n Returns\n -------\n Response\n Request Response() object.\n \"\"\"\n if is_pid:\n url = f\"{self.base_url_api_native}/files/:persistentId/restrict?persistentId={identifier}\"\n else:\n url = f\"{self.base_url_api_native}/files/{identifier}/restrict\"\n return self.put_request(url, auth=True)\n\n\nclass SearchApi(Api):\n \"\"\"Class to access Dataverse's Search API.\n\n Examples\n -------\n Examples should be written in doctest format, and\n should illustrate how to use the function/class.\n >>>\n\n Attributes\n ----------\n base_url_api_search : type\n Description of attribute `base_url_api_search`.\n base_url : type\n Description of attribute `base_url`.\n\n \"\"\"\n\n def __init__(self, base_url, api_token=None, api_version=\"latest\"):\n \"\"\"Init an SearchApi() class.\"\"\"\n super().__init__(base_url, api_token, api_version)\n if 
base_url:\n self.base_url_api_search = \"{0}/search?q=\".format(self.base_url_api)\n else:\n self.base_url_api_search = self.base_url_api\n\n def __str__(self):\n \"\"\"Return name of SearchApi() class for users.\n\n Returns\n -------\n str\n Naming of the Search API class.\n\n \"\"\"\n return \"Search API: {0}\".format(self.base_url_api_search)\n\n def search(\n self,\n q_str,\n data_type=None,\n subtree=None,\n sort=None,\n order=None,\n per_page=None,\n start=None,\n show_relevance=None,\n show_facets=None,\n filter_query=None,\n show_entity_ids=None,\n query_entities=None,\n auth=False,\n ):\n \"\"\"Search.\n\n http://guides.dataverse.org/en/4.18.1/api/search.html\n \"\"\"\n url = \"{0}{1}\".format(self.base_url_api_search, q_str)\n if data_type:\n # TODO: pass list of types\n url += \"&type={0}\".format(data_type)\n if subtree:\n # TODO: pass list of subtrees\n url += \"&subtree={0}\".format(subtree)\n if sort:\n url += \"&sort={0}\".format(sort)\n if order:\n url += \"&order={0}\".format(order)\n if per_page:\n url += \"&per_page={0}\".format(per_page)\n if start:\n url += \"&start={0}\".format(start)\n if show_relevance:\n url += \"&show_relevance={0}\".format(show_relevance)\n if show_facets:\n url += \"&show_facets={0}\".format(show_facets)\n if filter_query:\n url += \"&fq={0}\".format(filter_query)\n if show_entity_ids:\n url += \"&show_entity_ids={0}\".format(show_entity_ids)\n if query_entities:\n url += \"&query_entities={0}\".format(query_entities)\n return self.get_request(url, auth=auth)\n\n\nclass SwordApi(Api):\n \"\"\"Class to access Dataverse's SWORD API.\n\n Parameters\n ----------\n sword_api_version : str\n SWORD API version. Defaults to 'v1.1'.\n\n Attributes\n ----------\n base_url_api_sword : str\n Description of attribute `base_url_api_sword`.\n base_url : str\n Description of attribute `base_url`.\n native_api_version : str\n Description of attribute `native_api_version`.\n sword_api_version\n\n \"\"\"\n\n def __init__(\n self, base_url, api_version=\"v1.1\", api_token=None, sword_api_version=\"v1.1\"\n ):\n \"\"\"Init a :class:`SwordApi <pyDataverse.api.SwordApi>` instance.\n\n Parameters\n ----------\n sword_api_version : str\n Api version of Dataverse SWORD API.\n\n \"\"\"\n super().__init__(base_url, api_token, api_version)\n if not isinstance(sword_api_version, (\"\".__class__, \"\".__class__)):\n raise ApiUrlError(\n \"sword_api_version {0} is not a string.\".format(sword_api_version)\n )\n self.sword_api_version = sword_api_version\n\n # Test connection.\n if self.base_url and sword_api_version:\n self.base_url_api_sword = \"{0}/dvn/api/data-deposit/{1}\".format(\n self.base_url, self.sword_api_version\n )\n else:\n self.base_url_api_sword = base_url\n\n def __str__(self):\n \"\"\"Return name of :class:Api() class for users.\n\n Returns\n -------\n str\n Naming of the SWORD API class.\n\n \"\"\"\n return \"SWORD API: {0}\".format(self.base_url_api_sword)\n\n def get_service_document(self):\n url = \"{0}/swordv2/service-document\".format(self.base_url_api_sword)\n return self.get_request(url, auth=True)\n", "id": "6183071", "language": "Python", "matching_score": 2.9185471534729004, "max_stars_count": 21, "path": "src/pyDataverse/api.py" }, { "content": "import sys\nimport os\nimport subprocess\nfrom github import Github\nfrom github.GithubException import UnknownObjectException\nSEP = '-'\nBUILD_DIR=\"build\"\n\ntoken = sys.argv[1]\nrepo_name = sys.argv[2]\ntag = sys.argv[3]\n\n# split off first part of tag\ntag_parts = tag.split(SEP)\nworkflow_name = 
SEP.join(tag_parts[:-1])\nworkflow_path = 'workflows/{}.cwl'.format(workflow_name)\nworkflow_filename = os.path.basename(workflow_path)\npacked_workflow_path = '{}/{}'.format(BUILD_DIR, workflow_filename)\nversion_str = tag_parts[-1]\n\nif not os.path.exists(workflow_path):\n raise ValueError(\"No such workflow file found: {}.\".format(workflow_path))\n\ng = Github(token)\nrepo = g.get_repo(repo_name)\ntry:\n release = repo.get_release(tag)\nexcept UnknownObjectException:\n release = repo.create_git_release(tag=tag, name=tag, message='Releasing {} {}'.format(workflow_name, version_str))\n\nrelease_assets = release.get_assets()\nasset_names = [asset.name for asset in release_assets]\nif workflow_filename in asset_names:\n raise ValueError(\"Packed workflow already exists in this release.\")\n\nos.makedirs(BUILD_DIR, exist_ok=True)\n# pack the workflow\nwith open(packed_workflow_path, 'w') as outfile:\n subprocess.call([\"cwltool\", \"--pack\", workflow_path], stdout=outfile)\nrelease.upload_asset(packed_workflow_path)\n", "id": "2874774", "language": "Python", "matching_score": 0.5396655797958374, "max_stars_count": 0, "path": "scripts/release.py" }, { "content": "from kubernetes import client, config, watch\n\n\nclass ClusterApi(object):\n def __init__(self, namespace):\n config.load_kube_config()\n self.core = client.CoreV1Api()\n self.batch = client.BatchV1Api()\n self.namespace = namespace\n\n def create_persistent_volume_claim(self, name, storage_size_in_g,\n access_modes=[\"ReadWriteMany\"],\n storage_class_name=\"glusterfs-storage\"):\n pvc = client.V1PersistentVolumeClaim()\n pvc.metadata = client.V1ObjectMeta(name=name)\n storage_size = \"{}Gi\".format(storage_size_in_g)\n resources = client.V1ResourceRequirements(requests={\"storage\": storage_size})\n pvc.spec = client.V1PersistentVolumeClaimSpec(access_modes=access_modes,\n resources=resources,\n storage_class_name=storage_class_name)\n return self.core.create_namespaced_persistent_volume_claim(self.namespace, pvc)\n\n def delete_persistent_volume_claim(self, name):\n self.core.delete_namespaced_persistent_volume_claim(name, self.namespace, client.V1DeleteOptions())\n\n def create_secret(self, name, string_value_dict):\n body = client.V1Secret(string_data=string_value_dict, metadata={'name': name})\n return self.core.create_namespaced_secret(namespace=self.namespace, body=body)\n\n def delete_secret(self, name):\n self.core.delete_namespaced_secret(name, self.namespace, body=client.V1DeleteOptions())\n\n def create_job(self, name, batch_job_spec):\n body = client.V1Job(\n metadata=client.V1ObjectMeta(name=name),\n spec=batch_job_spec.create())\n return self.batch.create_namespaced_job(self.namespace, body)\n\n def wait_for_jobs(self, job_names):\n waiting_for_job_names = set(job_names)\n failed_job_names = []\n w = watch.Watch()\n for event in w.stream(self.batch.list_namespaced_job, self.namespace):\n job = event['object']\n job_name = job.metadata.name\n if job.status.succeeded:\n waiting_for_job_names.remove(job_name)\n elif job.status.failed:\n waiting_for_job_names.remove(job_name)\n failed_job_names.append(job_name)\n if not waiting_for_job_names:\n w.stop()\n if failed_job_names:\n raise ValueError(\"Failed jobs: {}\".format(','.join(failed_job_names)))\n else:\n print(\"Jobs complete: {}\".format(','.join(job_names)))\n\n def delete_job(self, name, propagation_policy='Background'):\n body = client.V1DeleteOptions(propagation_policy=propagation_policy)\n self.batch.delete_namespaced_job(name, self.namespace, 
body=body)\n\n def create_config_map(self, name, data):\n body = client.V1ConfigMap(\n metadata=client.V1ObjectMeta(name=name),\n data=data\n )\n return self.core.create_namespaced_config_map(self.namespace, body)\n\n def delete_config_map(self, name):\n self.core.delete_namespaced_config_map(name, self.namespace, body=client.V1DeleteOptions())\n\n\nclass Container(object):\n def __init__(self, name, image_name, command, args, working_dir, env_dict,\n requested_cpu, requested_memory, volumes):\n self.name = name\n self.image_name = image_name\n self.command = command\n self.args = args\n self.working_dir = working_dir\n self.env_dict = env_dict\n self.requested_cpu = requested_cpu\n self.requested_memory = requested_memory\n self.volumes = volumes\n\n def create_env(self):\n environment_variables = []\n for key, value in self.env_dict.items():\n environment_variables.append(client.V1EnvVar(name=key, value=value))\n return environment_variables\n\n def create_volume_mounts(self):\n return [volume.create_volume_mount() for volume in self.volumes]\n\n def create_volumes(self):\n return [volume.create_volume() for volume in self.volumes]\n\n def create_resource_requirements(self):\n return client.V1ResourceRequirements(\n requests={\n \"memory\": self.requested_memory,\n \"cpu\": self.requested_cpu\n })\n\n def create(self):\n return client.V1Container(\n name=self.name,\n image=self.image_name,\n working_dir=self.working_dir,\n command=[self.command],\n args=self.args,\n resources=self.create_resource_requirements(),\n env=self.create_env(),\n volume_mounts=self.create_volume_mounts()\n )\n\n\nclass VolumeBase(object):\n def __init__(self, name, mount_path):\n self.name = name\n self.mount_path = mount_path\n\n def create_volume_mount(self):\n return client.V1VolumeMount(\n name=self.name,\n mount_path=self.mount_path)\n\n\nclass SecretVolume(VolumeBase):\n def __init__(self, name, mount_path, secret_name):\n super(SecretVolume, self).__init__(name, mount_path)\n self.secret_name = secret_name\n\n def create_volume(self):\n return client.V1Volume(\n name=self.name,\n secret=self.create_secret())\n\n def create_secret(self):\n return client.V1SecretVolumeSource(secret_name=self.secret_name)\n\n\nclass PersistentClaimVolume(VolumeBase):\n def __init__(self, name, mount_path, volume_claim_name):\n super(PersistentClaimVolume, self).__init__(name, mount_path)\n self.volume_claim_name = volume_claim_name\n\n def create_volume(self):\n return client.V1Volume(\n name=self.name,\n persistent_volume_claim=self.create_volume_source())\n\n def create_volume_source(self):\n return client.V1PersistentVolumeClaimVolumeSource(claim_name=self.volume_claim_name)\n\n\nclass ConfigMapVolume(VolumeBase):\n def __init__(self, name, mount_path, config_map_name, source_key, source_path):\n super(ConfigMapVolume, self).__init__(name, mount_path)\n self.config_map_name = config_map_name\n self.source_key = source_key\n self.source_path = source_path\n\n def create_volume(self):\n return client.V1Volume(\n name=self.name,\n config_map=self.create_config_map())\n\n def create_config_map(self):\n items = [client.V1KeyToPath(key=self.source_key, path=self.source_path)]\n return client.V1ConfigMapVolumeSource(name=self.config_map_name,\n items=items)\n\n\nclass BatchJobSpec(object):\n def __init__(self, name, container):\n self.name = name\n self.pod_restart_policy = \"Never\"\n self.container = container\n\n def create(self):\n job_spec_name = \"{}spec\".format(self.name)\n return client.V1JobSpec(\n 
template=client.V1PodTemplateSpec(\n metadata=client.V1ObjectMeta(name=job_spec_name),\n spec=self.create_pod_spec()\n )\n )\n\n def create_pod_spec(self):\n return client.V1PodSpec(\n containers=self.create_containers(),\n volumes=self.create_volumes(),\n restart_policy=\"Never\"\n )\n\n def create_containers(self):\n container = self.container.create()\n return [container]\n\n def create_volumes(self):\n return self.container.create_volumes()\n", "id": "4832377", "language": "Python", "matching_score": 2.3962693214416504, "max_stars_count": 0, "path": "bespinjob/cluster.py" }, { "content": "from __future__ import print_function\n\nimport ftplib\nimport cStringIO\nimport json\nimport sys\nimport csv\nimport os\nfrom sqlalchemy import Column, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nBase = declarative_base()\nfrom sqlalchemy import create_engine\n\nclass GenDiff(object):\n config_file = \"gendiff.json\"\n\n def __init__(self, dir_name):\n self.dir_name = dir_name\n self._is_setup = False\n\n def setup(self):\n self._setup_dir()\n self._setup_config()\n self._is_setup = True\n\n def run_server(self):\n if not self._is_setup:\n self.setup()\n print(self.config.genome_ftp_url)\n genome_store = GenomeStore(self.dir_name, self.config)\n genome_store.setup_summary_file()\n\n def _setup_dir(self):\n if not os.path.isdir(self.dir_name):\n print(\"Creating directory\", self.dir_name)\n os.makedirs(self.dir_name)\n\n def config_path(self):\n return os.path.join(self.dir_name, self.config_file)\n\n def _setup_config(self):\n config_path = self.config_path()\n config = Config()\n if not os.path.exists(config_path):\n print(\"Creating config file\", config_path)\n config.write_to_path(config_path)\n config.load_from_path(config_path)\n self.config = config\n\nclass Config(object):\n def __init__(self):\n self.genome_ftp_url = \"ftp.ncbi.nlm.nih.gov\"\n self.refseq_dir = \"genomes/refseq/\"\n self.summary_file = \"assembly_summary_refseq.txt\"\n\n def write_to_path(self, path):\n with open(path, 'w') as outfile:\n json.dump(self.__dict__, outfile)\n\n def load_from_path(self, path):\n with open(path, 'r') as infile:\n json_dict = json.load(infile)\n self.set_from_dict(json_dict)\n\n def set_from_dict(self, json_dict):\n self.genome_ftp_url = self.get_val(json_dict,\n 'genome_ftp_url', self.genome_ftp_url)\n self.refseq_dir = self.get_val(json_dict,\n 'refseq_dir', self.refseq_dir)\n self.summary_file = self.get_val(json_dict,\n 'summary_file', self.summary_file)\n\n def get_val(self, json_dict, name, def_val):\n if name in json_dict:\n value = json_dict[name]\n if value:\n return value\n return def_val\n\nclass GenomeStore(object):\n def __init__(self, data_dir, config):\n self.data_dir = data_dir\n self.config = config\n\n def summary_file_path(self):\n return os.path.join(self.data_dir, self.config.summary_file)\n\n def setup_summary_file(self):\n if not os.path.exists(self.summary_file_path()):\n self.download_summary_file()\n self.setup_summary_database()\n\n def download_summary_file(self):\n print(\"Downloading summary file.\")\n ftp = ftplib.FTP(self.config.genome_ftp_url)\n ftp.login()\n ftp.cwd(self.config.refseq_dir)\n filename = self.config.summary_file\n path = self.summary_file_path()\n ftp.retrbinary(\"RETR \" + filename ,open(path, 'wb').write)\n ftp.quit()\n\n def setup_summary_database(self):\n url = os.path.join('sqlite:///', self.data_dir, 'gendiff.sqlite')\n engine = create_engine(url, echo=True)\n\n print(\"create 
tables\")\n Base.metadata.create_all(engine)\n session = sessionmaker(bind=engine)()\n with open(self.summary_file_path(), 'rb') as csvfile:\n pos = csvfile.tell()\n firstline = csvfile.readline()\n csvfile.seek(pos)\n if firstline.startswith(\"# \"):\n csvfile.read(2)\n csvreader = csv.DictReader(csvfile, delimiter='\\t', quotechar='|')\n row_names = [\n \"assembly_accession\",\n \"bioproject\",\n \"biosample\",\n \"wgs_master\",\n \"refseq_category\",\n \"taxid\",\n \"species_taxid\",\n \"organism_name\",\n \"infraspecific_name\",\n \"isolate\",\n \"version_status\",\n \"assembly_level\",\n \"release_type\",\n \"genome_rep\",\n \"seq_rel_date\",\n \"asm_name\",\n \"submitter\",\n \"gbrs_paired_asm\",\n \"paired_asm_comp\",\n \"ftp_path\"\n ]\n for row in csvreader:\n genome = Genome()\n for name in row_names:\n val = unicode(row[name], \"utf-8\")\n setattr(genome, name, val)\n session.add(genome)\n #print(row['assembly_accession'])\n \"\"\"row = [unicode(s, \"utf-8\") for s in row]\n if row[0].startswith(\"#\"):\n row[0] = row[0].replace(\"# \",\"\")\n for idx,name in enumerate(row):\n row_names[name] = idx\n import pdb; pdb.set_trace()\n else:\n genome = Genome(\n assembly_accession\",\n bioproject\",\n biosample\",\n wgs_master\",\n refseq_category\",\n taxid\",\n species_taxid\",\n organism_name\",\n infraspecific_name\",\n isolate\",\n version_status\",\n assembly_level\",\n release_type\",\n genome_rep\",\n seq_rel_date\",\n asm_name\",\n submitter\",\n gbrs_paired_asm\",\n paired_asm_comp\",\n ftp_path = row[19])\n session.add(genome)\n \"\"\"\n session.commit()\n''\nclass SummaryDB(object):\n pass\n\n#record = SeqIO.parse('GCF_000001765.3_Dpse_3.0_genomic.gbff', \"genbank\")\n#r = record.next()\n#[f for f in r.features if f.type == 'source']\n#>>> r.features[0].type\n#'source'\n#r.features[0].qualifiers['chromosome']\n\n\nclass Genome(Base):\n __tablename__ = 'genome'\n assembly_accession = Column(String, primary_key=True)\n bioproject = Column(String)\n biosample = Column(String)\n wgs_master = Column(String)\n refseq_category = Column(String)\n taxid = Column(String)\n species_taxid = Column(String)\n organism_name = Column(String)\n infraspecific_name = Column(String)\n isolate = Column(String)\n version_status = Column(String)\n assembly_level = Column(String)\n release_type = Column(String)\n genome_rep = Column(String)\n seq_rel_date = Column(String)\n asm_name = Column(String)\n submitter = Column(String)\n gbrs_paired_asm = Column(String)\n paired_asm_comp = Column(String)\n ftp_path = Column(String)\n #assembly_accession\tbioproject\tbiosample\twgs_master\trefseq_category\ttaxid\n # species_taxid\torganism_name\tinfraspecific_name\tisolate\tversion_status\n # assembly_level\trelease_type\tgenome_rep\tseq_rel_date\tasm_name\n # submitter\tgbrs_paired_asm\tpaired_asm_comp\tftp_path\n #GCF_000001215.4\tPRJNA164\tSAMN02803731\t\treference genome\t7227\n # 7227\tDrosophila melanogaster\t\t\tlatest\tChromosome\tMajor\tFull\n # 2014/08/01\tRelease 6 plus ISO1 MT\tThe FlyBase Consortium/Berkeley\n # Drosophila Genome Project/Celera Genomics\tGCA_000001215.4\tidentical\n #ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF_000001215.4_Release_6_plus_ISO1_MT\n\n\ndef main(args = sys.argv):\n if len(args) != 2:\n print_usage()\n else:\n gendiff = GenDiff(args[1])\n gendiff.run_server()\n\ndef print_usage():\n print(\"python gendiff.py <data_dir>\")\n\n#data_dir = sys.argv[1]\n#config_file = 'gendiff.json'\n#print(data_dir)\nif __name__ == \"__main__\":\n main()\n\n#with open('data.json') as 
data_file:\n# data = json.load(data_file)\n\n\"\"\"\ndef filter_non_genomes(dirlist):\n return [x for x in dirlist if not x.endswith(\".txt\")]\n\ngenome_ftp_url = \"ftp.ncbi.nlm.nih.gov\"\nrefseq_dir = \"genomes/refseq/\"\nsummary_file = \"assembly_summary_refseq.txt\"\n\nsio = cStringIO.StringIO()\ndef handle_binary(more_data):\n sio.write(more_data)\n\nftp = FTP(genome_ftp_url)\nftp.login()\nftp.cwd('/genomes/refseq/')\nresp = ftp.retrbinary(\"RETR \" + summary_file, callback=handle_binary)\nsio.seek(0) # Go back to the start\nprint sio.getvalue()\n\"\"\"\n#name = \"class\", value = \"input-content label-is-floating is-invalid style-scope paper-input-container\"\n\"\"\"\nroot_dirs = [\n \"archaea\",\n \"bacteria\",\n \"fungi\",\n \"invertebrate\",\n \"plant\",\n \"protozoa\",\n \"vertebrate_mammalian\",\n \"vertebrate_other\",\n \"viral\"]\n\nftp = FTP(genome_ftp_url)\nftp.login()\nftp.cwd('/genomes/refseq/')\nval = ftp.nlst()\nfor dirname in filter_non_genomes(val):\n print dirname\n print ftp.nlst(dirname)\n \"\"\"\n", "id": "1174008", "language": "Python", "matching_score": 2.8326141834259033, "max_stars_count": 0, "path": "gendiff.py" }, { "content": "from sqlalchemy.engine import create_engine\nfrom sqlalchemy import schema, types, orm\nimport os\n\nclass DB(object):\n def __init__(self, url=None):\n if not url:\n url = os.environ['DBURL']\n self.setup_engine(url)\n self.setup_project()\n\n def setup_engine(self, url):\n self.engine = create_engine(url) #'sqlite:///:memory:', echo=True)\n self.metadata = schema.MetaData()\n self.metadata.bind = self.engine\n orm.clear_mappers()\n sm = orm.sessionmaker(bind=self.engine, autoflush=True, autocommit=False,\n expire_on_commit=True)\n self.session = orm.scoped_session(sm)\n\n def setup_project(self):\n self.project_table = schema.Table('project', self.metadata,\n schema.Column('id', types.Integer, schema.Sequence('project_id_seq'),\n primary_key=True),\n schema.Column('title', types.Unicode()),\n )\n orm.mapper(ProjectRecord, self.project_table)\n\n def create_tables(self):\n self.metadata.create_all(checkfirst=True)\n\n def delete_all(self):\n self.session.query(ProjectRecord).delete()\n\n def close(self):\n self.session.close()\n self.engine.dispose()\n orm.clear_mappers()\n\nclass ProjectRecord(object):\n def to_json(self):\n return {'id':self.id, 'title' : self.title}\n\nclass ProjectRecordList(object):\n def __init__(self, db):\n self.db = db\n self.project_table = self.db.project_table\n\n def save(self, project_record):\n self.db.session.add(project_record)\n self.db.session.commit()\n self.db.session.flush()\n\n def get_all(self):\n query = self.db.session.query(ProjectRecord)\n query.order_by(self.project_table.c.title)\n return list(query)\n\n def get_one(self, project_id):\n query = self.db.session.query(ProjectRecord)\n for item in query.filter(self.project_table.c.id == project_id):\n print(str(item.id))\n return item\n return None\n\n def delete_one(self, project_id):\n item = self.get_one(project_id)\n if item:\n self.db.session.delete(item)\n self.db.session.commit()\n", "id": "1143202", "language": "Python", "matching_score": 2.1198582649230957, "max_stars_count": 0, "path": "seq_exp/db.py" }, { "content": "from flask import Flask, request\nfrom flask_restful import abort, Api, Resource\nfrom Bio import Entrez, SeqIO\n\nfrom sqlalchemy.engine import create_engine\nfrom sqlalchemy import schema, types, orm\nimport os\n\nfrom seq_exp.db import DB, ProjectRecord, ProjectRecordList\n\nEntrez.email = 
os.environ['EMAIL']\n\nclass Project(Resource):\n def __init__(self, db=None):\n self.project_list = ProjectRecordList(db)\n\n def get(self, project_id):\n proj = self.project_list.get_one(project_id);\n if proj:\n return proj.to_json(), 200, {'Access-Control-Allow-Origin': '*'}\n abort(404, message=\"Project {} doesn't exist\".format(project_id))\n\n def delete(self, project_id):\n self.project_list.delete_one(project_id)\n return '', 204, {'Access-Control-Allow-Origin': '*'}\n\n def put(self, project_id):\n proj = self.project_list.get_one(project_id);\n if proj:\n args = request.values\n proj.title = args['title']\n self.project_list.save(proj)\n return proj.to_json(), 201, {'Access-Control-Allow-Origin': '*'}\n abort(404, message=\"Project {} doesn't exist\".format(project_id))\n\n\nclass ProjectList(Resource):\n def __init__(self, db=None):\n self.project_list = ProjectRecordList(db)\n\n def get(self):\n data = [proj.to_json() for proj in self.project_list.get_all()]\n cnt = len(data)\n return {'items':data}, 201, {'Access-Control-Allow-Origin': '*'}\n\n def post(self):\n args = request.values\n proj = ProjectRecord()\n proj.title = args['title']\n self.project_list.save(proj)\n return proj.to_json(), 201\n\n\nclass EntrezSummary(Resource):\n def __init__(self, db=None):\n self.db = db\n\n def get(self, db):\n return self.get_summary(db, request.values)\n\n def get_summary(self, db, args):\n term = str(args['term'])\n if not term:\n return {'total_cnt': 0, 'count': 0, 'data': []}, 200, {'Access-Control-Allow-Origin': '*'}\n retmax = 10\n retmax_str = args['retmax']\n if retmax_str:\n retmax = int(retmax_str)\n handle = Entrez.esearch(db=db, term=term, retmax=retmax)\n record = Entrez.read(handle)\n total_cnt = record['Count']\n gi_str = \",\".join(record[\"IdList\"])\n count = len(record[\"IdList\"])\n handle = Entrez.esummary(db=db, id=gi_str)\n record = Entrez.read(handle)\n return {'total_cnt': total_cnt, 'count': count, 'data': record}, 200, {'Access-Control-Allow-Origin': '*'}\n\n\nclass EntrezDetail(Resource):\n def __init__(self, db=None):\n self.db = db\n\n def get(self, db, id):\n handle = Entrez.esummary(db=db, id=id)\n record = Entrez.read(handle)\n return record[0]\n\n\nclass EntrezDownload(Resource):\n def __init__(self, db=None):\n self.db = db\n\n def post(self):\n ids = request.form['ids']\n project_id = request.values['project_id']\n db = request.values['db']\n #print(ids, project_id,)\n return {'result':ids + db + project_id}\n\n\ndef setup_api_and_db(url):\n app = Flask(__name__, static_folder=\"website\")\n api = Api(app)\n mydb = DB(url=url)\n api.add_resource(ProjectList, '/projects',\n resource_class_kwargs={ 'db': mydb })\n api.add_resource(Project, '/projects/<project_id>',\n resource_class_kwargs={ 'db': mydb })\n api.add_resource(EntrezSummary, '/entrez/<db>',\n resource_class_kwargs={ 'db': mydb })\n api.add_resource(EntrezDetail, '/entrez/<db>/<id>',\n resource_class_kwargs={ 'db': mydb })\n api.add_resource(EntrezDownload, '/fetch_request',\n resource_class_kwargs={ 'db': mydb })\n return app, mydb\n", "id": "11821320", "language": "Python", "matching_score": 2.700946807861328, "max_stars_count": 0, "path": "seq_exp/seq_exp.py" }, { "content": "import unittest\nfrom seq_exp.seq_exp import DB, ProjectRecord, ProjectRecordList\n\nclass ProjectRecordListTest(unittest.TestCase):\n def setUp(self):\n self.db = DB(url='sqlite:///:memory:')\n self.db.create_tables()\n self.project_list = ProjectRecordList(self.db)\n\n def tearDown(self):\n self.db.close()\n\n 
def assert_project_cnt(self, expected_cnt):\n cnt = len(self.project_list.get_all())\n self.assertEqual(expected_cnt, cnt)\n\n def save_project(self, title):\n proj = ProjectRecord()\n proj.title = title\n self.project_list.save(proj)\n return proj.id\n\n def test_create_one(self):\n self.assert_project_cnt(0)\n project_id = self.save_project(\"Some Title\")\n self.assert_project_cnt(1)\n proj2 = self.project_list.get_one(project_id)\n self.assertEqual(\"Some Title\", proj2.title)\n\n def test_create_two_and_delete_one(self):\n self.assert_project_cnt(0)\n project_id = self.save_project(\"First Title\")\n project_id2 = self.save_project(\"Second Title\")\n self.assert_project_cnt(2)\n self.project_list.delete_one(project_id)\n self.assert_project_cnt(1)\n\n def test_update(self):\n project_id = self.save_project(\"First Title\")\n proj = self.project_list.get_one(project_id)\n self.assertEqual(\"First Title\", proj.title)\n proj.title = \"Second Title\"\n proj.id = project_id\n self.project_list.save(proj)\n proj2 = self.project_list.get_one(project_id)\n self.assertEqual(\"Second Title\", proj2.title)\n", "id": "5491457", "language": "Python", "matching_score": 1.8859468698501587, "max_stars_count": 0, "path": "seq_exp/test/test_db.py" }, { "content": "import os\nimport seq_exp.seq_exp as seq_exp\n\nif __name__ == \"__main__\":\n url = os.environ['DBURL']\n if not url:\n url='sqlite:///:memory:'\n app, db = seq_exp.setup_api_and_db(url)\n app.run(debug=True)\n", "id": "11496504", "language": "Python", "matching_score": 0.0043541667982935905, "max_stars_count": 0, "path": "run.py" }, { "content": "from django.http import HttpResponse\nfrom django.contrib.auth.views import LoginView, PasswordResetConfirmView, PasswordResetCompleteView\nfrom django.contrib.auth import logout as logout_user\nfrom django.contrib.auth.tokens import PasswordResetTokenGenerator\nfrom django.shortcuts import redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nimport traceback\nimport stripe\n\n\nstripe.api_key = settings.STRIPE_API_KEY\n\n\ndef index(request):\n username = \"\"\n if request.user.is_authenticated:\n username = request.user.username\n return HttpResponse(\"Placeholder for ddh shiny app. 
Username: {}\".format(username))\n\n\nclass Login(LoginView):\n template_name = 'userauth/Login_form.html'\n\n\nclass PasswordResetConfirm(PasswordResetConfirmView):\n template_name = 'userauth/PasswordResetConfirm_form.html'\n\n\nclass PasswordResetComplete(PasswordResetCompleteView):\n template_name = 'userauth/PasswordResetComplete_form.html'\n\n\ndef logout(request):\n logout_user(request)\n return redirect('login')\n\n\ndef auth(request):\n if request.user.is_authenticated:\n return HttpResponse(status=200)\n return HttpResponse(status=403)\n\n\n@csrf_exempt\ndef webhook(request):\n try:\n signature = request.headers.get('stripe-signature')\n event = stripe.Webhook.construct_event(\n payload=request.body, sig_header=signature, secret=settings.STRIPE_WEBHOOK_SECRET)\n print(\"Webhook event received:\", event.type)\n if event.type == \"customer.subscription.created\" or event.type == \"customer.subscription.updated\":\n on_customer_subscription_changed(request, event)\n elif event.type == \"customer.subscription.deleted\":\n on_customer_subscription_deleted(request, event)\n return JsonResponse({\"success\": True})\n except:\n print('Error while parsing webhook:')\n traceback.print_exc()\n return JsonResponse({\"success\": False})\n\n\ndef on_customer_subscription_changed(request, event):\n obj = event.data[\"object\"]\n status = obj[\"status\"]\n user, created = get_or_create_user_for_stripe_event_object(obj)\n user.is_active = status == \"active\"\n user.save()\n print(\"Updated user {} with subscription status {}\".format(user.username, status))\n if created:\n reset_url = make_reset_password_url(request, user)\n print(\"Sending email with reset url: {} to user {}\".format(reset_url, user.email))\n send_new_user_email(user, reset_url)\n\n\ndef on_customer_subscription_deleted(request, event):\n obj = event.data[\"object\"]\n user, created = get_or_create_user_for_stripe_event_object(obj)\n user.is_active = False\n user.save()\n print(\"Deactivated user {}\".format(user.username))\n\n\ndef get_or_create_user_for_stripe_event_object(obj):\n customer = stripe.Customer.retrieve(obj[\"customer\"])\n if not customer.email:\n raise ValueError(\"Customer has no email:\" + str(customer))\n try:\n return User.objects.get(username=customer.email), False\n except User.DoesNotExist:\n return User.objects.create_user(username=customer.email, email=customer.email), True\n\n\ndef make_reset_password_url(request, user):\n token_generator = PasswordResetTokenGenerator()\n uid = urlsafe_base64_encode(force_bytes(user.pk))\n token = token_generator.make_token(user)\n return request.build_absolute_uri('/accounts/reset/{}/{}/'.format(uid, token))\n\n\ndef send_new_user_email(user, reset_password_url):\n subject = \"Welcome to Data-Driven Hypothesis\"\n message = \"\"\"\n To get started, click to set a password for your account: {}\n \n If you have any problems, reply to this email for support.\n \n We're eager to see what you can discover!\n \"\"\".format(reset_password_url)\n send_mail(\n subject,\n message,\n settings.EMAIL_FROM_USER,\n [user.email],\n fail_silently=False,\n )\n", "id": "4882418", "language": "Python", "matching_score": 2.5287680625915527, "max_stars_count": 0, "path": "userauth/views.py" }, { "content": "from django.contrib import admin\nfrom django.urls import path\nfrom userauth import views as user_auth_views\n\nurlpatterns = [\n path('', user_auth_views.index, name='home'),\n path('auth/', user_auth_views.auth, name='auth'),\n path('webhook/', user_auth_views.webhook, 
name='webhook'),\n path('login/', user_auth_views.Login.as_view(), name='login'),\n path('accounts/reset/<uidb64>/<token>/', user_auth_views.PasswordResetConfirm.as_view(), name='password_reset_confirm'),\n path('accounts/reset/done/', user_auth_views.PasswordResetComplete.as_view(), name='password_reset_complete'),\n path('logout/', user_auth_views.logout, name='logout'),\n path('admin/', admin.site.urls),\n]\n", "id": "9129986", "language": "Python", "matching_score": 0.33406007289886475, "max_stars_count": 0, "path": "ddh_auth/urls.py" }, { "content": "# fasta_gc.py\n#\n# Reads a fasta file and prints out the GC content percentage of each sequence\n# in the file\n#\n# Usage: python fasta_gc.py sequences.fa\n#\n\nimport sys\nimport time\n\ndef read_fasta_dict(filename):\n \"\"\"\n Reads sequences from a fasta file, and returns a dictionary that maps the \n sequence description (key) to the sequence (value) \n \n For example:\n \n >seq1\n AACCGG\n >seq2\n CCTTTG\n \n would result in {'seq1':'AACCGG','seq2':'CCTTG'}\n \n \"\"\"\n sequences = {}\n f = open(filename)\n for line in f:\n line = line.strip()\n if '>' in line:\n sequence_name = line # Need to keep track of the name since \"line\" will change next time\n sequences[sequence_name] = ''\n else:\n # Append to the last sequence\n sequences[sequence_name] = sequences[sequence_name] + line\n f.close()\n return sequences\n\ndef gc_content_percent(sequence):\n \"\"\"\n Calculates the GC-content percentage of the input sequence\n Returns the percentage as an integer out of 100\n \"\"\"\n gc = sequence.count('G') + sequence.count('C')\n atcg = len(sequence)\n percent_gc = (gc * 100) / atcg\n return percent_gc\n\ndef classify_percent(percent):\n if percent < 35:\n classification = 'Low'\n elif percent < 55:\n classification = 'Normal'\n else:\n classification = 'High'\n return classification\n\ndef main():\n # Make sure we have a file name\n if not len(sys.argv) == 2:\n print \"Usage: python\", sys.argv[0], \"<sequences.fa>\"\n exit(1)\n\n filename = sys.argv[1]\n\n # Read the sequences into a dictionary\n sequences = read_fasta_dict(filename)\n\n # Loop over the keys (sequence names) in the dictionary\n for name in sequences:\n sequence = sequences[name]\n percent = gc_content_percent(sequence)\n classification = classify_percent(percent)\n print percent, classification, name\n\n # Pause so slurm can record our memory usage\n time.sleep(40)\n\nmain()\n", "id": "859364", "language": "Python", "matching_score": 0.9937018752098083, "max_stars_count": 0, "path": "fasta_gc.py" }, { "content": "from Bio import pairwise2\nimport itertools\nfrom sys import argv\n\n#seq1 = \"ABCBCD\"\n#seq2 = \"BC\"\n#aln = pairwise2.align.globalxx(seq1,seq2)\n#print aln\n\n#cnt = 0\n#for opt in itertools.permutations('AABBCCDDEE', 5):\n# cnt += 1\n#print cnt\n#range = \n#for i in range(ord('A'), ord('C') + 1):\n# print unichr(i)\nletters = \"ACDEFGHIKL\"\n# 1234567890\ndef make_str(num, width):\n format_str = \"{0:0\" + str(width) + \"d}\"\n nums = [int(c) for c in format_str.format(num)]\n return ''.join([letters[val] for val in nums])\n\n#for opt in itertools.permutations('ABC', 1):\n# print opt\n\nfilename = argv[1]\nwidth = int(argv[2])\nwith open(filename, \"w\") as outfile:\n for i in range(10 ** width):\n seq1 = make_str(i, width)\n for j in range(10 ** width):\n seq2 = make_str(j, width)\n aln = pairwise2.align.globalxx(seq1,seq2)\n outfile.write(str(aln) + \"\\n\")\n", "id": "12543571", "language": "Python", "matching_score": 0.07120916992425919, 
"max_stars_count": 0, "path": "pairwise2.py" }, { "content": "from __future__ import print_function\nimport argparse\nimport json\nimport uuid\n\nPOLICY_VERSION = \"2012-10-17\"\n\nAWS_TYPE = \"AWS\"\nEMC_TYPE = \"EMC\"\nPOLICY_TYPES = (AWS_TYPE, EMC_TYPE)\n\nSET_ACL_PERMISSION = \"SET_ACL\"\nCOPY_PERMISSION = \"COPY\"\nPERMISSION_TYPES = (SET_ACL_PERMISSION, COPY_PERMISSION)\n\n# Permissions that can be applied to an object\nGET_OBJECT = \"s3:GetObject\"\nPUT_OBJECT = \"s3:PutObject\"\nGET_OBJECT_ACL = \"s3:GetObjectAcl\"\nPUT_OBJECT_ACL = \"s3:PutObjectAcl\"\nDELETE_OBJECT = \"s3:DeleteObject\"\n\n# Permissions that can be applied to a bucket\nDELETE_BUCKET = \"s3:DeleteBucket\"\nLIST_BUCKET = \"s3:ListBucket\"\nGET_BUCKET_ACL = \"s3:GetBucketAcl\"\nPUT_BUCKET_ACL = \"s3:PutBucketAcl\"\nGET_BUCKET_POLICY = \"s3:GetBucketPolicy\"\nPUT_BUCKET_POLICY = \"s3:PutBucketPolicy\"\nDELETE_BUCKET_POLICY = \"s3:DeleteBucketPolicy\"\n\n\ndef make_policy_id():\n return \"Policy{}\".format(uuid.uuid1())\n\n\ndef make_bucket_resource(type, bucket):\n if type == AWS_TYPE:\n return \"arn:aws:s3:::{}\".format(bucket)\n elif type == EMC_TYPE:\n return bucket\n raise NotImplemented(\"resource creation for {}\".format(type))\n\n\ndef make_principal(type, user):\n if type == AWS_TYPE:\n return {\n \"AWS\": [user]\n }\n elif type == EMC_TYPE:\n return user\n raise NotImplemented(\"make_principal creation for {}\".format(type))\n\n\ndef set_acl_permission(type, bucket, user):\n resource = make_bucket_resource(type, bucket)\n principal = make_principal(type, user)\n return [\n {\n \"Action\": [\n PUT_OBJECT_ACL\n ],\n \"Effect\": \"Allow\",\n \"Resource\": \"{}/*\".format(resource),\n \"Principal\": principal\n },\n {\n \"Action\": [\n LIST_BUCKET,\n PUT_OBJECT_ACL\n ],\n \"Effect\": \"Allow\",\n \"Resource\": resource,\n \"Principal\": principal\n }\n ]\n\n\ndef make_statements(type, bucket, permission, user):\n if permission == SET_ACL_PERMISSION:\n return set_acl_permission(type, bucket, user)\n raise NotImplemented(\"No support for type:{} permission:{}\".format(type, permission))\n\n\ndef get_bucket_suffix(permission):\n if permission == SET_ACL_PERMISSION:\n return \"/*\"\n return \"\"\n\n\ndef create_policy(type, bucket, permission, user):\n policy_id = make_policy_id()\n statements = make_statements(type, bucket, permission, user)\n\n policy = {\n \"Id\": policy_id,\n \"Version\": POLICY_VERSION,\n \"Statement\": statements\n }\n return json.dumps(policy, indent=4, separators=(',', ': '))\n\n\ndef create_argparser():\n parser = argparse.ArgumentParser(description=\"s3 policy generator tool.\")\n parser.add_argument(\"--bucket\", type=str, dest='bucket', required=True)\n parser.add_argument(\"--type\", type=str, dest='type', required=True, choices=POLICY_TYPES)\n parser.add_argument(\"--permission\", type=str, dest='permission', required=True, choices=PERMISSION_TYPES)\n parser.add_argument(\"--user\", type=str, dest='user', required=True)\n return parser\n\n\ndef main():\n parser = create_argparser()\n args = parser.parse_args()\n policy_str = create_policy(args.type, args.bucket, args.permission, args.user)\n print(policy_str)\n\n\nif __name__ == '__main__':\n main()\n", "id": "7610518", "language": "Python", "matching_score": 1.562625527381897, "max_stars_count": 0, "path": "policy.py" }, { "content": "import argparse\nimport googleapiclient.discovery\nimport google.auth\nfrom google.oauth2 import service_account\n\n\ndef get_credentials(service_account_filename=None):\n if service_account_filename:\n 
return service_account.Credentials.from_service_account_file(service_account_filename)\n else:\n credentials, _ = google.auth.default()\n return credentials\n\n\ndef create_file_store_api(credentials):\n return googleapiclient.discovery.build('file', 'v1', credentials=credentials)\n\n\nclass FileStoreSettings(object):\n def __init__(self, file_store_id, file_share_name, file_share_capacity_gb, tier=\"STANDARD\", network_name=\"default\"):\n self.id = file_store_id\n self.file_share_name = file_share_name\n self.file_share_capacity_gb = file_share_capacity_gb\n self.tier = tier\n self.network_name = network_name\n\n def body_dict(self):\n return {\n \"tier\": self.tier,\n \"fileShares\": [\n {\n \"name\": self.file_share_name,\n \"capacityGb\": self.file_share_capacity_gb\n }\n ],\n \"networks\": [\n {\"network\": self.network_name}\n ]\n }\n\n\ndef create_file_store_instance(file_store_api, project_id, region_zone, file_store_settings):\n parent = \"projects/{}/locations/{}\".format(project_id, region_zone)\n file_store_instances = file_store_api.projects().locations().instances()\n request = file_store_instances.create(\n parent=parent,\n instanceId=file_store_settings.id,\n body=file_store_settings.body_dict())\n return request.execute()\n\n\ndef delete_file_store_instance(file_store_api, project_id, region_zone, instance_id):\n name = \"projects/{}/locations/{}/instances/{}\".format(project_id, region_zone, instance_id)\n file_store_instances = file_store_api.projects().locations().instances()\n request = file_store_instances.delete(name=name)\n return request.execute()\n\n\ndef create_file_store(args):\n credentials = get_credentials(args.service_account_credential_file)\n file_store_api = create_file_store_api(credentials)\n file_store_settings = FileStoreSettings(\n args.file_store_id,\n args.file_share_name,\n args.file_share_capacity_gb,\n args.tier,\n args.network\n )\n instance = create_file_store_instance(file_store_api, args.project_id, args.region_zone, file_store_settings)\n print(\"Created file share {}\".format(instance))\n\n\ndef delete_file_store(args):\n credentials = get_credentials(args.service_account_credential_file)\n file_store_api = create_file_store_api(credentials)\n result = delete_file_store_instance(file_store_api, args.project_id, args.region_zone, args.file_store_id)\n print(\"Delete fs {}\".format(result))\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n\n create_parser = subparsers.add_parser('create', description='Create file share')\n create_parser.add_argument('--project_id', required=True)\n create_parser.add_argument('--region_zone', required=True)\n create_parser.add_argument('--service_account_credential_file')\n create_parser.add_argument('--file_store_id', required=True)\n create_parser.add_argument('--file_share_name', required=True)\n create_parser.add_argument('--file_share_capacity_gb', required=True)\n create_parser.add_argument('--tier', default='STANDARD')\n create_parser.add_argument('--network', default='default')\n create_parser.set_defaults(func=create_file_store)\n\n delete_parser = subparsers.add_parser('delete', description='Delete file share')\n delete_parser.add_argument('--project_id', required=True)\n delete_parser.add_argument('--region_zone', required=True)\n delete_parser.add_argument('--service_account_credential_file')\n delete_parser.add_argument('--file_store_id')\n delete_parser.set_defaults(func=delete_file_store)\n\n return parser\n\n\ndef main():\n parser = 
create_parser()\n parsed_args = parser.parse_args()\n if hasattr(parsed_args, 'func'):\n parsed_args.func(parsed_args)\n else:\n parser.print_help()\n\n\nif __name__ == '__main__':\n main()\n", "id": "2924143", "language": "Python", "matching_score": 0.9997861981391907, "max_stars_count": 0, "path": "run.py" }, { "content": "import os\nimport seq_exp.seq_exp as seq_exp\nimport unittest\nimport tempfile\nimport ast\n\nclass ProjectsTestCase(unittest.TestCase):\n def setUp(self):\n realapp, db = seq_exp.setup_api_and_db('sqlite:///:memory:')\n self.realapp = realapp\n self.db = db\n self.db.create_tables()\n self.db_fd, self.realapp.config['DATABASE'] = tempfile.mkstemp()\n self.realapp.config['TESTING'] = True\n self.app = self.realapp.test_client()\n\n def tearDown(self):\n self.db.close()\n os.close(self.db_fd)\n os.unlink(self.realapp.config['DATABASE'])\n\n def literal_eval(self, rv):\n resp_str = rv.data.decode(\"utf-8\")\n return ast.literal_eval(resp_str)\n\n def get_projects_data_and_count(self):\n rv = self.app.get('/projects')\n resp = self.literal_eval(rv)\n data = {}\n for project in resp['data']:\n data[project['id']] = project['title']\n return data, resp['count']\n\n def post_and_get_id(self, title):\n ret = self.app.post('/projects', data=dict(title=title))\n return self.literal_eval(ret)['id']\n\n def get_project_by_id(self, id):\n ret = self.app.get('/projects/' + str(id))\n if ret.status.startswith(\"404 \"):\n return None\n return self.literal_eval(ret)\n\n def put_project_by_id(self, id, data):\n self.app.put('/projects/' + str(id), data=data)\n\n def delete_project_by_id(self, id):\n self.app.delete('/projects/' + str(id))\n\n def test_empty_projects(self):\n # Test GET /projects.\n data, count = self.get_projects_data_and_count()\n print(count)\n self.assertEqual(count, 0)\n self.assertEqual(data, {})\n\n def test_add_one(self):\n # Test POST /projects.\n id = self.post_and_get_id(\"Test 1\")\n data, count = self.get_projects_data_and_count()\n self.assertEqual(count, 1)\n self.assertEqual(data[id], \"Test 1\")\n\n def test_add_two(self):\n # Test POST /projects twice.\n red_id = self.post_and_get_id(\"Red\")\n green_id = self.post_and_get_id(\"Green\")\n data, count = self.get_projects_data_and_count()\n self.assertEqual(count, 2)\n self.assertEqual(data[red_id], \"Red\")\n self.assertEqual(data[green_id], \"Green\")\n\n def test_add_two_remove_one(self):\n # Test DELETE /projects/<id>.\n red_id = self.post_and_get_id(\"Red\")\n green_id = self.post_and_get_id(\"Green\")\n self.delete_project_by_id(red_id)\n data, count = self.get_projects_data_and_count()\n self.assertEqual(count, 1)\n self.assertEqual(data[green_id], \"Green\")\n\n def test_add_then_get(self):\n # Test GET /projects/<id>.\n id = self.post_and_get_id(\"Test 1\")\n ret = self.get_project_by_id(id)\n self.assertEqual(ret['id'], id)\n self.assertEqual(ret['title'], \"Test 1\")\n\n def test_add_then_put_then_get(self):\n # Test PUT /projects/<id>.\n id = self.post_and_get_id(\"Test 1\")\n self.put_project_by_id(id, dict(title='Test 2'))\n ret = self.get_project_by_id(id)\n self.assertEqual(ret['id'], id)\n self.assertEqual(ret['title'], \"Test 2\")\n\n def test_get_invalid(self):\n ret = self.get_project_by_id(99)\n self.assertEqual(None, ret)\n\nif __name__ == '__main__':\n unittest.main()\n", "id": "6399675", "language": "Python", "matching_score": 4.3600029945373535, "max_stars_count": 0, "path": "seq_exp/test/test_projects.py" }, { "content": "import os\nimport seq_exp.seq_exp as seq_exp\nimport 
unittest\nimport tempfile\nimport ast\n\nclass EntrezTestCase(unittest.TestCase):\n def setUp(self):\n realapp, db = seq_exp.setup_api_and_db('sqlite:///:memory:')\n self.realapp = realapp\n self.db = db\n self.db_fd, realapp.config['DATABASE'] = tempfile.mkstemp()\n realapp.config['TESTING'] = True\n self.app = realapp.test_client()\n seq_exp.PROJECTS = {}\n\n def tearDown(self):\n self.db.close()\n os.close(self.db_fd)\n os.unlink(self.realapp.config['DATABASE'])\n\n def literal_eval(self, rv):\n resp_str = rv.data.decode(\"utf-8\")\n return ast.literal_eval(resp_str)\n\n def test_fetch_four_human_dna(self):\n #kind of fragile since relies upon external web server\n rv = self.app.get('/entrez/nucleotide', data=dict(term='human', retmax='4'))\n resp = self.literal_eval(rv)\n self.assertEqual(4, resp['count'])\n\n def test_fetch_five_mouse_protein(self):\n #kind of fragile since relies upon external web server\n rv = self.app.get('/entrez/protein', data=dict(term='mouse', retmax='5'))\n resp = self.literal_eval(rv)\n self.assertEqual(5, resp['count'])\n", "id": "2024432", "language": "Python", "matching_score": 0.26126062870025635, "max_stars_count": 0, "path": "seq_exp/test/test_entrez.py" }, { "content": "import click\nimport json\nfrom ddsc.sdk.client import Client as DukeDSClient\nfrom ddsc.core.upload import ProjectUpload\nfrom ddsc.core.remotestore import ProjectNameOrId\n\n\nclass UploadList(object):\n def __init__(self, cmdfile):\n data = json.load(cmdfile)\n self.destination = data['destination']\n self.paths = data['paths']\n\n\[email protected]()\[email protected]('cmdfile', type=click.File())\ndef upload_files(cmdfile):\n upload_list = UploadList(cmdfile)\n click.echo(\"Uploading {} paths to {}.\".format(len(upload_list.paths), upload_list.destination))\n dds_client = DukeDSClient()\n project_upload = ProjectUpload(dds_client.dds_connection.config,\n ProjectNameOrId.create_from_name(upload_list.destination),\n upload_list.paths)\n click.echo(project_upload.get_differences_summary())\n if project_upload.needs_to_upload():\n click.echo(\"Uploading\")\n project_upload.run()\n else:\n click.echo(\"Nothing needs to be done.\")\n\n\nif __name__ == '__main__':\n upload_files()\n", "id": "2365785", "language": "Python", "matching_score": 3.300424575805664, "max_stars_count": 0, "path": "staging/upload.py" }, { "content": "import click\nimport json\nfrom ddsc.sdk.client import Client as DukeDSClient\n\n\nclass DownloadList(object):\n def __init__(self, cmdfile):\n data = json.load(cmdfile)\n self.files = data['files']\n\n def items(self):\n items = []\n for file_data in self.files:\n key = file_data['key']\n dest = file_data['dest']\n items.append((key, dest))\n return items\n\n\[email protected]()\[email protected]('cmdfile', type=click.File())\ndef download_files(cmdfile):\n dds_client = DukeDSClient()\n download_list = DownloadList(cmdfile)\n items = download_list.items()\n click.echo(\"Downloading {} files.\".format(len(items)))\n for key, dest in items:\n click.echo(\"Downloading file {} to {}.\".format(key, dest))\n dds_file = dds_client.get_file_by_id(file_id=key)\n dds_file.download_to_path(dest)\n\n\nif __name__ == '__main__':\n download_files()\n", "id": "7997632", "language": "Python", "matching_score": 1.8921725749969482, "max_stars_count": 0, "path": "staging/download.py" } ]
2.312693
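A minimal sketch (not part of the archived repositories above; every literal value is a hypothetical example) of the JSON command-file shape that the UploadList and DownloadList helpers in staging/upload.py and staging/download.py from the preceding record appear to expect, assuming only the keys those classes actually read from the parsed JSON:

import io
import json

# Hypothetical upload command file: UploadList reads 'destination' and 'paths'.
upload_cmd = {"destination": "example-project", "paths": ["data/sample1.fastq", "data/sample2.fastq"]}
# Hypothetical download command file: DownloadList reads 'files', each entry with 'key' and 'dest'.
download_cmd = {"files": [{"key": "example-file-id", "dest": "downloads/sample1.fastq"}]}

# Parse them the same way the scripts do (json.load on a file-like object).
print(json.load(io.StringIO(json.dumps(upload_cmd)))["destination"])
print([(f["key"], f["dest"]) for f in json.load(io.StringIO(json.dumps(download_cmd)))["files"]])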
dgmz
[ { "content": "#!/usr/bin/python3\n\nimport tga\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\ndef line(buffer, x0, y0, x1, y1, color):\n\tsteep = False\n\tif abs(x0 - x1) < abs(y0 - y1):\n\t\tx0, y0 = y0, x0\n\t\tx1, y1 = y1, x1\n\t\tsteep = True\n\tif x0 > x1:\n\t\tx0, x1 = x1, x0\n\t\ty0, y1 = y1, y0\n\tfor x in range(x0, x1):\n\t\tt = (x - x0) / (x1 - x0)\n\t\ty = y0 * (1 - t) + y1 * t\n\t\tif steep:\n\t\t\tbuffer[int(x)][int(y)] = color\n\t\telse:\n\t\t\tbuffer[int(y)][int(x)] = color\n\n# new 100x100 black image\ndata = [\n\t[black for x in range(100)] for y in range(100)\n]\n\nline(data, 13, 20, 80, 40, white)\nline(data, 20, 13, 40, 80, red)\nline(data, 80, 0, 13, 20, blue)\n\ndata = data[::-1] # flip vertically\n# save as TGA\nimage = tga.Image(data)\nimage.save(\"lesson01-3.tga\")\n\nprint(\"DONE\")", "id": "4121813", "language": "Python", "matching_score": 3.103243589401245, "max_stars_count": 0, "path": "Python/lesson01-3.py" }, { "content": "#!/usr/bin/python3\n\nimport tga\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\ndef line(buffer, x0, y0, x1, y1, color):\n\tsteep = False\n\tif abs(x0 - x1) < abs(y0 - y1):\n\t\tx0, y0 = y0, x0\n\t\tx1, y1 = y1, x1\n\t\tsteep = True\n\tif x0 > x1:\n\t\tx0, x1 = x1, x0\n\t\ty0, y1 = y1, y0\n\tdx = x1 - x0\n\tdy = y1 - y0\n\tsy = 1 if y0 < y1 else -1\n\tderror2 = int(abs(dy) * 2)\n\terror2 = 0\n\ty = y0\n\tdx2 = int(dx * 2)\n\tfor x in range(x0, x1):\n\t\tif steep:\n\t\t\tbuffer[x][y] = color\n\t\telse:\n\t\t\tbuffer[y][x] = color\n\t\terror2 += derror2\n\t\tif dx < error2:\n\t\t\ty += sy\n\t\t\terror2 -= dx2\n\n# new 100x100 black image\ndata = [\n\t[black for x in range(100)] for y in range(100)\n]\n\nfor n in range(1000*1000):\n\tline(data, 13, 20, 80, 40, white)\n\tline(data, 20, 13, 40, 80, red)\n\tline(data, 80, 0, 13, 20, blue)\n\ndata = data[::-1] # flip vertically\n# save as TGA\nimage = tga.Image(data)\nimage.save(\"lesson01-5.tga\")\n\nprint(\"DONE\")", "id": "1414211", "language": "Python", "matching_score": 2.032752752304077, "max_stars_count": 0, "path": "Python/lesson01-5.py" }, { "content": "#!/usr/bin/python3\n\nimport tga\nimport wavefront\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\ndef line(buffer, x0, y0, x1, y1, color):\n\tsteep = False\n\tif abs(x0 - x1) < abs(y0 - y1):\n\t\tx0, y0 = y0, x0\n\t\tx1, y1 = y1, x1\n\t\tsteep = True\n\tif x0 > x1:\n\t\tx0, x1 = x1, x0\n\t\ty0, y1 = y1, y0\n\tdx = x1 - x0\n\tdy = y1 - y0\n\tsy = 1 if y0 < y1 else -1\n\tderror = abs(dy / dx) if dx else 1\n\terror = 0\n\ty = y0\n\tif steep:\n\t\tfor x in range(x0, x1):\n\t\t\tbuffer[int(x)][int(y)] = color\n\t\t\terror += derror\n\t\t\tif 0.5 < error:\n\t\t\t\ty += sy\n\t\t\t\terror -= 1\n\telse:\n\t\tfor x in range(x0, x1):\n\t\t\tbuffer[int(y)][int(x)] = color\n\t\t\terror += derror\n\t\t\tif 0.5 < error:\n\t\t\t\ty += sy\n\t\t\t\terror -= 1\n\nWIDTH = 800\nHEIGHT = 800\n# new 100x100 black image\ndata = [\n\t[black for x in range(WIDTH)] for y in range(HEIGHT)\n]\nmodel = wavefront.load_obj('african_head.obj')\nfor face in model.polygons:\n\tprint(\"face: \" + str(face))\n\tfor j in range(3):\n\t\tv0 = model.vertices[face[j][0]]\n\t\tv1 = model.vertices[face[(j + 1) % 3][0]]\n\t\tx0 = (v0[0] + 1) * (WIDTH - 1) / 2\n\t\ty0 = (v0[1] + 1) * (HEIGHT - 1) / 2\n\t\tx1 = (v1[0] + 1) * (WIDTH - 1) / 2\n\t\ty1 = (v1[1] + 1) * (HEIGHT - 1) / 2\n\t\tline(data, 
int(x0), int(y0), int(x1), int(y1), white)\n\ndata = data[::-1] # flip vertically\n# save as TGA\nimage = tga.Image(data)\nimage.save(\"lesson01-6.tga\")\n\nprint(\"DONE\")", "id": "9164854", "language": "Python", "matching_score": 3.2360734939575195, "max_stars_count": 0, "path": "Python/lesson01-6.py" }, { "content": "#!/usr/bin/python3\n\nimport tga\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\ndef line(buffer, x0, y0, x1, y1, color):\n\tfor x in range(x0, x1):\n\t\tt = (x - x0) / (x1 - x0)\n\t\ty = y0 * (1 - t) + y1 * t\n\t\tbuffer[int(y)][int(x)] = color\n\n# new 100x100 black image\ndata = [\n\t[black for x in range(100)] for y in range(100)\n]\n\nline(data, 13, 20, 80, 40, white)\nline(data, 20, 13, 40, 80, red)\nline(data, 80, 0, 13, 20, blue)\n\ndata = data[::-1] # flip vertically\n# save as TGA\nimage = tga.Image(data)\nimage.save(\"lesson01-2.tga\")\n\nprint(\"DONE\")", "id": "10080379", "language": "Python", "matching_score": 3.016906261444092, "max_stars_count": 0, "path": "Python/lesson01-2.py" }, { "content": "#!/usr/bin/python3\n\nimport tga\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\n\ndef frange(start, end, step):\n\tcount = int((end - start) / step)\n\tfor i in range(count):\n\t\tyield start + i * step\n\ndef line(buffer, x0, y0, x1, y1, color):\n\tfor t in frange(0, 1, 0.01):\n\t\tx = x0 + (x1 - x0) * t\n\t\ty = y0 + (y1 - y0) * t\n\t\tbuffer[int(y)][int(x)] = color\n\n# new 100x100 black image\ndata = [\n\t[black for x in range(100)] for y in range(100)\n]\n\nline(data, 10, 20, 80, 40, white)\n\ndata = data[::-1] # flip vertically\n# save as TGA\nimage = tga.Image(data)\nimage.save(\"lesson01-1.tga\")\n\nprint(\"DONE\")", "id": "10278567", "language": "Python", "matching_score": 2.5589098930358887, "max_stars_count": 0, "path": "Python/lesson01-1.py" }, { "content": "#!/usr/bin/python3\n\nimport tga\n\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\n\n# new 100x100 black image\ndata = [\n\t[black for x in range(100)] for y in range(100)\n]\ndata[10][41] = red # set pixel\ndata = data[::-1] # flip vertically\n\n# save as TGA\nimage = tga.Image(data)\nimage.save(\"lesson00.tga\")\n\nprint(\"DONE\")", "id": "8758161", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "Python/lesson00.py" }, { "content": "# wavefront.py\n#import numpy as np\n\nclass WavefrontOBJ:\n def __init__( self, default_mtl='default_mtl' ):\n self.path = None # path of loaded object\n self.mtllibs = [] # .mtl files references via mtllib\n self.mtls = [ default_mtl ] # materials referenced\n self.mtlid = [] # indices into self.mtls for each polygon\n self.vertices = [] # vertices as an Nx3 or Nx6 array (per vtx colors)\n self.normals = [] # normals\n self.texcoords = [] # texture coordinates\n self.polygons = [] # M*Nv*3 array, Nv=# of vertices, stored as vid,tid,nid (-1 for N/A)\n\ndef load_obj( filename: str, default_mtl='default_mtl', triangulate=False ) -> WavefrontOBJ:\n \"\"\"Reads a .obj file from disk and returns a WavefrontOBJ instance\n\n Handles only very rudimentary reading and contains no error handling!\n\n Does not handle:\n - relative indexing\n - subobjects or groups\n - lines, splines, beziers, etc.\n \"\"\"\n # parses a vertex record as either vid, vid/tid, vid//nid or vid/tid/nid\n # and returns a 3-tuple where unparsed values are replaced with -1\n def parse_vertex( vstr ):\n vals = vstr.split('/')\n vid = int(vals[0])-1\n tid = int(vals[1])-1 if len(vals) > 1 
and vals[1] else -1\n nid = int(vals[2])-1 if len(vals) > 2 else -1\n return (vid,tid,nid)\n\n with open( filename, 'r' ) as objf:\n obj = WavefrontOBJ(default_mtl=default_mtl)\n obj.path = filename\n cur_mat = obj.mtls.index(default_mtl)\n for line in objf:\n toks = line.split()\n if not toks:\n continue\n if toks[0] == 'v':\n obj.vertices.append( [ float(v) for v in toks[1:]] )\n elif toks[0] == 'vn':\n obj.normals.append( [ float(v) for v in toks[1:]] )\n elif toks[0] == 'vt':\n obj.texcoords.append( [ float(v) for v in toks[1:]] )\n elif toks[0] == 'f':\n poly = [ parse_vertex(vstr) for vstr in toks[1:] ]\n if triangulate:\n for i in range(2,len(poly)):\n obj.mtlid.append( cur_mat )\n obj.polygons.append( (poly[0], poly[i-1], poly[i] ) )\n else:\n obj.mtlid.append(cur_mat)\n obj.polygons.append( poly )\n elif toks[0] == 'mtllib':\n obj.mtllibs.append( toks[1] )\n elif toks[0] == 'usemtl':\n if toks[1] not in obj.mtls:\n obj.mtls.append(toks[1])\n cur_mat = obj.mtls.index( toks[1] )\n return obj\n\ndef save_obj( obj: WavefrontOBJ, filename: str ):\n \"\"\"Saves a WavefrontOBJ object to a file\n\n Warning: Contains no error checking!\n\n \"\"\"\n with open( filename, 'w' ) as ofile:\n for mlib in obj.mtllibs:\n ofile.write('mtllib {}\\n'.format(mlib))\n for vtx in obj.vertices:\n ofile.write('v '+' '.join(['{}'.format(v) for v in vtx])+'\\n')\n for tex in obj.texcoords:\n ofile.write('vt '+' '.join(['{}'.format(vt) for vt in tex])+'\\n')\n for nrm in obj.normals:\n ofile.write('vn '+' '.join(['{}'.format(vn) for vn in nrm])+'\\n')\n if not obj.mtlid:\n obj.mtlid = [-1] * len(obj.polygons)\n #poly_idx = np.argsort( np.array( obj.mtlid ) )\n poly_idx = obj.mtlid\n cur_mat = -1\n for pid in poly_idx:\n if obj.mtlid[pid] != cur_mat:\n cur_mat = obj.mtlid[pid]\n ofile.write('usemtl {}\\n'.format(obj.mtls[cur_mat]))\n pstr = 'f '\n for v in obj.polygons[pid]:\n # UGLY!\n vstr = '{}/{}/{} '.format(v[0]+1,v[1]+1 if v[1] >= 0 else 'X', v[2]+1 if v[2] >= 0 else 'X' )\n vstr = vstr.replace('/X/','//').replace('/X ', ' ')\n pstr += vstr\n ofile.write( pstr+'\\n')", "id": "3309533", "language": "Python", "matching_score": 1, "max_stars_count": 0, "path": "Python/wavefront/wavefront.py" }, { "content": "from . wavefront import *", "id": "6011324", "language": "Python", "matching_score": 0.6729329824447632, "max_stars_count": 0, "path": "Python/wavefront/__init__.py" } ]
2.295831
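A dependency-free sketch (not part of the dgmz record above) of the integer-error Bresenham line routine from Python/lesson01-5.py, reduced so it can be run without the tga module; the canvas size and endpoints are arbitrary example values taken from the lesson script:

def line(buffer, x0, y0, x1, y1, color):
    # Transpose when the line is steep so x always advances by 1.
    steep = abs(x0 - x1) < abs(y0 - y1)
    if steep:
        x0, y0, x1, y1 = y0, x0, y1, x1
    # Always draw left to right.
    if x0 > x1:
        x0, y0, x1, y1 = x1, y1, x0, y0
    dx, dy = x1 - x0, y1 - y0
    sy = 1 if dy > 0 else -1
    derror2, error2, y = abs(dy) * 2, 0, y0
    for x in range(x0, x1):
        # buffer is indexed [row][column], as in the lesson scripts.
        if steep:
            buffer[x][y] = color
        else:
            buffer[y][x] = color
        error2 += derror2
        if error2 > dx:
            y += sy
            error2 -= dx * 2

canvas = [[(0, 0, 0) for _ in range(100)] for _ in range(100)]
line(canvas, 13, 20, 80, 40, (255, 255, 255))  # same endpoints as lesson01-5.py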
tmierzwa
[ { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom app.engine.decoders.parameter import decode_parameters\nfrom app.engine.decoders.event import decode_event\nfrom app.engine.providers.semantics import get_semantics\n\n\ndef decode_trace(chain_id: str, block_hash: str, trace: dict, trace_id: str or None, indent: int) -> tuple:\n\n semantics = get_semantics(\n chain_id,\n trace[\"code_address\"],\n block_hash,\n )\n\n decoded_trace = dict()\n decoded_trace[\"trace_id\"] = trace_id\n decoded_trace[\"type\"] = 'DEPLOY' if trace[\"entry_point_type\"] == 'CONSTRUCTOR' else 'INVOKE_FUNCTION'\n decoded_trace[\"entry_point_type\"] = trace[\"entry_point_type\"]\n decoded_trace[\"caller\"] = trace[\"caller_address\"]\n decoded_trace[\"contract\"] = trace[\"code_address\"]\n\n if decoded_trace[\"type\"] == \"INVOKE_FUNCTION\":\n\n decoded_trace[\"code\"] = trace[\"code_address\"]\n\n decoded_trace['entry_point_selector'] = trace[\"selector\"]\n decoded_trace['entry_point_type'] = trace[\"entry_point_type\"]\n\n function_abi = semantics[\"abi\"][\"functions\"].get(trace[\"selector\"])\n if not function_abi and '__default__' in semantics[\"abi\"][\"functions\"]:\n function_abi = semantics[\"abi\"][\"functions\"]['__default__']\n if function_abi['inputs'][1]['name'] == 'calldata_size' or \\\n (len(trace['calldata']) > 2 and int(trace['calldata'][1], 16) != len(trace['calldata']) - 2):\n trace['calldata'].insert(0, hex(len(trace[\"calldata\"])))\n trace['calldata'].insert(0, trace[\"selector\"])\n function_abi['inputs'][1]['name'] = 'calldata_len'\n if function_abi['outputs'][0]['name'] == 'retdata_size' or \\\n (len(trace['result']) > 0 and int(trace['result'][0], 16) != len(trace['result']) - 2):\n function_abi['outputs'][0]['name'] = 'retdata_len'\n trace['result'].insert(0, hex(len(trace[\"result\"])))\n\n if function_abi:\n decoded_trace[\"function\"] = function_abi[\"name\"]\n decoded_trace[\"inputs\"] = decode_parameters(\n chain_id,\n trace[\"calldata\"],\n function_abi[\"inputs\"]\n )\n decoded_trace[\"outputs\"] = decode_parameters(\n chain_id,\n trace.get(\"result\", []),\n function_abi[\"outputs\"],\n )\n else:\n decoded_trace[\"function\"] = trace[\"selector\"]\n decoded_trace[\"inputs\"] = \\\n [dict(name=f'input_{i}', value=value)\n for i, value in enumerate(trace[\"calldata\"])]\n decoded_trace[\"outputs\"] = \\\n [dict(name=f'output_{i}', value=value)\n for i, value in enumerate(trace.get(\"result\", []))]\n\n elif decoded_trace[\"type\"] == \"DEPLOY\":\n\n decoded_trace['entry_point_selector'] = \"constructor\"\n decoded_trace['entry_point_type'] = \"CONSTRUCTOR\"\n\n if \"constructor\" in semantics[\"abi\"][\"functions\"]:\n function_abi = semantics[\"abi\"][\"functions\"][\"constructor\"]\n decoded_trace[\"function\"] = function_abi[\"name\"]\n decoded_trace[\"inputs\"] = decode_parameters(\n chain_id,\n trace['calldata'],\n function_abi[\"inputs\"]\n )\n decoded_trace[\"outputs\"] = []\n else:\n decoded_trace[\"function\"] = 
None\n decoded_trace[\"inputs\"] = []\n decoded_trace[\"outputs\"] = []\n\n else:\n print(\"Unknown type...\")\n\n decoded_events = []\n if 'events' in trace:\n for event in trace['events']:\n if len(event['keys']):\n if type(event['keys'][0]) == int:\n selector = hex(int(event['keys'][0]))\n else:\n selector = event['keys'][0]\n event_abi = semantics[\"abi\"][\"events\"].get(selector)\n else:\n event_abi = None\n decoded_events.append(decode_event(chain_id, trace['contract_address'], event, event_abi))\n\n decoded_trace['calls'] = []\n for i, sub_trace in enumerate(trace['internal_calls']):\n sub_trace_id = trace_id + '_' if trace_id else ''\n sub_trace_id += str(i)\n sub_calls, sub_events = decode_trace(chain_id, block_hash, sub_trace, sub_trace_id, indent + 1)\n decoded_trace['calls'].append(sub_calls)\n decoded_events += sub_events\n\n decoded_trace['execution_resources'] = trace.get('execution_resources', [])\n decoded_trace['indent'] = indent\n\n return decoded_trace, decoded_events\n", "id": "5673069", "language": "Python", "matching_score": 2.469238758087158, "max_stars_count": 2, "path": "stark_tx/app/app/engine/decoders/trace.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom app.engine.decoders.parameter import decode_parameters\n\n\ndef decode_event(chain_id, contract, event, event_abi):\n if event_abi and 'parameters' in event_abi:\n parameters = decode_parameters(chain_id, event[\"data\"], event_abi['parameters'])\n else:\n parameters = [dict(name=f'key_{i}', value=event['keys'][i]) for i in range(len(event['keys']))] + \\\n [dict(name=f'data_{i}', value=event['data'][i]) for i in range(len(event['data']))]\n\n decoded_event = dict(contract=contract, name=event_abi['name'] if event_abi else \"anonymous\",\n keys=event['keys'], parameters=parameters, order=event['order'])\n\n return decoded_event\n", "id": "727034", "language": "Python", "matching_score": 0.2769432067871094, "max_stars_count": 2, "path": "stark_tx/app/app/engine/decoders/event.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nfrom mongoengine import connect\nfrom pymongo import MongoClient\n\nfrom .decoders.abi.decoder import ABIDecoder\nfrom .decoders.decoder_service import DecoderService\nfrom .decoders.semantic.decoder import SemanticDecoder\nfrom .models.objects_model import Call\nfrom .providers.etherscan_provider import EtherscanProvider\nfrom 
.providers.semantic_providers.semantics_database import (\n MongoSemanticsDatabase,\n ISemanticsDatabase,\n)\nfrom .providers.semantic_providers.semantics_repository import SemanticsRepository\nfrom .providers.web3_provider import Web3Provider\nfrom .utils.validators import assert_tx_hash\n\n\nclass EthTxConfig:\n mongo_connection_string: str\n etherscan_api_key: str\n web3nodes: Dict[str, str]\n mongo_database: str\n etherscan_urls: Dict[str, str]\n default_chain: str\n\n def __init__(\n self,\n mongo_connection_string: str,\n mongo_database: str,\n web3nodes: Dict[str, str],\n etherscan_api_key: str,\n etherscan_urls: Dict[str, str],\n default_chain: str = \"mainnet\",\n ):\n self.mongo_connection_string = mongo_connection_string\n self.etherscan_api_key = etherscan_api_key\n self.web3nodes = web3nodes\n self.mongo_database = mongo_database\n self.default_chain = default_chain\n self.etherscan_urls = etherscan_urls\n\n\nclass EthTxDecoders:\n semantic_decoder: SemanticDecoder\n abi_decoder: ABIDecoder\n\n def __init__(\n self,\n semantic_decoder: SemanticDecoder,\n abi_decoder: ABIDecoder,\n decoder_service: DecoderService,\n ):\n self.semantic_decoder = semantic_decoder\n self.abi_decoder = abi_decoder\n self._decoder_service = decoder_service\n\n def decode_transaction(self, tx_hash: str, chain_id: str = None):\n assert_tx_hash(tx_hash)\n return self._decoder_service.decode_transaction(chain_id, tx_hash)\n\n def get_proxies(self, call_tree: Call):\n delegations = self._decoder_service.get_delegations(call_tree)\n return self._decoder_service.get_token_proxies(delegations)\n\n\nclass EthTxProviders:\n web3provider: Web3Provider\n\n def __init__(self, web3provider: Web3Provider):\n self.web3provider = web3provider\n\n\nclass EthTx:\n @staticmethod\n def initialize(config: EthTxConfig):\n default_chain = config.default_chain\n mongo_client: MongoClient = connect(\n config.mongo_database, host=config.mongo_connection_string\n )\n repository = MongoSemanticsDatabase(mongo_client.db)\n web3provider = Web3Provider(\n config.web3nodes, default_chain=config.default_chain\n )\n etherscan = EtherscanProvider(\n config.etherscan_api_key,\n config.etherscan_urls,\n default_chain_id=config.default_chain,\n )\n\n return EthTx(default_chain, web3provider, repository, etherscan)\n\n semantics: SemanticsRepository\n\n def __init__(\n self,\n default_chain: str,\n web3provider: Web3Provider,\n repository: ISemanticsDatabase,\n etherscan: EtherscanProvider,\n ):\n self._default_chain = default_chain\n self._semantics = SemanticsRepository(repository, etherscan, web3provider)\n abi_decoder = ABIDecoder(self.semantics, self._default_chain)\n semantic_decoder = SemanticDecoder(self.semantics, self._default_chain)\n decoder_service = DecoderService(\n abi_decoder, semantic_decoder, web3provider, self._default_chain\n )\n self._decoders = EthTxDecoders(semantic_decoder, abi_decoder, decoder_service)\n self._providers = EthTxProviders(web3provider)\n\n @property\n def decoders(self) -> EthTxDecoders:\n \"\"\"EthTx Decoders.\"\"\"\n return self._decoders\n\n @property\n def semantics(self) -> SemanticsRepository:\n \"\"\"EthTx Semantics Repository.\"\"\"\n return self._semantics\n\n @property\n def providers(self) -> EthTxProviders:\n \"\"\"EthTx Providers.\"\"\"\n return self._providers\n\n @property\n def default_chain(self) -> str:\n \"\"\"Default chain.\"\"\"\n return self._default_chain\n\n @default_chain.setter\n def default_chain(self, chain: str) -> None:\n \"\"\"Default chain setter.\"\"\"\n 
self._default_chain = chain\n", "id": "7427182", "language": "Python", "matching_score": 2.988565444946289, "max_stars_count": 1, "path": "ethtx/ethtx.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, List, Union\n\nfrom ..models.decoded_model import DecodedTransaction\nfrom ..models.objects_model import Block, Call\n\n\nclass DecoderService:\n def __init__(self, abi_decoder, semantic_decoder, web3provider, default_chain):\n self.abi_decoder = abi_decoder\n self.semantic_decoder = semantic_decoder\n self.web3provider = web3provider\n self.default_chain = default_chain\n\n def get_delegations(\n self,\n calls: Union[Call, List[Call]]\n ) -> Dict[str, set]:\n\n delegations = {}\n\n if not calls:\n return delegations\n\n if isinstance(calls, list):\n for call in calls:\n if call.call_type == \"delegatecall\":\n if call.from_address not in delegations:\n delegations[call.from_address] = set()\n delegations[call.from_address].add(call.to_address)\n else:\n calls_queue = [calls]\n\n while calls_queue:\n call = calls_queue.pop()\n for _, sub_call in enumerate(call.subcalls):\n calls_queue.insert(0, sub_call)\n\n if call.call_type == \"delegatecall\":\n if call.from_address not in delegations:\n delegations[call.from_address] = set()\n delegations[call.from_address].add(call.to_address)\n\n return delegations\n\n def get_token_proxies(self, delegations: Dict[str, set]) -> Dict[str, Dict]:\n token_proxies = {}\n\n for delegator in delegations:\n delegator_semantic = self.semantic_decoder.repository.get_token_data(\n self.default_chain, delegator\n )\n if (\n delegator_semantic[0] == delegator\n and delegator_semantic[1] == \"Unknown\"\n ):\n for delegate in delegations[delegator]:\n delegate_semantic = self.semantic_decoder.repository.get_token_data(\n self.default_chain, delegate\n )\n if (\n delegate_semantic[0] != delegate\n and delegate_semantic[1] != \"Unknown\"\n ):\n token_proxies[delegator] = delegate_semantic\n break\n elif all(delegator_semantic):\n token_proxies[delegator] = delegator_semantic\n\n return token_proxies\n\n def decode_transaction(self, chain_id: str, tx_hash: str) -> DecodedTransaction:\n\n # verify the transaction hash\n tx_hash = tx_hash if tx_hash.startswith(\"0x\") else \"0x\" + tx_hash\n\n chain_id = chain_id or self.default_chain\n\n # read a raw transaction from a node\n transaction = self.web3provider.get_full_transaction(\n tx_hash=tx_hash, chain_id=chain_id\n )\n # read a raw block from a node\n block = Block.from_raw(\n w3block=self.web3provider.get_block(transaction.metadata.block_number),\n chain_id=chain_id,\n )\n\n # prepare lists of delegations to properly decode delegate-calling contracts\n delegations = self.get_delegations(transaction.root_call)\n token_proxies = self.get_token_proxies(delegations)\n\n # decode transaction using ABI\n abi_decoded_tx = self.abi_decoder.decode_transaction(\n block=block,\n transaction=transaction,\n delegations=delegations,\n 
token_proxies=token_proxies,\n chain_id=chain_id,\n )\n\n # decode transaction using additional semantics\n semantically_decoded_tx = self.semantic_decoder.decode_transaction(\n block=block.metadata,\n transaction=abi_decoded_tx,\n token_proxies=token_proxies,\n chain_id=chain_id,\n )\n\n return semantically_decoded_tx\n", "id": "10740080", "language": "Python", "matching_score": 3.4529004096984863, "max_stars_count": 1, "path": "ethtx/decoders/decoder_service.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport traceback\nfrom typing import Optional, Dict, List\n\nfrom ethtx.models.decoded_model import (\n DecodedTransaction,\n DecodedCall,\n DecodedEvent,\n DecodedTransfer,\n)\nfrom ethtx.models.objects_model import (\n Block,\n BlockMetadata,\n Transaction,\n TransactionMetadata,\n Call,\n Event,\n)\nfrom ethtx.utils.measurable import ExecutionTimer\nfrom .abc import IABIDecoder\nfrom .balances import ABIBalancesDecoder\nfrom .calls import ABICallsDecoder\nfrom .events import ABIEventsDecoder\nfrom .transfers import ABITransfersDecoder\n\nlog = logging.getLogger(__name__)\n\n\nclass ABIDecoder(IABIDecoder):\n def decode_transaction(\n self,\n block: Block,\n transaction: Transaction,\n chain_id: str,\n delegations: Optional[Dict[str, set]] = None,\n token_proxies: Optional[Dict[str, dict]] = None,\n ) -> Optional[DecodedTransaction]:\n\n log.info(\"ABI decoding for %s / %s.\", transaction.metadata.tx_hash, chain_id)\n\n try:\n with ExecutionTimer(f\"ABI decoding for \" + transaction.metadata.tx_hash):\n log.info(\n \"ABI decoding for %s / %s.\", transaction.metadata.tx_hash, chain_id\n )\n full_decoded_transaction = self._decode_transaction(\n block.metadata, transaction, chain_id, delegations, token_proxies\n )\n return full_decoded_transaction\n except Exception as e:\n log.warning(\n \"ABI decoding of %s / %s failed.\",\n transaction.metadata.tx_hash,\n chain_id,\n )\n traceback.print_exc(e)\n return None\n\n def decode_calls(\n self,\n root_call: Call,\n block: BlockMetadata,\n transaction: TransactionMetadata,\n delegations: Optional[Dict[str, set]] = None,\n token_proxies: Optional[Dict[str, dict]] = None,\n chain_id: Optional[str] = None,\n ) -> Optional[DecodedCall]:\n return ABICallsDecoder(\n repository=self._repository, chain_id=chain_id or self._default_chain\n ).decode(\n call=root_call,\n block=block,\n transaction=transaction,\n delegations=delegations,\n token_proxies=token_proxies,\n chain_id=chain_id or self._default_chain\n )\n\n def decode_call(\n self,\n root_call: Call,\n block: BlockMetadata,\n transaction: TransactionMetadata,\n delegations: Optional[Dict[str, set]] = None,\n token_proxies: Optional[Dict[str, dict]] = None,\n ) -> Optional[DecodedCall]:\n return ABICallsDecoder(\n repository=self._repository, chain_id=self._default_chain\n ).decode(\n call=root_call,\n block=block,\n transaction=transaction,\n delegations=delegations,\n token_proxies=token_proxies,\n )\n\n def decode_events(\n 
self,\n events: [Event],\n block: BlockMetadata,\n transaction: TransactionMetadata,\n delegations: Optional[Dict[str, set]] = None,\n token_proxies: Optional[Dict[str, dict]] = None,\n chain_id: Optional[str] = None,\n ) -> List[DecodedEvent]:\n return ABIEventsDecoder(\n repository=self._repository, chain_id=chain_id or self._default_chain\n ).decode(\n events=events,\n block=block,\n transaction=transaction,\n delegations=delegations or {},\n token_proxies=token_proxies or {},\n chain_id=chain_id or self._default_chain\n )\n\n def decode_event(\n self,\n events: Event,\n block: BlockMetadata,\n transaction: TransactionMetadata,\n delegations: Optional[Dict[str, set]] = None,\n token_proxies: Optional[Dict[str, dict]] = None,\n chain_id: Optional[str] = None,\n ) -> DecodedEvent:\n return ABIEventsDecoder(\n repository=self._repository, chain_id=chain_id or self._default_chain\n ).decode(\n events=events,\n block=block,\n transaction=transaction,\n delegations=delegations or {},\n token_proxies=token_proxies or {},\n chain_id=chain_id or self._default_chain\n )\n\n def decode_transfers(\n self,\n call: DecodedCall,\n events: List[DecodedEvent],\n token_proxies: Optional[Dict[str, dict]] = None,\n chain_id: Optional[str] = None,\n ):\n return ABITransfersDecoder(\n repository=self._repository, chain_id=chain_id or self._default_chain\n ).decode(\n call=call,\n events=events,\n token_proxies=token_proxies or {},\n )\n\n def decode_balances(self, transfers: List[DecodedTransfer]):\n return ABIBalancesDecoder(\n repository=self._repository, chain_id=self._default_chain\n ).decode(transfers=transfers)\n\n def _decode_transaction(\n self,\n block: BlockMetadata,\n transaction: Transaction,\n chain_id: str,\n delegations: Optional[Dict[str, set]] = None,\n token_proxies: Optional[Dict[str, dict]] = None,\n ) -> DecodedTransaction:\n\n full_decoded_transaction = DecodedTransaction(\n block_metadata=block,\n tx_metadata=transaction.metadata,\n events=[],\n calls=None,\n transfers=[],\n balances=[],\n )\n\n self._repository.record()\n\n try:\n full_decoded_transaction.events = self.decode_events(\n transaction.events,\n block,\n transaction.metadata,\n delegations,\n token_proxies,\n chain_id\n )\n except Exception as e:\n log.warning(\n \"ABI decoding of events for %s / %s failed.\",\n transaction.metadata.tx_hash,\n chain_id,\n )\n log.warning(e)\n return full_decoded_transaction\n\n try:\n full_decoded_transaction.calls = self.decode_calls(\n transaction.root_call,\n block,\n transaction.metadata,\n delegations,\n token_proxies,\n chain_id\n )\n except Exception as e:\n log.warning(\n \"ABI decoding of calls tree for %s / %s failed.\",\n transaction.metadata.tx_hash,\n chain_id,\n )\n log.warning(e)\n return full_decoded_transaction\n\n try:\n full_decoded_transaction.transfers = self.decode_transfers(\n full_decoded_transaction.calls,\n full_decoded_transaction.events,\n token_proxies,\n chain_id\n )\n except Exception as e:\n log.warning(\n \"ABI decoding of transfers for %s / %s failed.\",\n transaction.metadata.tx_hash,\n chain_id,\n )\n log.warning(e)\n return full_decoded_transaction\n\n try:\n full_decoded_transaction.balances = self.decode_balances(\n full_decoded_transaction.transfers\n )\n except Exception as e:\n log.warning(\n \"ABI decoding of balances for %s / %s failed.\",\n transaction.metadata.tx_hash,\n chain_id,\n )\n log.warning(e)\n return full_decoded_transaction\n\n used_semantics = self._repository.end_record()\n log.info(\n f\"Semantics used in decoding 
{transaction.metadata.tx_hash}: \"\n + \", \".join(used_semantics)\n )\n\n full_decoded_transaction.status = True\n\n return full_decoded_transaction\n", "id": "5456134", "language": "Python", "matching_score": 4.7704901695251465, "max_stars_count": 1, "path": "ethtx/decoders/abi/decoder.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom typing import Optional, Any, List, Dict\n\nfrom ethtx.models.decoded_model import DecodedCall, DecodedTransfer\nfrom ethtx.models.objects_model import Block, Transaction, Call, Event, TransactionMetadata, BlockMetadata\nfrom ethtx.providers.semantic_providers.semantics_repository import SemanticsRepository\n\n\nclass ABIBasic:\n def __init__(self, repository: SemanticsRepository, chain_id: str):\n self._default_chain = chain_id\n self._repository: SemanticsRepository = repository\n\n\nclass ABISubmoduleAbc(ABC, ABIBasic):\n @abstractmethod\n def decode(self, *args, **kwargs) -> Any:\n ...\n\n\nclass IABIDecoder(ABC, ABIBasic):\n def __init__(\n self,\n repository: SemanticsRepository,\n chain_id: str,\n strict: Optional[bool] = False,\n ):\n super().__init__(repository, chain_id)\n self.strict = strict\n\n @abstractmethod\n def decode_transaction(\n self,\n block: Block,\n transaction: Transaction,\n delegations: Dict[str, set],\n token_proxies: Dict[str, dict],\n ):\n ...\n\n @abstractmethod\n def decode_calls(\n self,\n call: Call,\n block: BlockMetadata,\n transaction: TransactionMetadata,\n delegations: Dict[str, set],\n token_proxies: Dict[str, dict]\n ) -> ABISubmoduleAbc.decode:\n ...\n\n @abstractmethod\n def decode_events(\n self,\n events: [Event],\n block: BlockMetadata,\n transaction: TransactionMetadata,\n delegations: Dict[str, set],\n token_proxies: Dict[str, dict],\n ) -> ABISubmoduleAbc.decode:\n ...\n\n @abstractmethod\n def decode_transfers(\n self,\n call: DecodedCall,\n events: [Event],\n token_proxies: Dict[str, dict]\n ) -> ABISubmoduleAbc.decode:\n ...\n\n @abstractmethod\n def decode_balances(\n self,\n transfers: List[DecodedTransfer]\n ) -> ABISubmoduleAbc.decode:\n ...\n", "id": "6053992", "language": "Python", "matching_score": 1.5715994834899902, "max_stars_count": 1, "path": "ethtx/decoders/abi/abc.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom datetime import datetime\nfrom typing import List, Any, Optional\n\nfrom ethtx.models.objects_model 
import BlockMetadata, TransactionMetadata\nfrom ethtx.utils.pickable import JsonObject\n\n\nclass AddressInfo(JsonObject):\n address: str\n name: str\n badge: Optional[str]\n\n def __init__(self, address: str, name: str, badge: Optional[str] = \"\"):\n self.address = address\n self.name = name\n self.badge = badge\n\n def __eq__(self, other):\n if isinstance(other, AddressInfo):\n return (\n self.address == other.address\n and self.name == other.name\n and self.badge == other.badge\n )\n return False\n\n\nclass DecodedTransactionMetadata(JsonObject):\n chain_id: str\n tx_hash: str\n block_number: int\n block_hash: str\n timestamp: datetime\n gas_price: int\n sender: AddressInfo\n receiver: AddressInfo\n tx_index: int\n tx_value: int\n eth_price: float\n gas_limit: int\n gas_used: int\n success: bool\n\n def __init__(\n self,\n chain_id: str,\n tx_hash: str,\n block_number: int,\n block_hash: str,\n timestamp: datetime,\n gas_price: int,\n sender: AddressInfo,\n receiver: AddressInfo,\n tx_index: int,\n tx_value: int,\n eth_price: float,\n gas_limit: int,\n gas_used: int,\n success: bool,\n ):\n self.chain_id = chain_id\n self.tx_hash = tx_hash\n self.block_number = block_number\n self.block_hash = block_hash\n self.timestamp = timestamp\n self.gas_price = gas_price\n self.sender = sender\n self.receiver = receiver\n self.tx_index = tx_index\n self.tx_value = tx_value\n self.eth_price = eth_price\n self.gas_limit = gas_limit\n self.gas_used = gas_used\n self.success = success\n\n\nclass Argument(JsonObject):\n name: str\n type: str\n value: Any\n\n def __init__(self, name: str, type: str, value: Any):\n self.name = name\n self.type = type\n self.value = value\n\n def __eq__(self, other):\n if isinstance(other, Argument):\n return (\n self.name == other.name\n and self.type == other.type\n and self.value == other.value\n )\n return False\n\n\nclass DecodedEvent(JsonObject):\n chain_id: str\n tx_hash: str\n timestamp: datetime\n contract: AddressInfo\n index: int\n call_id: str\n event_signature: str\n event_name: str\n parameters: List[Argument]\n\n def __init__(\n self,\n chain_id: str,\n tx_hash: str,\n timestamp: datetime,\n contract_address: str,\n contract_name: str,\n index: int,\n call_id: str,\n event_signature: str,\n event_name: str,\n parameters: List[Argument],\n ):\n self.chain_id = chain_id\n self.tx_hash = tx_hash\n self.timestamp = timestamp\n self.contract = AddressInfo(contract_address, contract_name)\n self.contract_name = contract_name\n self.index = index\n self.call_id = call_id\n self.event_signature = event_signature\n self.event_name = event_name\n self.parameters = parameters\n\n def __eq__(self, other):\n if isinstance(other, DecodedEvent):\n return (\n self.chain_id == other.chain_id\n and self.tx_hash == other.tx_hash\n and self.timestamp == other.timestamp\n and self.contract == other.contract\n and self.contract_name == other.contract_name\n and self.index == other.index\n and self.call_id == other.call_id\n and self.event_signature == other.event_signature\n and self.event_name == other.event_name\n and self.parameters == other.parameters\n )\n return False\n\n\nclass DecodedCall(JsonObject):\n chain_id: str\n timestamp: datetime\n tx_hash: str\n call_id: str\n call_type: str\n from_address: AddressInfo\n to_address: AddressInfo\n value: int\n function_signature: str\n function_name: str\n arguments: List[Argument]\n outputs: List[Argument]\n gas_used: int\n error: str\n status: bool\n subcalls: List[DecodedCall]\n\n def __init__(\n self,\n chain_id: str,\n 
tx_hash: str,\n timestamp: datetime,\n call_id: str,\n call_type: str,\n from_address: str,\n from_name: str,\n to_address: str,\n to_name: str,\n value: int,\n function_signature: str,\n function_name: str,\n arguments: List[Argument],\n outputs: List[Argument],\n gas_used: int,\n error: str,\n status: bool,\n indent: int,\n subcalls: Optional[List[DecodedCall]] = None,\n ):\n self.chain_id = chain_id\n self.tx_hash = tx_hash\n self.timestamp = timestamp\n self.call_id = call_id\n self.call_type = call_type\n self.from_address = AddressInfo(from_address, from_name)\n self.to_address = AddressInfo(to_address, to_name)\n self.to_name = to_name\n self.value = value\n self.function_signature = function_signature\n self.function_name = function_name\n self.arguments = arguments\n self.outputs = outputs\n self.gas_used = gas_used\n self.error = error\n self.status = status\n self.indent = indent\n self.subcalls = subcalls if subcalls else []\n\n def __eq__(self, other):\n if isinstance(other, DecodedCall):\n return (\n self.chain_id == other.chain_id\n and self.tx_hash == other.tx_hash\n and self.timestamp == other.timestamp\n and self.call_type == other.call_type\n and self.from_address == other.from_address\n and self.to_address == other.to_address\n and self.to_name == other.to_name\n and self.value == other.value\n and self.function_signature == other.function_signature\n and self.function_name == other.function_name\n and self.arguments == other.arguments\n and self.outputs == other.outputs\n and self.gas_used == other.gas_used\n and self.error == other.error\n and self.status == other.status\n and self.subcalls == other.subcalls\n )\n\n return False\n\n\nclass DecodedTransfer(JsonObject):\n from_address: AddressInfo\n to_address: AddressInfo\n token_address: Optional[str]\n token_symbol: str\n token_standard: Optional[str]\n value: float\n\n def __init__(\n self,\n from_address: AddressInfo,\n to_address: AddressInfo,\n token_standard: Optional[str],\n token_address: Optional[str],\n token_symbol: str,\n value: float,\n ):\n self.from_address = from_address\n self.to_address = to_address\n self.token_address = token_address\n self.token_symbol = token_symbol\n self.token_standard = token_standard\n self.value = value\n\n\nclass DecodedBalance(JsonObject):\n holder: AddressInfo\n tokens: List[dict]\n\n def __init__(self, holder: AddressInfo, tokens: List[dict]):\n self.holder = holder\n self.tokens = tokens\n\n\nclass DecodedTransaction(JsonObject):\n block_metadata: BlockMetadata\n metadata: TransactionMetadata\n events: List[DecodedEvent]\n calls: Optional[DecodedCall]\n transfers: List[DecodedTransfer]\n balances: List[DecodedBalance]\n status: bool\n\n def __init__(\n self,\n block_metadata: BlockMetadata,\n tx_metadata: TransactionMetadata,\n events: List[DecodedEvent],\n calls: Optional[DecodedCall],\n transfers: List[DecodedTransfer],\n balances: List[DecodedBalance],\n ):\n self.block_metadata = block_metadata\n self.metadata = tx_metadata\n self.events = events\n self.calls = calls\n self.transfers = transfers\n self.balances = balances\n self.status = False\n", "id": "9449986", "language": "Python", "matching_score": 4.105271816253662, "max_stars_count": 1, "path": "ethtx/models/decoded_model.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ethtx.models.decoded_model import DecodedTransactionMetadata, AddressInfo\nfrom ethtx.models.objects_model import BlockMetadata, TransactionMetadata\nfrom .abc import SemanticSubmoduleAbc\nfrom .helpers.utils import get_eth_price\n\n\nclass SemanticMetadataDecoder(SemanticSubmoduleAbc):\n \"\"\"Semantic Metadata Decoder.\"\"\"\n\n def decode(\n self,\n block_metadata: BlockMetadata,\n tx_metadata: TransactionMetadata,\n chain_id: str,\n ) -> DecodedTransactionMetadata:\n \"\"\"Semantically decode metadata.\"\"\"\n\n decoded_metadata = DecodedTransactionMetadata(\n chain_id=chain_id,\n tx_hash=tx_metadata.tx_hash,\n block_number=block_metadata.block_number,\n block_hash=block_metadata.block_hash,\n timestamp=block_metadata.timestamp,\n gas_price=tx_metadata.gas_price / 10 ** 9,\n sender=AddressInfo(\n tx_metadata.from_address,\n self.repository.get_address_label(chain_id, tx_metadata.from_address),\n \"sender\",\n ),\n receiver=AddressInfo(\n tx_metadata.to_address,\n self.repository.get_address_label(chain_id, tx_metadata.to_address),\n \"receiver\",\n ),\n tx_index=tx_metadata.tx_index,\n tx_value=tx_metadata.tx_value,\n eth_price=get_eth_price(),\n gas_limit=tx_metadata.gas_limit,\n gas_used=tx_metadata.gas_used,\n success=tx_metadata.success,\n )\n\n return decoded_metadata\n", "id": "11791950", "language": "Python", "matching_score": 2.7013137340545654, "max_stars_count": 1, "path": "ethtx/decoders/semantic/metadata.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom typing import List\n\nfrom hexbytes import HexBytes\n\nfrom ethtx.models.objects_model import BlockMetadata, TransactionMetadata, Event, Call\n\n\n@dataclass\nclass W3Block:\n chain_id: str\n difficulty: int\n extraData: HexBytes\n gasLimit: int\n gasUsed: int\n hash: HexBytes\n logsBloom: HexBytes\n miner: str\n nonce: HexBytes\n number: int\n parentHash: HexBytes\n receiptsRoot: HexBytes\n sha3Uncles: HexBytes\n size: int\n stateRoot: HexBytes\n timestamp: int\n totalDifficulty: int\n transactions: List\n transactionsRoot: HexBytes\n uncles: List\n\n def to_object(self) -> BlockMetadata:\n block_hash = self.hash.hex()\n timestamp = datetime.utcfromtimestamp(self.timestamp)\n parent_hash = self.parentHash.hex()\n miner = self.miner.lower()\n gas_limit = self.gasLimit\n gas_used = self.gasUsed\n tx_count = len(self.transactions)\n\n return BlockMetadata(\n block_number=self.number,\n block_hash=block_hash,\n timestamp=timestamp,\n parent_hash=parent_hash,\n miner=miner,\n gas_limit=gas_limit,\n gas_used=gas_used,\n tx_count=tx_count,\n 
)\n\n\n@dataclass\nclass W3Transaction:\n chain_id: str\n blockHash: str\n blockNumber: int\n from_address: str\n gas: int\n gasPrice: int\n hash: HexBytes\n input: str\n nonce: int\n r: HexBytes\n s: HexBytes\n to: str\n transactionIndex: int\n v: int\n value: int\n\n def to_object(self, w3receipt: W3Receipt) -> TransactionMetadata:\n tx_hash = self.hash.hex()\n block_number = self.blockNumber\n tx_index = self.transactionIndex\n from_address = self.from_address.lower()\n to_address = self.to.lower() if self.to else w3receipt.contractAddress.lower() if w3receipt.contractAddress else None\n tx_value = self.value\n gas_limit = self.gas\n gas_price = self.gasPrice\n gas_used = w3receipt.gasUsed\n success = w3receipt.status == 1\n\n return TransactionMetadata(\n tx_hash=tx_hash,\n block_number=block_number,\n tx_index=tx_index,\n from_address=from_address,\n to_address=to_address,\n tx_value=tx_value,\n gas_limit=gas_limit,\n gas_price=gas_price,\n gas_used=gas_used,\n success=success,\n )\n\n\n@dataclass\nclass W3Receipt:\n tx_hash: str\n chain_id: str\n blockHash: HexBytes\n blockNumber: int\n contractAddress: str\n cumulativeGasUsed: int\n from_address: str\n gasUsed: int\n logsBloom: HexBytes\n root: str\n status: int\n to_address: str\n transactionHash: HexBytes\n transactionIndex: int\n logs: list = field(default_factory=list)\n\n\n@dataclass\nclass W3Log:\n tx_hash: str\n chain_id: str\n address: str\n blockHash: HexBytes\n blockNumber: int\n data: str\n logIndex: int\n removed: bool\n topics: List[HexBytes]\n transactionHash: HexBytes\n transactionIndex: int\n\n def to_object(self) -> Event:\n contract = self.address.lower()\n log_index = self.logIndex\n log_data = self.data\n topics = []\n\n for i in range(len(self.topics)):\n topics.append(self.topics[i].hex())\n\n return Event(\n contract=contract, topics=topics, log_data=log_data, log_index=log_index\n )\n\n\n@dataclass\nclass W3CallTree:\n tx_hash: str\n chain_id: str\n type: str\n from_address: str\n to_address: str\n input: str\n output: str\n value: str = None\n time: str = None\n gas: str = None\n gasUsed: str = None\n error: str = None\n calls: list = field(default_factory=list)\n\n def to_object(self) -> Call:\n from_address = self.from_address\n to_address = self.to_address\n call_value = int(self.value, 16) if self.value else 0\n call_type = self.type.lower()\n call_data = self.input\n return_value = self.output\n gas_used = int(self.gasUsed, 16) if self.gasUsed else None\n call_gas = int(self.gas, 16) if self.gas else None\n status = self.error is None\n error = self.error\n\n call = Call(\n call_type=call_type,\n from_address=from_address,\n to_address=to_address,\n call_value=call_value,\n call_data=call_data,\n return_value=return_value,\n gas_used=gas_used,\n call_gas=call_gas,\n status=status,\n error=error,\n )\n\n for child_call in self.calls:\n call.subcalls.append(child_call.to_object())\n\n return call\n", "id": "12331444", "language": "Python", "matching_score": 4.92221736907959, "max_stars_count": 1, "path": "ethtx/models/w3_model.py" }, { "content": "from typing import Dict\n\nfrom hexbytes import HexBytes\n\nfrom ethtx.models.w3_model import W3Transaction, W3Receipt, W3Block, W3Log\nfrom ethtx.utils.attr_dict import AttrDict\n\n\nclass MockWeb3Provider:\n blocks = {\n 1: {\n \"difficulty\": 123, # int\n \"extraData\": \"test\", # HexBytes\n \"gasLimit\": 123, # int\n \"gasUsed\": 123, # int\n \"hash\": HexBytes(\n 
b\"\\x88\\xe9mE7\\xbe\\xa4\\xd9\\xc0]\\x12T\\x99\\x07\\xb3%a\\xd3\\xbf1\\xf4Z\\xaesL\\xdc\\x11\\x9f\\x13@l\\xb6\"\n ), # str\n \"logsBloom\": \"test\", # HexBytes\n \"miner\": \"test\", # str\n \"nonce\": \"test\", # HexBytes\n \"number\": 123, # int\n \"parentHash\": HexBytes(\n b\"\\x88\\xe9mE7\\xbe\\xa4\\xd9\\xc0]\\x12T\\x99\\x07\\xb3%a\\xd3\\xbf1\\xf4Z\\xaesL\\xdc\\x11\\x9f\\x13@l\\xb6\"\n ), # str\n \"receiptsRoot\": \"test\", # HexBytes\n \"sha3Uncles\": \"test\", # HexBytes\n \"size\": 123, # int\n \"stateRoot\": \"test\", # HexBytes\n \"timestamp\": 123, # int,\n \"totalDifficulty\": 123, # int\n \"transactions\": [], # List\n \"transactionsRoot\": \"test\", # HexBytes\n \"uncles\": [], # List\n }\n }\n\n txs = {\n \"0xd7701a0fc05593aee3a16f20cab605db7183f752ae942cc75fd0975feaf1072e\": {\n \"blockHash\": HexBytes(\n b\"\\x88\\xe9mE7\\xbe\\xa4\\xd9\\xc0]\\x12T\\x99\\x07\\xb3%a\\xd3\\xbf1\\xf4Z\\xaesL\\xdc\\x11\\x9f\\x13@l\\xb6\"\n ), # str\n \"blockNumber\": 1, # int\n \"from_address\": \"fromasd\", # str\n \"gas\": 420, # int\n \"gasPrice\": 1, # int\n \"hash\": HexBytes(\n b\"\\x88\\xe9mE7\\xbe\\xa4\\xd9\\xc0]\\x12T\\x99\\x07\\xb3%a\\xd3\\xbf1\\xf4Z\\xaesL\\xdc\\x11\\x9f\\x13@l\\xb6\"\n ), # HexBytes,\n \"input\": \"jeszcze jak\", # str\n \"nonce\": 123, # int\n \"r\": \"ds\", # HexBytes\n \"s\": \"sdf\", # HexBytes\n \"to\": \"sdf\", # str\n \"transactionIndex\": 1, # int\n \"v\": 1, # int\n \"value\": 1, # int\n }\n }\n\n def add_mocked_block_details(self, block_number, block_details: Dict):\n self.blocks[block_number] = block_details\n\n def get_transaction(self, tx_hash, chain_id=\"mainnet\"):\n return W3Transaction(chain_id=chain_id, **self.txs[tx_hash])\n\n def get_receipt(self, tx_hash, chain_id):\n log_values = AttrDict(\n {\n \"tx_hash\": tx_hash,\n \"chain_id\": chain_id,\n \"address\": \"test\", # str\n \"blockHash\": \"test\", # HexBytes\n \"blockNumber\": 123, # int\n \"data\": \"test\", # str\n \"logIndex\": 132, # int\n \"removed\": False, # bool,\n \"topics\": [HexBytes(\"d\")], # List[HexBytes]\n \"transactionHash\": \"test\", # HexBytes\n \"transactionIndex\": 123, # int\n }\n )\n\n log = W3Log(**log_values)\n values = {\n \"blockHash\": \"test\", # HexBytes\n \"blockNumber\": 123, # int\n \"contractAddress\": 123, # str\n \"cumulativeGasUsed\": 132, # int,\n \"from_address\": \"from\", # str\n \"gasUsed\": 123, # int\n \"logs\": [log], # List\n \"logsBloom\": \"test\", # HexBytes\n \"root\": \"test\", # str\n \"status\": 123, # int,\n \"to_address\": \"test\", # str\n \"transactionHash\": \"test\", # HexBytes\n \"transactionIndex\": 123, # int\n }\n return W3Receipt(tx_hash, chain_id, **values)\n\n def get_block(self, block_number: int, chain_id: str = None) -> W3Block:\n return W3Block(chain_id=chain_id, **self.blocks[block_number])\n", "id": "1294945", "language": "Python", "matching_score": 0.39726492762565613, "max_stars_count": 1, "path": "tests/mocks/web3provider.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n#\n#\n\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef print_transaction(tx):\n log.info(\n \"Block: %s/%s Time: %s\", tx[\"block_hash\"], tx[\"block_number\"], tx[\"timestamp\"]\n )\n log.info(\n \"Tx: %s Index: %s Status: %s\",\n tx[\"transaction_hash\"],\n tx[\"transaction_index\"],\n tx[\"status\"],\n )\n if tx[\"type\"] == \"INVOKE_FUNCTION\":\n input_string = \", \".join(\n [f\"{_input['name']}={_input['value']}\" for _input in tx[\"inputs\"]]\n )\n output_string = \", \".join(\n [f\"{_output['name']}={_output['value']}\" for _output in tx[\"outputs\"]]\n )\n log.info(\n \"Invoke %s.%s(%s) -> (%s)\",\n tx[\"contract\"],\n tx[\"function\"],\n input_string,\n output_string,\n )\n elif tx[\"type\"] == \"DEPLOY\":\n log.info(\"Deploy %s\", tx[\"contract\"])\n input_string = \", \".join(\n [f\"{_input['name']}={_input['value']}\" for _input in tx[\"inputs\"]]\n )\n for transaction in tx[\"l2_to_l1_messages\"]:\n payload_string = \", \".join(transaction[\"payload\"])\n log.info(\n \"L2->L1: %s -> %s (%s)\",\n transaction[\"from_address\"],\n transaction[\"to_address\"],\n payload_string,\n )\n\n return\n", "id": "686924", "language": "Python", "matching_score": 1.0391770601272583, "max_stars_count": 2, "path": "stark_tx/app/app/frontend/output.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom datetime import datetime\n\n\ndef decode_parameters(chain_id, parameters, parameters_abi):\n decoded_parameters = []\n parameters_index = 0\n abi_index = 0\n\n while parameters_index < len(parameters) and abi_index < len(parameters_abi):\n\n parameter_type = (\n \"address\"\n if \"address\" in parameters_abi[abi_index][\"name\"]\n else \"timestamp\" if \"timestamp\" in parameters_abi[abi_index][\"name\"]\n else parameters_abi[abi_index][\"type\"]\n )\n\n if parameter_type == \"struct\":\n name = parameters_abi[abi_index][\"name\"]\n value, delta = decode_struct(\n parameters[parameters_index:],\n parameters_abi[abi_index][\"struct_members\"]\n )\n value = \"{\" + create_parameters_string(value) + \"}\"\n parameters_index += delta\n abi_index += 1\n\n elif parameter_type == \"tuple\":\n name = parameters_abi[abi_index][\"name\"]\n value, delta = decode_struct(\n parameters[parameters_index:],\n parameters_abi[abi_index][\"tuple_members\"]\n )\n value = \"(\" + create_parameters_string(value) + \")\"\n parameters_index += delta\n abi_index += 1\n\n elif parameter_type == \"struct*\":\n name = parameters_abi[abi_index][\"name\"]\n value = []\n if abi_index > 0 and parameters_abi[abi_index-1][\"name\"] == parameters_abi[abi_index][\"name\"] + \"_len\":\n array_len = decoded_parameters[-1][\"value\"]\n decoded_parameters.pop()\n value = []\n for _ in range(array_len):\n fields, delta = decode_struct(\n parameters[parameters_index:],\n parameters_abi[abi_index][\"struct_members\"]\n )\n fields = \"{\" + create_parameters_string(fields) + \"}\"\n value.append(fields)\n parameters_index += delta\n abi_index += 1\n\n elif 
parameter_type == \"tuple*\":\n name = parameters_abi[abi_index][\"name\"]\n value = []\n if abi_index > 0 and parameters_abi[abi_index-1][\"name\"] == parameters_abi[abi_index][\"name\"] + \"_len\":\n array_len = decoded_parameters[-1][\"value\"]\n decoded_parameters.pop()\n value = []\n for _ in range(array_len):\n fields, delta = decode_struct(\n parameters[parameters_index:],\n parameters_abi[abi_index][\"tuple_members\"]\n )\n fields = \"(\" + create_parameters_string(fields) + \")\"\n value.append(fields)\n parameters_index += delta\n abi_index += 1\n\n else:\n value = decode_atomic_parameter(\n parameters[parameters_index], parameter_type\n )\n if (\n abi_index + 1 < len(parameters_abi)\n and parameters_abi[abi_index + 1][\"type\"] == \"felt*\"\n and parameters_abi[abi_index][\"name\"]\n == parameters_abi[abi_index + 1][\"name\"] + \"_len\"\n ):\n array_len = value\n value = [\n array_element\n for array_element in parameters[\n parameters_index + 1: parameters_index + array_len + 1\n ]\n ]\n name = parameters_abi[abi_index + 1][\"name\"]\n parameters_index += array_len + 1\n abi_index += 2\n else:\n name = parameters_abi[abi_index][\"name\"]\n parameters_index += 1\n abi_index += 1\n\n decoded_parameters.append(dict(name=name, value=value))\n\n return decoded_parameters\n\n\ndef create_parameters_string(parameters):\n parameters_string = \", \".join(\n [\n f\"{_input['name']+'=' if _input['name'] else ''}\"\n f\"{_input['value'] if type(_input['value']) != list else '{'+create_parameters_string(_input['value'])+'}'}\"\n for _input in parameters\n ]\n )\n return parameters_string\n\n\ndef decode_struct(raw_values, members):\n fields = []\n i = 0\n for member in members:\n field = dict()\n if member[\"type\"] == 'struct':\n field[\"name\"] = member[\"name\"]\n value, delta = decode_struct(\n raw_values[i:],\n member[\"struct_members\"],\n )\n field[\"value\"] = value\n i += delta\n else:\n field[\"name\"] = member[\"name\"]\n field[\"value\"] = decode_atomic_parameter(raw_values[i], member[\"type\"])\n i += 1\n fields.append(field)\n\n return fields, i\n\n\ndef decode_atomic_parameter(raw_value, parameter_type):\n if parameter_type == \"felt\":\n if raw_value[:2] == '0x':\n parameter_value = int(raw_value, 16)\n else:\n parameter_value = int(raw_value)\n if parameter_value > 10**40:\n parameter_value = hex(parameter_value)\n elif parameter_type == \"address\":\n if type(raw_value) == int:\n parameter_value = hex(int(raw_value))\n else:\n parameter_value = raw_value\n elif parameter_type == \"timestamp\":\n try:\n parameter_value = str(datetime.fromtimestamp(int(raw_value, 16)))[:19]\n except:\n parameter_value = int(raw_value, 16)\n elif parameter_type == \"string\":\n parameter_value = (\n bytes.fromhex(hex(int(raw_value))[2:]).decode(\"utf-8\").replace(\"\\x00\", \"\")\n )\n else:\n parameter_value = raw_value\n\n return parameter_value\n", "id": "5156665", "language": "Python", "matching_score": 2.164825677871704, "max_stars_count": 2, "path": "stark_tx/app/app/engine/decoders/parameter.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom typing import Dict\n\nfrom eth_hash.auto import keccak\nfrom starkware.cairo.lang.vm.crypto import pedersen_hash\n\nMAX_STORAGE_ITEM_SIZE = 256\nADDRESS_BOUND = 2 ** 251 - MAX_STORAGE_ITEM_SIZE\nMASK_250 = 2 ** 250 - 1\n\n\ndef starknet_keccak(data: bytes) -> int:\n return int.from_bytes(keccak(data), \"big\") & MASK_250\n\n\ndef get_selector_from_name(func_name: str) -> str:\n return hex(starknet_keccak(data=func_name.encode(\"ascii\")))\n\n\ndef get_storage_var_address(var_name: str, *args) -> str:\n res = starknet_keccak(var_name.encode(\"utf8\"))\n for arg in args:\n assert isinstance(arg, int), f\"Expected arguments to be integers. Found: {arg}.\"\n res = pedersen_hash(res, arg)\n address = hex(res % ADDRESS_BOUND)\n address = '0x' + '0'*(64 - len(address[2:])) + address[2:]\n\n return address\n\n\ndef decode_abi(raw_abi: dict) -> Dict[str, dict]:\n\n def _flatten_parameters(_parameters, _structures):\n flattened_parameters = []\n for _parameter in _parameters:\n _parameter[\"type\"] = _parameter[\"type\"].strip()\n if _parameter[\"type\"][0] == \"(\" and _parameter[\"type\"][-1] == \")\":\n _parameter[\"tuple_members\"] = []\n for _tuple_item in _parameter[\"type\"][1:-1].split(','):\n _parameter[\"tuple_members\"] += _flatten_parameters([dict(name=\"\", type=_tuple_item)], _structures)\n _parameter[\"type\"] = \"tuple\"\n elif _parameter[\"type\"][0] == \"(\" and _parameter[\"type\"][-2:] == \")*\":\n _parameter[\"tuple_members\"] = []\n for _tuple_item in _parameter[\"type\"][1:-2].split(','):\n _parameter[\"tuple_members\"] += _flatten_parameters([dict(name=\"\", type=_tuple_item)], _structures)\n _parameter[\"type\"] = \"tuple*\"\n elif _parameter[\"type\"] in _structures:\n _parameter[\"struct_name\"] = _parameter[\"type\"]\n _parameter[\"struct_members\"] = _flatten_parameters(_structures[_parameter[\"struct_name\"]], _structures)\n _parameter[\"type\"] = \"struct\"\n elif _parameter[\"type\"][-1:] == '*' and _parameter[\"type\"][:-1] in _structures:\n _parameter[\"struct_name\"] = _parameter[\"type\"][:-1]\n _parameter[\"struct_members\"] = _flatten_parameters(_structures[_parameter[\"struct_name\"]], _structures)\n _parameter[\"type\"] = \"struct*\"\n flattened_parameters.append(_parameter)\n return flattened_parameters\n\n if \"abi\" in raw_abi:\n raw_abi = raw_abi[\"abi\"]\n\n structures = dict()\n for element in raw_abi:\n if element[\"type\"] == \"struct\":\n structures[element[\"name\"]] = [\n dict(name=member[\"name\"], type=member[\"type\"])\n for member in element[\"members\"]\n ]\n\n functions = dict()\n events = dict()\n l1_handlers = dict()\n for element in raw_abi:\n\n if element[\"type\"] == \"constructor\":\n inputs = _flatten_parameters(element[\"inputs\"], structures)\n functions['constructor'] = dict(\n name=element[\"name\"], inputs=inputs, outputs=dict()\n )\n\n elif element[\"type\"] == \"function\":\n selector = get_selector_from_name(element[\"name\"]) \\\n if element[\"name\"] != \"__default__\" else element[\"name\"]\n inputs = _flatten_parameters(element[\"inputs\"], structures)\n outputs = _flatten_parameters(element[\"outputs\"], structures)\n functions[selector] = dict(\n name=element[\"name\"], inputs=inputs, outputs=outputs\n )\n\n elif element[\"type\"] == \"event\":\n selector = get_selector_from_name(element[\"name\"])\n parameters = _flatten_parameters(element[\"data\"], structures)\n events[selector] = dict(\n 
name=element[\"name\"], keys=element[\"keys\"], parameters=parameters\n )\n\n elif element[\"type\"] == \"l1_handler\":\n selector = get_selector_from_name(element[\"name\"]) \\\n if element[\"name\"] != \"__l1_default__\" else element[\"name\"]\n inputs = _flatten_parameters(element[\"inputs\"], structures)\n outputs = _flatten_parameters(element[\"outputs\"], structures)\n l1_handlers[selector] = dict(name=element[\"name\"], inputs=inputs, outputs=outputs)\n\n return dict(structures=structures, functions=functions, events=events, l1_handlers=l1_handlers)\n", "id": "8326905", "language": "Python", "matching_score": 2.022601842880249, "max_stars_count": 2, "path": "stark_tx/app/app/engine/decoders/abi.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nimport json\nfrom typing import Optional\nfrom eth_hash.auto import keccak\n\nfrom app.engine.decoders.abi import decode_abi\nfrom app.engine.providers.sequencer import get_abi\n\nsemantics = {}\n\n\ndef load_semantics():\n global semantics\n try:\n semantics = json.load(open(\"artefacts/semantics.json\", \"r\"))\n except Exception:\n pass\n\n\ndef store_semantics():\n global semantics\n json.dump(semantics, open(\"artefacts/semantics.json\", \"w\"))\n\n\ndef get_semantics(\n chain_id: str, contract: str, block_hash: Optional[str] = None\n) -> dict:\n global semantics\n\n if contract in semantics:\n contract_semantics = semantics[contract]\n else:\n raw_abi = get_abi(chain_id, contract, block_hash=block_hash)\n decoded_abi = decode_abi(raw_abi[\"abi\"] if \"abi\" in raw_abi else {})\n contract_semantics = dict(\n contract=contract, name=contract[:10], abi=decoded_abi\n )\n if raw_abi[\"bytecode\"]:\n code_hash = keccak(bytearray.fromhex(''.join([\"{0:0{1}x}\".format(int(code, 16), 64)\n for code in raw_abi[\"bytecode\"]])))\n contract_semantics['hash'] = '0x' + code_hash.hex()\n semantics[contract] = contract_semantics\n\n return contract_semantics\n", "id": "7841981", "language": "Python", "matching_score": 1.3877170085906982, "max_stars_count": 2, "path": "stark_tx/app/app/engine/providers/semantics.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nimport logging\nfrom functools import lru_cache\nfrom typing import Optional\n\nimport requests\n\nfrom app.core.config import SequencerURL\nfrom app.engine.decorators import starknet_api_handler\nfrom app.engine.types import TStarkNetAPIResponse\n\nlogger = 
logging.getLogger(__name__)\n\n\n# reads block data from the sequencer (by hash)\n@lru_cache()\n@starknet_api_handler\ndef get_block_hash(chain_id: str, block_hash: int) -> TStarkNetAPIResponse:\n url = f\"{SequencerURL[chain_id]}/get_block?blockHash={block_hash}\"\n logger.info(\"Get_block url: %s\", url)\n\n return requests.get(url)\n\n\n# reads block data from the sequencer (by id)\n@lru_cache()\n@starknet_api_handler\ndef get_block_id(chain_id: str, block_id: int) -> TStarkNetAPIResponse:\n url = f\"{SequencerURL[chain_id]}/get_block?blockNumber={block_id}\"\n logger.info(\"Get_block url: %s\", url)\n\n return requests.get(url)\n\n\n# reads transaction data from the sequencer\n@lru_cache()\n@starknet_api_handler\ndef get_transaction(chain_id: str, transaction_hash: str) -> TStarkNetAPIResponse:\n url = f\"{SequencerURL[chain_id]}/get_transaction?transactionHash={transaction_hash}\"\n logger.info(\"Get_transaction: url: %s\", url)\n\n return requests.get(url)\n\n\n# reads transaction trace data from the sequencer\n@lru_cache()\n@starknet_api_handler\ndef get_transaction_trace(chain_id: str, transaction_hash: str) -> TStarkNetAPIResponse:\n url = f\"{SequencerURL[chain_id]}/get_transaction_trace?transactionHash={transaction_hash}\"\n logger.info(\"Get_transaction trace: url: %s\", url)\n\n return requests.get(url)\n\n\n# reads the contract data from the sequencer\n@lru_cache()\n@starknet_api_handler\ndef get_abi(\n chain_id: str, contract_id: str, *, block_hash: Optional[str] = None\n) -> TStarkNetAPIResponse:\n url = (\n f\"{SequencerURL[chain_id]}/get_code?\"\n f'contractAddress={contract_id}{\"&blockHash=\" + block_hash if block_hash and block_hash != \"pending\" else \"\"}'\n )\n logger.info(\"Get_abi: url: %s\", url)\n\n return requests.get(url)\n", "id": "4297244", "language": "Python", "matching_score": 1.1655964851379395, "max_stars_count": 2, "path": "stark_tx/app/app/engine/providers/sequencer.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nimport requests\nfrom ethereum.transactions import Transaction\n\n\ndef decode_from_4byte(method_sig, decoded_methods):\n if method_sig not in decoded_methods:\n url = (\n \"https://www.4byte.directory/api/v1/signatures/?hex_signature=\" + method_sig\n )\n r = requests.get(url).json()\n if len(r[\"results\"]):\n text_sig = r[\"results\"][-1][\"text_signature\"]\n else:\n text_sig = f\"{method_sig}()\"\n\n decoded_methods[method_sig] = text_sig\n else:\n text_sig = decoded_methods.get(method_sig)\n\n return text_sig\n\n\ndef decode_sequencer_batch(data):\n BATCH_CONTEXT_START_POS = 15\n BATCH_CONTEXT_SIZE = 16\n TX_DATA_HEADER_SIZE = 3\n\n def load_call_data(data, position, shift):\n\n sub_data = data[2 + 2 * position :]\n value = int(sub_data[: shift * 2], 16)\n\n return value\n\n def load_tx_data(data, position, length):\n def ECDSA_recover(transaction):\n tx = Transaction(\n transaction[\"nonce\"],\n transaction[\"gas_price\"],\n 
transaction[\"gas_limit\"],\n b\"\"\n if transaction[\"to_address\"]\n == \"0x0000000000000000000000000000000000000000\"\n else transaction[\"to_address\"],\n transaction[\"value\"],\n bytes.fromhex(transaction[\"data\"]),\n int(transaction[\"v\"], 16) + 55,\n int(transaction[\"r\"], 16),\n int(transaction[\"s\"], 16),\n )\n\n tx_hash = \"0x\" + tx.hash.hex()\n from_address = \"0x\" + tx.sender.hex()\n\n return from_address, tx_hash\n\n sub_data = data[2 + 2 * position :][: length * 2]\n\n is_eip155 = int(sub_data[:2])\n r = sub_data[2 : 33 * 2]\n s = sub_data[33 * 2 : 65 * 2]\n v = sub_data[65 * 2 : 66 * 2]\n gas_limit = int(sub_data[66 * 2 : 69 * 2], 16)\n gas_price = int(sub_data[69 * 2 : 72 * 2], 16)\n nonce = int(sub_data[72 * 2 : 75 * 2], 16)\n to_address = \"0x\" + sub_data[75 * 2 : 95 * 2]\n data = sub_data[95 * 2 :]\n signature = decode_from_4byte(\"0x\" + data[:8], decoded_methods)\n input_data = data[8:]\n\n transaction = dict(\n eip155=(is_eip155 == 0),\n r=r,\n s=s,\n v=v,\n gas_limit=gas_limit,\n gas_price=gas_price,\n nonce=nonce,\n to_address=to_address,\n value=0,\n data=data,\n signature=signature,\n input=input_data,\n )\n\n transaction[\"from_address\"], transaction[\"tx_hash\"] = ECDSA_recover(transaction)\n transaction.pop(\"data\")\n\n return transaction\n\n decoded_methods = dict()\n\n data = \"0x00000000\" + data\n\n shouldStartAtElement = load_call_data(data, 4, 5)\n totalElementsToAppend = load_call_data(data, 9, 3)\n numContexts = load_call_data(data, 12, 3)\n numTransactions = 0\n\n batch = dict(\n shouldStartAtElement=shouldStartAtElement,\n totalElementsToAppend=totalElementsToAppend,\n numContexts=numContexts,\n contexts=[],\n )\n\n nextTransactionPtr = BATCH_CONTEXT_START_POS + BATCH_CONTEXT_SIZE * numContexts\n\n for i in range(numContexts):\n\n contextPtr = 15 + i * BATCH_CONTEXT_SIZE\n numSequencedTransactions = load_call_data(data, contextPtr, 3)\n numSubsequentQueueTransactions = load_call_data(data, contextPtr + 3, 3)\n ctxTimestamp = datetime.utcfromtimestamp(\n load_call_data(data, contextPtr + 6, 5)\n )\n ctxBlockNumber = load_call_data(data, contextPtr + 11, 5)\n\n context = dict(\n numSequencedTransactions=numSequencedTransactions,\n numSubsequentQueueTransactions=numSubsequentQueueTransactions,\n ctxTimestamp=ctxTimestamp,\n ctxBlockNumber=ctxBlockNumber,\n ctxSequencedTransactions=[],\n )\n\n for _ in range(numSequencedTransactions):\n txDataLength = load_call_data(data, nextTransactionPtr, 3)\n transactionData = load_tx_data(data, nextTransactionPtr + 3, txDataLength)\n context[\"ctxSequencedTransactions\"].append(transactionData)\n numTransactions += 1\n nextTransactionPtr += TX_DATA_HEADER_SIZE + txDataLength\n\n batch[\"contexts\"].append(context)\n\n batch[\"numTransactions\"] = numTransactions\n\n return batch\n\n\ndef decode_ovm_message(data):\n target = gas_limit = data\n signature = input_data = None\n transaction = dict(\n eip155=Transaction,\n r=None,\n s=None,\n v=None,\n gas_limit=gas_limit,\n gas_price=0,\n nonce=0,\n to_address=target,\n value=0,\n data=data,\n signature=signature,\n input=input_data,\n )\n\n context = dict(\n ctxTimestamp=None, ctxBlockNumber=None, ctxSequencedTransactions=[transaction]\n )\n\n batch = dict(numContexts=1, contexts=[context])\n\n return batch\n", "id": "1654685", "language": "Python", "matching_score": 0.8992982506752014, "max_stars_count": 1, "path": "ethtx/semantics/rollups/optimism.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC\nfrom typing import Dict, Optional\n\nfrom pymongo.database import Database as MongoDatabase\n\n\nclass ISemanticsDatabase(ABC):\n \"\"\"Semantics Database. Represents raw interface required to be\n implemented by a database that provides persistent\n data about semantics\"\"\"\n\n def get_address_semantics(self, chain_id: str, address: str) -> Optional[Dict]:\n ...\n\n def get_contract_semantics(self, code_hash: str) -> Optional[Dict]:\n ...\n\n def get_signature_semantics(self, signature_hash: str) -> Optional[Dict]:\n ...\n\n def insert_contract(self, contract: dict, update_if_exist: bool = False):\n ...\n\n def insert_address(self, address_data: dict, update_if_exist: bool = False):\n ...\n\n def insert_signature(self, signature, update_if_exist: bool = False):\n ...\n\n\nclass MongoCollections:\n ADDRESSES = \"addresses\"\n CONTRACTS = \"contracts\"\n SIGNATURES = \"signatures\"\n\n\nclass MongoSemanticsDatabase(ISemanticsDatabase):\n def get_collection_count(self):\n return len(self._db.list_collection_names())\n\n def __init__(self, db: MongoDatabase):\n self._db = db\n self._addresses = self._db[\"addresses\"]\n self._contracts = self._db[\"contracts\"]\n self._signatures = self._db[\"signatures\"]\n\n def get_address_semantics(self, chain_id, address) -> Optional[Dict]:\n _id = f\"{chain_id}-{address}\"\n return self._addresses.find_one({\"_id\": _id}, {\"_id\": 0})\n\n def get_signature_semantics(self, signature_hash):\n return self._signatures.find_one({\"_id\": signature_hash}, {\"_id\": 0})\n\n def get_contract_semantics(self, code_hash):\n \"\"\"Contract hashes are always the same, no mather what chain we use, so there is no need\n to use chain_id\"\"\"\n return self._contracts.find_one({\"_id\": code_hash}, {\"_id\": 0})\n\n def insert_contract(self, contract, update_if_exist=False):\n contract_with_id = {\"_id\": contract[\"code_hash\"], **contract}\n\n if update_if_exist:\n self._contracts.replace_one(\n {\"_id\": contract_with_id[\"_id\"]}, contract_with_id, upsert=True\n )\n else:\n self._contracts.insert_one(contract_with_id)\n\n def insert_address(self, address, update_if_exist=False):\n address_with_id = {\n \"_id\": f\"{address['chain_id']}-{address['address']}\",\n **address,\n }\n\n if update_if_exist:\n self._addresses.replace_one(\n {\"_id\": address_with_id[\"_id\"]}, address_with_id, upsert=True\n )\n else:\n self._addresses.insert_one(address_with_id)\n\n def insert_signature(self, signature, update_if_exist=False):\n signature_with_id = {\"_id\": signature[\"hash\"], **signature}\n\n if update_if_exist:\n self._signatures.replace_one(\n {\"_id\": signature_with_id[\"_id\"]}, signature_with_id, upsert=True\n )\n else:\n self._signatures.insert_one(signature_with_id)\n", "id": "7712565", "language": "Python", "matching_score": 2.942680597305298, "max_stars_count": 1, "path": "ethtx/providers/semantic_providers/semantics_database.py" }, { "content": "from mongomock import Database\n\nfrom ethtx.providers.semantic_providers.semantics_database import 
MongoCollections\n\n\nclass TestMongoSemanticsDatabase:\n def test_no_address_semantics(self, mongo_semantics_database):\n sema = mongo_semantics_database.get_address_semantics(\"mainnet\", \"not_existing\")\n assert not sema\n\n def test_save_and_get_contract_semantic(\n self, mongo_semantics_database, mongo_db: Database\n ):\n code_hash = \"test_code_hash\"\n contract_data = {\"code_hash\": code_hash, \"chain_id\": \"mainnet\"}\n\n try:\n assert 0 == mongo_db.get_collection(MongoCollections.CONTRACTS).count()\n mongo_semantics_database.insert_contract(contract_data)\n assert 1 == mongo_db.get_collection(MongoCollections.CONTRACTS).count()\n contract_from_db = mongo_semantics_database.get_contract_semantics(\n code_hash\n )\n assert contract_from_db == contract_data\n assert mongo_db.list_collection_names() == [MongoCollections.CONTRACTS]\n finally:\n mongo_db.drop_collection(MongoCollections.CONTRACTS)\n\n def test_save_and_get_address_semantic(self, mongo_db, mongo_semantics_database):\n address = \"test_address\"\n address_data = {\n \"address\": address,\n \"chain_id\": \"mainnet\",\n \"erc20\": {\n \"name\": \"test_name\",\n \"symbol\": \"test_symbol\",\n \"decimals\": \"test_decimal\",\n },\n \"contract\": \"test_contract\",\n \"name\": \"test_contract_name\",\n \"is_contract\": False,\n \"standard\": False,\n }\n\n try:\n assert 0 == mongo_db.get_collection(MongoCollections.ADDRESSES).count()\n mongo_semantics_database.insert_address(address_data)\n assert 1 == mongo_db.get_collection(MongoCollections.ADDRESSES).count()\n address_from_db = mongo_semantics_database.get_address_semantics(\n \"mainnet\", address\n )\n assert address_from_db == address_data\n assert mongo_db.list_collection_names() == [MongoCollections.ADDRESSES]\n finally:\n mongo_db.drop_collection(MongoCollections.ADDRESSES)\n", "id": "9099655", "language": "Python", "matching_score": 3.0133397579193115, "max_stars_count": 1, "path": "tests/semantics_database_test.py" }, { "content": "import pytest\nfrom mongoengine import connect\nfrom pymongo import MongoClient\n\nfrom ethtx.providers.semantic_providers.semantics_database import MongoSemanticsDatabase\n\n\[email protected]\ndef mongo_db():\n db_name = \"mongo_semantics_test\"\n client: MongoClient = connect(db=db_name, host=\"mongomock://localhost\")\n yield client.db\n client.drop_database(db_name)\n client.close()\n\n\[email protected]\ndef mongo_semantics_database(mongo_db):\n yield MongoSemanticsDatabase(mongo_db)\n", "id": "2712554", "language": "Python", "matching_score": 1.049730896949768, "max_stars_count": 1, "path": "tests/conftest.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import lru_cache\nfrom typing import Optional, List\n\nfrom ethtx.decoders.decoders.semantics import decode_events_and_functions\nfrom ethtx.models.semantics_model import (\n AddressSemantics,\n ContractSemantics,\n ParameterSemantics,\n ERC20Semantics,\n TransformationSemantics,\n FunctionSemantics,\n 
EventSemantics,\n)\nfrom ethtx.providers.etherscan_provider import EtherscanProvider\nfrom ethtx.providers.semantic_providers.semantics_database import ISemanticsDatabase\nfrom ethtx.providers.web3_provider import Web3Provider\nfrom ethtx.semantics.protocols_router import amend_contract_semantics\nfrom ethtx.semantics.standards.erc20 import ERC20_FUNCTIONS, ERC20_EVENTS\nfrom ethtx.semantics.standards.erc721 import ERC721_FUNCTIONS, ERC721_EVENTS\nfrom ethtx.semantics.solidity.precompiles import precompiles\n\n\nclass SemanticsRepository:\n def __init__(\n self,\n database_connection: ISemanticsDatabase,\n etherscan_provider: EtherscanProvider,\n web3provider: Web3Provider,\n ):\n self.database = database_connection\n self.etherscan = etherscan_provider\n self._web3provider = web3provider\n self._records: Optional[List] = None\n\n def record(self):\n \"\"\"Records is an array used to hold semantics used in tx decing process.\n This recording is used just for logging\"\"\"\n self._records = []\n\n def end_record(self) -> List:\n tmp_records = self._records\n self._records = None\n return tmp_records\n\n def _read_stored_semantics(self, address: str, chain_id: str):\n\n def decode_parameter(_parameter):\n components_semantics = []\n for component in _parameter[\"components\"]:\n components_semantics.append(decode_parameter(component))\n\n decoded_parameter = ParameterSemantics(\n _parameter[\"parameter_name\"],\n _parameter[\"parameter_type\"],\n components_semantics,\n _parameter[\"indexed\"],\n _parameter[\"dynamic\"],\n )\n\n return decoded_parameter\n\n if not address:\n return None\n\n ZERO_HASH = \"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"\n\n raw_address_semantics = self.database.get_address_semantics(chain_id, address)\n\n if raw_address_semantics:\n\n if raw_address_semantics.get(\"erc20\"):\n erc20_semantics = ERC20Semantics(\n raw_address_semantics[\"erc20\"][\"name\"],\n raw_address_semantics[\"erc20\"][\"symbol\"],\n raw_address_semantics[\"erc20\"][\"decimals\"],\n )\n else:\n erc20_semantics = None\n\n if raw_address_semantics[\"contract\"] == ZERO_HASH:\n contract_semantics = ContractSemantics(\n raw_address_semantics[\"contract\"], \"EOA\", dict(), dict(), dict()\n )\n\n else:\n\n raw_contract_semantics = self.database.get_contract_semantics(\n raw_address_semantics[\"contract\"]\n )\n events = dict()\n\n for signature, event in raw_contract_semantics[\"events\"].items():\n\n parameters_semantics = []\n for parameter in event[\"parameters\"]:\n parameters_semantics.append(decode_parameter(parameter))\n\n events[signature] = EventSemantics(\n signature,\n event[\"anonymous\"],\n event[\"name\"],\n parameters_semantics,\n )\n\n functions = dict()\n for signature, function in raw_contract_semantics[\"functions\"].items():\n\n inputs_semantics = []\n for parameter in function[\"inputs\"]:\n inputs_semantics.append(decode_parameter(parameter))\n outputs_semantics = []\n for parameter in function[\"outputs\"]:\n outputs_semantics.append(decode_parameter(parameter))\n\n functions[signature] = FunctionSemantics(\n signature, function[\"name\"], inputs_semantics, outputs_semantics\n )\n\n transformations = dict()\n for signature, parameters_transformations in raw_contract_semantics[\n \"transformations\"\n ].items():\n transformations[signature] = dict()\n for parameter, transformation in parameters_transformations.items():\n transformations[signature][parameter] = TransformationSemantics(\n transformation[\"transformed_name\"],\n 
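An illustration of the stored parameter layout that decode_parameter above expects: nested "components" for tuple types, flat otherwise (the field values are made up).

from ethtx.models.semantics_model import ParameterSemantics

raw_parameter = {
    "parameter_name": "order",
    "parameter_type": "tuple",
    "indexed": False,
    "dynamic": False,
    "components": [
        {"parameter_name": "maker", "parameter_type": "address",
         "indexed": False, "dynamic": False, "components": []},
        {"parameter_name": "amount", "parameter_type": "uint256",
         "indexed": False, "dynamic": False, "components": []},
    ],
}

# decode_parameter walks that structure recursively and produces roughly:
decoded = ParameterSemantics(
    "order", "tuple",
    [ParameterSemantics("maker", "address", []),
     ParameterSemantics("amount", "uint256", [])],
    indexed=False, dynamic=False,
)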
transformation[\"transformed_type\"],\n transformation[\"transformation\"],\n )\n\n contract_semantics = ContractSemantics(\n raw_contract_semantics[\"code_hash\"],\n raw_contract_semantics[\"name\"],\n events,\n functions,\n transformations,\n )\n\n address_semantics = AddressSemantics(\n chain_id,\n address,\n raw_address_semantics[\"name\"],\n raw_address_semantics[\"is_contract\"],\n contract_semantics,\n raw_address_semantics[\"standard\"],\n erc20_semantics,\n )\n\n return address_semantics\n\n else:\n return None\n\n @lru_cache(maxsize=128)\n def get_semantics(self, chain_id: str, address: str) -> Optional[AddressSemantics]:\n\n if not address:\n return None\n\n ZERO_HASH = \"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"\n\n address_semantics = self._read_stored_semantics(address, chain_id)\n if not address_semantics:\n\n # try to read the semantics form the Etherscan provider\n provider = self._web3provider\n code_hash = provider.get_code_hash(address)\n\n if code_hash != ZERO_HASH:\n # smart contract\n raw_semantics, decoded = self.etherscan.get_contract_abi(\n chain_id, address\n )\n if decoded and raw_semantics:\n # raw semantics received from Etherscan\n events, functions = decode_events_and_functions(\n raw_semantics[\"abi\"]\n )\n standard, standard_semantics = self._decode_standard_semantics(\n address, raw_semantics[\"name\"], events, functions\n )\n if standard == \"ERC20\":\n erc20_semantics = standard_semantics\n else:\n proxy_erc20 = provider.guess_erc20_proxy(address)\n if proxy_erc20:\n erc20_semantics = ERC20Semantics(**proxy_erc20)\n else:\n erc20_semantics = None\n contract_semantics = ContractSemantics(\n code_hash, raw_semantics[\"name\"], events, functions, dict()\n )\n address_semantics = AddressSemantics(\n chain_id,\n address,\n raw_semantics[\"name\"],\n True,\n contract_semantics,\n standard,\n erc20_semantics,\n )\n\n else:\n # try to guess if the address is a toke\n potential_erc20_semantics = provider.guess_erc20_token(address)\n if potential_erc20_semantics:\n standard = \"ERC20\"\n erc20_semantics = ERC20Semantics(\n potential_erc20_semantics[\"name\"],\n potential_erc20_semantics[\"symbol\"],\n potential_erc20_semantics[\"decimals\"],\n )\n else:\n standard = None\n erc20_semantics = None\n\n contract_semantics = ContractSemantics(\n code_hash, address, dict(), dict(), dict()\n )\n address_semantics = AddressSemantics(\n chain_id,\n address,\n address,\n True,\n contract_semantics,\n standard,\n erc20_semantics,\n )\n\n else:\n # externally owned address\n contract_semantics = ContractSemantics(\n ZERO_HASH, \"EOA\", dict(), dict(), dict()\n )\n address_semantics = AddressSemantics(\n chain_id, address, address, False, contract_semantics, None, None\n )\n\n self.update_semantics(address_semantics)\n\n # amend semantics with locally stored updates\n amend_contract_semantics(address_semantics.contract)\n\n return address_semantics\n\n def _decode_standard_semantics(self, address, name, events, functions):\n standard = None\n standard_semantics = None\n\n if not address:\n return standard, standard_semantics\n\n if all(erc20_event in events for erc20_event in ERC20_EVENTS) and all(\n erc20_function in functions for erc20_function in ERC20_FUNCTIONS\n ):\n standard = \"ERC20\"\n try:\n provider = self._web3provider\n token_data = provider.get_erc20_token(address, name, functions)\n standard_semantics = ERC20Semantics(\n token_data[\"name\"], token_data[\"symbol\"], token_data[\"decimals\"]\n )\n except Exception:\n 
standard_semantics = ERC20Semantics(name, name, 18)\n elif all(erc721_event in events for erc721_event in ERC721_EVENTS) and all(\n erc721_function in functions for erc721_function in ERC721_FUNCTIONS\n ):\n standard = \"ERC721\"\n standard_semantics = None\n\n return standard, standard_semantics\n\n @lru_cache(maxsize=128)\n def get_event_abi(self, chain_id, address, signature):\n\n if not address:\n return None\n\n semantics = self.get_semantics(chain_id, address)\n event_semantics = (\n semantics.contract.events.get(signature) if semantics else None\n )\n\n return event_semantics\n\n @lru_cache(maxsize=128)\n def get_transformations(self, chain_id, address, signature):\n\n if not address:\n return None\n\n semantics = self.get_semantics(chain_id, address)\n if semantics:\n transformations = semantics.contract.transformations.get(signature)\n else:\n transformations = None\n\n return transformations\n\n @lru_cache(maxsize=128)\n def get_anonymous_event_abi(self, chain_id, address):\n\n if not address:\n return None\n\n semantics = self.get_semantics(chain_id, address)\n event_semantics = None\n if semantics:\n anonymous_events = {\n signature\n for signature, event in semantics.contract.events.items()\n if event.anonymous\n }\n if len(anonymous_events) == 1:\n event_signature = anonymous_events.pop()\n event_semantics = semantics.contract.events[event_signature]\n\n return event_semantics\n\n @lru_cache(maxsize=128)\n def get_function_abi(self, chain_id, address, signature):\n\n if not address:\n return None\n\n semantics = self.get_semantics(chain_id, address)\n function_semantics = (\n semantics.contract.functions.get(signature) if semantics else None\n )\n\n return function_semantics\n\n @lru_cache(maxsize=128)\n def get_constructor_abi(self, chain_id, address):\n\n if not address:\n return None\n\n semantics = self.get_semantics(chain_id, address)\n constructor_semantics = (\n semantics.contract.functions.get(\"constructor\") if semantics else None\n )\n if constructor_semantics:\n constructor_semantics.outputs.append(\n ParameterSemantics(\"__create_output__\", \"ignore\", [], False, True)\n )\n\n return constructor_semantics\n\n def get_address_label(self, chain_id, address, token_proxies=None):\n\n if not address:\n return ''\n\n if int(address, 16) in precompiles:\n contract_label = 'Precompiled'\n else:\n semantics = self.get_semantics(chain_id, address)\n if semantics.erc20:\n contract_label = semantics.erc20.symbol\n elif token_proxies and address in token_proxies:\n contract_label = token_proxies[address][1] + \"_proxy\"\n else:\n contract_label = semantics.name if semantics and semantics.name else address\n\n return contract_label\n\n @lru_cache(maxsize=128)\n def check_is_contract(self, chain_id, address):\n\n if not address:\n return False\n\n semantics = self.get_semantics(chain_id, address)\n is_contract = semantics is not None and semantics.is_contract\n\n return is_contract\n\n @lru_cache(maxsize=128)\n def get_standard(self, chain_id, address):\n\n if not address:\n return None\n\n semantics = self.get_semantics(chain_id, address)\n standard = semantics.standard if semantics is not None else None\n\n return standard\n\n def get_token_data(self, chain_id, address, token_proxies=None):\n\n if not address:\n return None, None, None\n\n semantics = self.get_semantics(chain_id, address)\n if semantics and semantics.erc20:\n token_name = (\n semantics.erc20.name if semantics and semantics.erc20 else address\n )\n token_symbol = (\n semantics.erc20.symbol if semantics and 
semantics.erc20 else \"Unknown\"\n )\n token_decimals = (\n semantics.erc20.decimals if semantics and semantics.erc20 else 18\n )\n elif token_proxies and address in token_proxies:\n token_name, token_symbol, token_decimals = token_proxies[address]\n else:\n token_name = address\n token_symbol = \"Unknown\"\n token_decimals = 18\n\n return token_name, token_symbol, token_decimals\n\n def update_address(self, chain_id, address, contract):\n\n updated_address = {\"network\": chain_id, \"address\": address, **contract}\n self.database.insert_address(address_data=updated_address, update_if_exist=True)\n\n return updated_address\n\n def update_semantics(self, semantics):\n\n if not semantics:\n return\n\n address_semantics = semantics.json(False)\n contract_semantics = semantics.contract.json()\n\n self.database.insert_contract(contract_semantics, update_if_exist=True)\n self.database.insert_address(address_semantics, update_if_exist=True)\n", "id": "1165440", "language": "Python", "matching_score": 4.755014419555664, "max_stars_count": 1, "path": "ethtx/providers/semantic_providers/semantics_repository.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom copy import deepcopy\nfrom typing import List, Dict, Optional, Union\n\nimport jsonpickle\n\nfrom ethtx.utils.pickable import JsonObject\n\n\nclass TransformationSemantics:\n transformed_name: Optional[str]\n transformed_type: Optional[str]\n transformation: Optional[str]\n\n def __init__(\n self,\n transformed_name: Optional[str] = None,\n transformed_type: Optional[str] = None,\n transformation: Optional[str] = \"\",\n ):\n self.transformed_name = transformed_name\n self.transformed_type = transformed_type\n self.transformation = transformation\n\n\nclass ParameterSemantics:\n parameter_name: str\n parameter_type: str\n indexed: bool\n dynamic: bool\n components: list\n\n def __init__(\n self,\n parameter_name: str,\n parameter_type: str,\n components: list,\n indexed: bool = False,\n dynamic: bool = False,\n ):\n self.parameter_name = parameter_name\n self.parameter_type = parameter_type\n self.components = components\n self.indexed = indexed\n self.dynamic = dynamic\n\n\nclass EventSemantics:\n signature: str\n anonymous: bool\n name: str\n parameters: List[ParameterSemantics]\n\n def __init__(\n self,\n signature: str,\n anonymous: bool,\n name: str,\n parameters: List[ParameterSemantics],\n ):\n self.signature = signature\n self.anonymous = anonymous\n self.name = name\n self.parameters = parameters\n\n\nclass FunctionSemantics:\n signature: str\n name: str\n inputs: List[ParameterSemantics]\n outputs: List[ParameterSemantics]\n\n def __init__(\n self,\n signature: str,\n name: str,\n inputs: List[ParameterSemantics],\n outputs: List[ParameterSemantics],\n ):\n self.signature = signature\n self.name = name\n self.inputs = inputs\n self.outputs = outputs\n\n\nclass ERC20Semantics:\n name: str\n symbol: str\n decimals: int\n\n def __init__(self, name: str, symbol: str, decimals: int):\n 
self.name = name\n self.symbol = symbol\n self.decimals = decimals\n\n\nclass ContractSemantics(JsonObject):\n code_hash: str\n name: str\n events: Dict[str, EventSemantics]\n functions: Dict[str, FunctionSemantics]\n transformations: [Dict[str, Dict[str, TransformationSemantics]]]\n\n def __init__(\n self,\n code_hash: str,\n name: str,\n events: Dict[str, EventSemantics],\n functions: Dict[str, FunctionSemantics],\n transformations: [Dict[str, TransformationSemantics]],\n ):\n self.code_hash = code_hash\n self.name = name\n self.events = events\n self.functions = functions\n self.transformations = transformations\n\n\nclass AddressSemantics(JsonObject):\n chain_id: str\n address: str\n name: str\n is_contract: bool\n contract: Union[ContractSemantics, str]\n standard: Optional[str]\n erc20: Optional[ERC20Semantics]\n\n def __init__(\n self,\n chain_id: str,\n address: str,\n name: str,\n is_contract: bool,\n contract: ContractSemantics,\n standard: Optional[str],\n erc20: Optional[ERC20Semantics],\n ):\n self.chain_id = chain_id\n self.address = address\n self.name = name\n self.is_contract = is_contract\n self.contract = contract\n self.standard = standard\n self.erc20 = erc20\n\n def json_str(self, entire: Optional[bool] = True) -> str:\n \"\"\"Return object as encoded json.\"\"\"\n if entire:\n return jsonpickle.encode(self, unpicklable=False)\n\n new_obj = deepcopy(self)\n new_obj.contract = new_obj.contract.code_hash\n\n return jsonpickle.encode(new_obj, unpicklable=False)\n\n def json(self, entire: Optional[bool] = True) -> Dict:\n \"\"\"Return object as decoded dict.\"\"\"\n if entire:\n return jsonpickle.decode(self.json_str(entire))\n\n new_obj = deepcopy(self)\n new_obj.contract = new_obj.contract.code_hash\n return jsonpickle.decode(self.json_str(entire))\n", "id": "1779766", "language": "Python", "matching_score": 2.708139657974243, "max_stars_count": 1, "path": "ethtx/models/semantics_model.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Dict, Tuple\n\nfrom ethtx.models.semantics_model import (\n ParameterSemantics,\n EventSemantics,\n FunctionSemantics,\n TransformationSemantics,\n)\n\n\ndef _decode_parameters_list(raw_parameters_list: list) -> List[ParameterSemantics]:\n parameters_list = []\n\n if not raw_parameters_list:\n return parameters_list\n\n for raw_parameter_semantics in raw_parameters_list:\n\n if \"indexed\" in raw_parameter_semantics:\n indexed = raw_parameter_semantics[\"indexed\"]\n else:\n indexed = False\n\n if \"dynamic\" in raw_parameter_semantics:\n dynamic = raw_parameter_semantics[\"dynamic\"]\n else:\n dynamic = False\n\n if raw_parameter_semantics[\"type\"] == \"tuple\":\n components = _decode_parameters_list(raw_parameter_semantics[\"components\"])\n else:\n components = []\n\n parameters_list.append(\n ParameterSemantics(\n raw_parameter_semantics[\"name\"],\n raw_parameter_semantics[\"type\"],\n components,\n indexed,\n dynamic,\n )\n )\n return parameters_list\n\n\ndef 
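A small sketch of the json(entire=False) behaviour of AddressSemantics shown above, which update_semantics in the repository relies on: the embedded contract object is collapsed to its code hash so that addresses and contracts can be stored as separate documents.

from ethtx.models.semantics_model import AddressSemantics, ContractSemantics

EMPTY_CODE_HASH = "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"

contract = ContractSemantics(EMPTY_CODE_HASH, "EOA", dict(), dict(), dict())
address = AddressSemantics("mainnet", "0x0000000000000000000000000000000000001234",
                           "example address", False, contract, None, None)

address_doc = address.json(False)       # "contract" becomes just the code hash string
contract_doc = address.contract.json()  # full contract document, keyed later by code_hash
print(address_doc["contract"])          # -> the hash string, not the nested object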
decode_events_and_functions(\n abi: dict,\n) -> Tuple[Dict[str, EventSemantics], Dict[str, FunctionSemantics]]:\n events = dict()\n for signature, raw_event_semantics in abi.get(\"events\", {}).items():\n parameters = _decode_parameters_list(raw_event_semantics.get(\"parameters\"))\n events[signature] = EventSemantics(\n signature,\n raw_event_semantics[\"anonymous\"],\n raw_event_semantics[\"name\"],\n parameters,\n )\n\n functions = dict()\n for signature, raw_function_semantics in abi.get(\"functions\", {}).items():\n if raw_function_semantics:\n inputs = _decode_parameters_list(raw_function_semantics.get(\"inputs\"))\n outputs = _decode_parameters_list(raw_function_semantics.get(\"outputs\"))\n name = raw_function_semantics[\"name\"]\n else:\n inputs = outputs = []\n name = signature\n\n functions[signature] = FunctionSemantics(signature, name, inputs, outputs)\n\n return events, functions\n\n\ndef decode_transformations(\n raw_transformations: dict,\n) -> Dict[str, Dict[str, TransformationSemantics]]:\n transformations = dict()\n if raw_transformations:\n for signature, transformation in raw_transformations.items():\n transformations[signature] = dict()\n for parameter_name, parameter_transformation in transformation.get(\n \"arguments\", dict()\n ).items():\n transformations[signature][parameter_name] = TransformationSemantics(\n parameter_transformation.get(\"name\"),\n parameter_transformation.get(\"type\"),\n parameter_transformation.get(\"value\"),\n )\n return transformations\n", "id": "10115173", "language": "Python", "matching_score": 1.9541155099868774, "max_stars_count": 1, "path": "ethtx/decoders/decoders/semantics.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport logging\nfrom functools import lru_cache\nfrom typing import Dict, Optional\n\nimport requests\nfrom web3 import Web3\n\nfrom ethtx.exceptions import ProcessingException\n\nlog = logging.getLogger(__name__)\n\n\nclass EtherscanProvider:\n api_key: str\n endpoints: Dict[str, str]\n default_chain: Optional[str]\n\n def __init__(\n self, api_key, nodes: Dict[str, str], default_chain_id: Optional[str] = None\n ):\n self.api_key = api_key\n self.endpoints = nodes\n self.default_chain = default_chain_id\n\n def _get_chain_id(self, chain_id):\n _id = chain_id or self.default_chain\n\n if _id is None:\n raise ProcessingException(\n \"chain_id must be provided as argument or constructor default\"\n )\n return _id\n\n @lru_cache(maxsize=1024)\n def _get_contract_abi(self, chain_id, contract_name) -> Dict:\n # Etherscan connection parameters\n params = dict(\n module=\"contract\",\n action=\"getsourcecode\",\n address=contract_name,\n apikey=self.api_key,\n )\n\n chain_id = self._get_chain_id(chain_id)\n headers = {\"User-Agent\": \"API\"}\n resp = requests.get(\n url=self.endpoints[chain_id], params=params, headers=headers\n )\n\n if resp.status_code != 200:\n raise Exception(\n \"Invalid status code for etherscan get: \" + str(resp.status_code)\n )\n\n 
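An example of the intermediate ABI layout that decode_events_and_functions above consumes; it matches the dict produced by EtherscanProvider._parse_abi further down in this dump. The field values are illustrative, though 0xa9059cbb is the real selector of transfer(address,uint256).

from ethtx.decoders.decoders.semantics import decode_events_and_functions

abi = {
    "events": {},
    "functions": {
        "0xa9059cbb": {
            "signature": "0xa9059cbb",
            "name": "transfer",
            "inputs": [
                {"name": "to", "type": "address", "dynamic": False},
                {"name": "value", "type": "uint256", "dynamic": False},
            ],
            "outputs": [{"name": "", "type": "bool", "dynamic": False}],
        }
    },
}

events, functions = decode_events_and_functions(abi)
print(functions["0xa9059cbb"].name)          # -> "transfer"
print(len(functions["0xa9059cbb"].inputs))   # -> 2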
return resp.json()\n\n def get_contract_abi(self, chain_id, contract_name):\n\n decoded = False\n raw_abi = []\n\n try:\n resp = self._get_contract_abi(chain_id, contract_name)\n if resp[\"status\"] == \"1\" and resp[\"message\"] == \"OK\":\n contract_name = resp[\"result\"][0][\"ContractName\"]\n if (\n len(resp[\"result\"][0][\"ABI\"])\n and resp[\"result\"][0][\"ABI\"] != \"Contract source code not verified\"\n ):\n raw_abi = json.loads(resp[\"result\"][0][\"ABI\"])\n decoded = True\n\n except Exception as e:\n log.exception(\n \"Etherscan connection failed while getting abi for %s on %s\",\n contract_name,\n chain_id,\n exc_info=e,\n )\n\n abi = self._parse_abi(raw_abi)\n\n return dict(name=contract_name, abi=abi), decoded\n\n # helper function decoding contract ABI\n @staticmethod\n def _parse_abi(json_abi) -> Dict:\n\n # helper function to recursively parse components\n def _parse_components(components):\n\n comp_canonical = \"(\"\n comp_inputs = list()\n\n for i, component in enumerate(components):\n\n argument = dict(name=component[\"name\"], type=component[\"type\"])\n\n if component[\"type\"][:5] == \"tuple\":\n sub_canonical, sub_components = _parse_components(\n component[\"components\"]\n )\n comp_canonical += sub_canonical + component[\"type\"][5:]\n argument[\"components\"] = sub_components\n else:\n comp_canonical += component[\"type\"]\n sub_components = []\n\n if i < len(components) - 1:\n comp_canonical += \",\"\n\n if (\n component[\"type\"] in (\"string\", \"bytes\")\n or component[\"type\"][-2:] == \"[]\"\n ):\n argument[\"dynamic\"] = True\n elif component[\"type\"] == \"tuple\":\n argument[\"dynamic\"] = any(c[\"dynamic\"] for c in sub_components)\n else:\n argument[\"dynamic\"] = False\n\n if \"indexed\" in component:\n argument[\"indexed\"] = component[\"indexed\"]\n\n comp_inputs.append(argument)\n\n comp_canonical += \")\"\n\n return comp_canonical, comp_inputs\n\n functions = dict()\n events = dict()\n\n for item in json_abi:\n\n if \"type\" in item:\n\n # parse contract functions\n if item[\"type\"] == \"constructor\":\n _, inputs = _parse_components(item[\"inputs\"])\n functions[\"constructor\"] = dict(\n signature=\"constructor\",\n name=\"constructor\",\n inputs=inputs,\n outputs=[],\n )\n\n elif item[\"type\"] == \"fallback\":\n functions[\"fallback\"] = {}\n\n elif item[\"type\"] == \"function\":\n canonical, inputs = _parse_components(item[\"inputs\"])\n canonical = item[\"name\"] + canonical\n function_hash = Web3.sha3(text=canonical).hex()\n signature = function_hash[0:10]\n\n _, outputs = _parse_components(item[\"outputs\"])\n\n functions[signature] = dict(\n signature=signature,\n name=item[\"name\"],\n inputs=inputs,\n outputs=outputs,\n )\n\n # parse contract events\n elif item[\"type\"] == \"event\":\n canonical, parameters = _parse_components(item[\"inputs\"])\n canonical = item[\"name\"] + canonical\n event_hash = Web3.sha3(text=canonical).hex()\n signature = event_hash\n\n events[signature] = dict(\n signature=signature,\n name=item[\"name\"],\n anonymous=item[\"anonymous\"],\n parameters=parameters,\n )\n\n return dict(functions=functions, events=events)\n", "id": "7183657", "language": "Python", "matching_score": 3.065859794616699, "max_stars_count": 1, "path": "ethtx/providers/etherscan_provider.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: 
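How the signatures computed in _parse_abi above come about: the canonical text signature is keccak-256 hashed, then truncated to 4 bytes for function selectors and kept whole for event topics (Web3.sha3 is the legacy alias of Web3.keccak that this code uses).

from web3 import Web3

selector = Web3.sha3(text="transfer(address,uint256)").hex()[0:10]
print(selector)   # -> 0xa9059cbb

topic = Web3.sha3(text="Transfer(address,address,uint256)").hex()
print(topic)      # -> 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef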
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n__all__ = [\"Web3ConnectionException\", \"ProcessingException\", \"InvalidTransactionHash\"]\n\n\nclass Web3ConnectionException(Exception):\n \"\"\"Web3 Connection Exception.\"\"\"\n\n def __init__(self):\n super().__init__(\"Couldn't connect to web3provider\")\n\n\nclass ProcessingException(Exception):\n \"\"\"Processing Exception.\"\"\"\n\n def __init__(self, msg):\n super().__init__(\"Exception processing: \" + msg)\n\n\nclass InvalidTransactionHash(Exception):\n \"\"\"Invalid Transaction Hash.\"\"\"\n\n def __init__(self, tx_hash):\n super().__init__(\"Invalid transaction hash provided: \" + tx_hash)\n", "id": "10945864", "language": "Python", "matching_score": 0.8135114908218384, "max_stars_count": 1, "path": "ethtx/exceptions.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom datetime import datetime\n\nfrom app.engine.decoders.trace import decode_trace\n\n\ndef decode_transaction(chain_id: str, block: dict, transaction: dict, traces: dict) -> dict:\n\n decoded_transaction = dict()\n decoded_transaction[\"chain_id\"] = chain_id or 'mainnet'\n decoded_transaction[\"block_number\"] = transaction[\"block_number\"] if \"block_number\" in transaction else None\n decoded_transaction[\"block_hash\"] = transaction[\"block_hash\"] if \"block_hash\" in transaction else None\n\n decoded_transaction[\"timestamp\"] = (\n datetime.fromtimestamp(block[\"timestamp\"])\n if block and \"timestamp\" in block\n else None\n )\n\n decoded_transaction[\"transaction_hash\"] = transaction[\"transaction\"][\"transaction_hash\"]\n decoded_transaction[\"signature\"] = transaction[\"transaction\"].get(\"signature\", [])\n\n decoded_transaction[\"type\"] = transaction[\"transaction\"][\"type\"]\n decoded_transaction[\"status\"] = transaction[\"status\"]\n\n decoded_transaction[\"error\"] = (\n transaction[\"transaction_failure_reason\"][\"error_message\"]\n if \"transaction_failure_reason\" in transaction\n else None\n )\n\n receipt = (\n [\n receipt\n for receipt in block[\"transaction_receipts\"]\n if receipt[\"transaction_hash\"]\n == transaction[\"transaction\"][\"transaction_hash\"]\n ][0]\n if block and block != 'pending'\n else None\n )\n\n decoded_transaction[\"transaction_index\"] = receipt[\"transaction_index\"] if receipt else None\n decoded_transaction['l2_to_l1_messages'] = receipt['l2_to_l1_messages'] if receipt else []\n\n decoded_transaction['calls'], decoded_transaction['events'] = \\\n decode_trace(chain_id, transaction[\"block_hash\"], traces[\"function_invocation\"], None, 0)\n\n if decoded_transaction['events']:\n decoded_transaction['events'].sort(key=lambda x: 
x['order'])\n\n decoded_transaction['execution_resources'] = receipt['execution_resources'] if receipt else []\n\n return decoded_transaction\n", "id": "4110727", "language": "Python", "matching_score": 1.959174394607544, "max_stars_count": 2, "path": "stark_tx/app/app/engine/decoders/transaction.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom typing import Optional\n\nfrom flask import Blueprint, render_template\n\nfrom app.engine.decoders.transaction import decode_transaction\nfrom app.engine.providers.sequencer import get_transaction, get_block_hash, get_transaction_trace\nfrom app.frontend import frontend_route\n\nbp = Blueprint(\"transactions\", __name__)\n\n\n@frontend_route(bp, \"/<string:tx_hash>/\")\n@frontend_route(bp, \"/<string:chain_id>/<string:tx_hash>/\")\ndef route_transaction(\n tx_hash: str, chain_id: Optional[str] = None\n) -> tuple[\"render_template\", int]:\n tx = starktx_transaction(chain_id, tx_hash)\n return render_template(\"transaction.html\", transaction=tx), 200\n\n\ndef starktx_transaction(chain_id: str, transaction_hash: str) -> dict:\n\n raw_transaction = get_transaction(chain_id, transaction_hash)\n\n raw_block = (\n get_block_hash(chain_id, raw_transaction[\"block_hash\"])\n if \"block_hash\" in raw_transaction\n else None\n ) if raw_transaction[\"block_hash\"] != 'pending' else 'pending'\n\n raw_traces = get_transaction_trace(chain_id, transaction_hash)\n\n if not raw_traces or not raw_traces['function_invocation']['selector']:\n raw_traces = dict(function_invocation=\n dict(type=raw_transaction[\"transaction\"][\"type\"],\n caller_address=None,\n contract_address=raw_transaction[\"transaction\"][\"contract_address\"],\n code_address=raw_transaction[\"transaction\"][\"contract_address\"],\n selector=raw_transaction[\"transaction\"].get(\"entry_point_selector\", 'constructor'),\n entry_point_type=raw_transaction[\"transaction\"].get(\"entry_point_type\", 'CONSTRUCTOR'),\n calldata=raw_transaction[\"transaction\"].get(\"calldata\") or\n raw_transaction[\"transaction\"].get(\"constructor_calldata\") or [],\n result=raw_transaction[\"transaction\"].get(\"result\", []),\n internal_calls=[]\n ))\n\n decoded_transaction = decode_transaction(chain_id, raw_block, raw_transaction, raw_traces)\n\n return decoded_transaction\n", "id": "7619246", "language": "Python", "matching_score": 2.592066764831543, "max_stars_count": 2, "path": "stark_tx/app/app/frontend/transaction.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific 
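A self-contained illustration of the receipt lookup used in decode_transaction above: the receipt whose transaction_hash matches the decoded transaction is picked out of the block (all values below are made up).

block = {
    "timestamp": 1650000000,
    "transaction_receipts": [
        {"transaction_hash": "0xaaa", "transaction_index": 0,
         "l2_to_l1_messages": [], "execution_resources": {}},
        {"transaction_hash": "0xbbb", "transaction_index": 1,
         "l2_to_l1_messages": [], "execution_resources": {}},
    ],
}

tx_hash = "0xbbb"
receipt = [r for r in block["transaction_receipts"]
           if r["transaction_hash"] == tx_hash][0]
print(receipt["transaction_index"])   # -> 1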
language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom flask import render_template, Blueprint\n\nfrom app.core.config import settings\nfrom . import frontend_route\n\nbp = Blueprint(\"static\", __name__)\n\n\n@frontend_route(bp, \"/\")\ndef route_home():\n return render_template(\"index.html\", config=settings.SEQUENCERS.items()), 200\n", "id": "10081177", "language": "Python", "matching_score": 1.3544590473175049, "max_stars_count": 2, "path": "stark_tx/app/app/frontend/starktx.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nimport secrets\nfrom enum import Enum, EnumMeta\n\nfrom pydantic import AnyHttpUrl, BaseSettings\n\nfrom app.base_exceptions import NotSupportedChainError\n\n\nclass Settings(BaseSettings):\n\n SECRET_KEY: str = secrets.token_urlsafe(32)\n # 60 minutes * 24 hours * 8 days\n ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8\n\n # StarkNet environments\n SEQUENCERS = dict()\n SEQUENCERS['mainnet']: AnyHttpUrl = \"https://alpha-mainnet.starknet.io/feeder_gateway\"\n SEQUENCERS['testnet']: AnyHttpUrl = \"https://alpha4.starknet.io/feeder_gateway\"\n SEQUENCERS['integration']: AnyHttpUrl = \"https://external.integration.starknet.io/feeder_gateway\"\n\n DEFAULT_SEQUENCER_URL: AnyHttpUrl = SEQUENCERS['mainnet']\n\n PROJECT_NAME: str\n\n class Config:\n case_sensitive = True\n use_enum_values = True\n\n\nsettings = Settings()\n\n\nclass EnumValidator(EnumMeta):\n def __getitem__(cls, name):\n try:\n if not name:\n return super().__getitem__(\"DEFAULT\")\n return super().__getitem__(name)\n except KeyError:\n raise NotSupportedChainError(name)\n\n\nclass SequencerURL(str, Enum, metaclass=EnumValidator):\n\n DEFAULT = settings.DEFAULT_SEQUENCER_URL\n\n mainnet = settings.SEQUENCERS['mainnet']\n testnet = settings.SEQUENCERS['testnet']\n integration = settings.SEQUENCERS['integration']\n", "id": "11810148", "language": "Python", "matching_score": 2.606279134750366, "max_stars_count": 2, "path": "stark_tx/app/app/core/config.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom enum import Enum, EnumMeta\n\n\nclass MyEnumMeta(EnumMeta):\n def __contains__(cls, item):\n return item in [k for k in cls.__members__.keys()]\n\n\nclass SequencerStatus(Enum, metaclass=MyEnumMeta):\n NOT_RECEIVED = False, 404\n ACCEPTED_ON_L2 = True, 200\n", "id": "8629991", "language": "Python", "matching_score": 0.14083677530288696, "max_stars_count": 2, 
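The EnumValidator metaclass above makes chain-id lookups forgiving: a known name resolves to its sequencer URL, a falsy name falls back to DEFAULT, and an unknown name raises NotSupportedChainError. A quick sketch (importing app.core.config requires the PROJECT_NAME environment variable, since Settings declares it without a default):

from app.base_exceptions import NotSupportedChainError
from app.core.config import SequencerURL

print(SequencerURL["testnet"].value)                # -> https://alpha4.starknet.io/feeder_gateway
print(SequencerURL[None] is SequencerURL.DEFAULT)   # falsy chain id -> DEFAULT (mainnet URL)

try:
    SequencerURL["unknown-chain"]
except NotSupportedChainError:
    print("chain id not supported")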
"path": "stark_tx/app/app/engine/providers/status.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nimport re\n\nfrom flask import request\n\n\ndef extract_tx_hash_from_req() -> str:\n \"\"\"Extract tx hash from request url.\"\"\"\n hash_match = re.findall(r\"(0x)?([A-Fa-f0-9]{63})\", request.url)\n\n return (\n f\"{hash_match[0][0]}{hash_match[0][1]}\"\n if hash_match and len(hash_match[0]) == 2\n else \"\"\n )\n", "id": "12303178", "language": "Python", "matching_score": 0.40664762258529663, "max_stars_count": 2, "path": "stark_tx/app/app/frontend/deps.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nimport logging\nfrom functools import wraps\nfrom typing import Callable, Optional\n\nfrom flask import Blueprint, render_template\nfrom requests import HTTPError\nfrom werkzeug.exceptions import HTTPException\n\nfrom app.base_exceptions import NotSupportedChainError, TransactionStatusError\nfrom app.frontend.deps import extract_tx_hash_from_req\n\nlog = logging.getLogger(__name__)\n\nexceptions_bp = Blueprint(\"exceptions\", __name__)\n\n\ndef render_error_page(status: Optional[int] = 500):\n \"\"\"Render error page.\"\"\"\n\n def _render_error_page(f: Callable):\n @wraps(f)\n def wrapper(*args, **kwargs):\n error = f(*args, **kwargs)\n status_code = status\n if isinstance(error, HTTPException):\n error, status_code = error.description, error.code\n elif isinstance(error, HTTPError):\n error, status_code = (\n error.response.json()[\"message\"],\n error.response.status_code,\n )\n return (\n render_template(\n \"exception.html\",\n status_code=status_code,\n error=error,\n tx_hash=extract_tx_hash_from_req(),\n ),\n status_code,\n )\n\n return wrapper\n\n return _render_error_page\n\n\n@exceptions_bp.app_errorhandler(HTTPException)\n@render_error_page()\ndef handle_all_http_exceptions(error: HTTPException) -> HTTPException:\n \"\"\"All HTTP Exceptions handler.\"\"\"\n return error\n\n\n@exceptions_bp.app_errorhandler(NotSupportedChainError)\n@render_error_page(status=501)\ndef handle_not_supported_chain_error(error: NotSupportedChainError) -> str:\n \"\"\"Not supported chain error handler.\"\"\"\n return str(error)\n\n\n@exceptions_bp.app_errorhandler(HTTPError)\n@render_error_page()\ndef handle_starknet_api_errors(error: HTTPError) -> HTTPError:\n \"\"\"StarkNet API errors handler.\"\"\"\n return 
error\n\n\n@exceptions_bp.app_errorhandler(TransactionStatusError)\n@render_error_page(status=404)\ndef handle_transaction_status_error(error: TransactionStatusError) -> str:\n \"\"\"Transaction status error handler.\"\"\"\n return str(error)\n\n\n@exceptions_bp.app_errorhandler(Exception)\n@render_error_page(status=500)\ndef handle_all_exceptions(error: Exception) -> str:\n \"\"\"All Exceptions handler.\"\"\"\n log.exception(str(error))\n\n return \"Unexpected error\"\n", "id": "12662357", "language": "Python", "matching_score": 4.020109176635742, "max_stars_count": 2, "path": "stark_tx/app/app/frontend/exceptions.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom functools import wraps\nfrom typing import Callable\n\nfrom requests import HTTPError\n\nfrom app.base_exceptions import TransactionStatusError\nfrom app.engine.providers.status import SequencerStatus\nfrom app.engine.types import TStarkNetAPIHandler, TStarkNetAPIResponse\n\n\n# TODO: definitely handle these trash requests exceptions\ndef starknet_api_handler(\n func: Callable[..., TStarkNetAPIHandler]\n) -> Callable[..., TStarkNetAPIHandler]:\n \"\"\" StarkNet API handler.\"\"\"\n\n @wraps(func)\n def wrapped(*args, **kwargs) -> TStarkNetAPIResponse:\n try:\n response = func(*args, **kwargs)\n response.raise_for_status()\n\n json = response.json()\n\n if json.get(\"status\") and json[\"status\"] in SequencerStatus:\n if SequencerStatus[json[\"status\"]].value[0] is False:\n raise TransactionStatusError(json[\"status\"])\n\n return json\n except HTTPError as e:\n raise HTTPError(response=e.response)\n\n return wrapped\n", "id": "8510557", "language": "Python", "matching_score": 0.9611287117004395, "max_stars_count": 2, "path": "stark_tx/app/app/engine/decorators.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nfrom functools import wraps\nfrom typing import Callable, Dict, Optional, Union, Type\n\nfrom flask import Blueprint, Flask\n\nfrom app import factory\nfrom app.engine.providers.semantics import load_semantics\n\n\ndef create_app(settings_override: Optional[Union[Dict, Type]] = None) -> Flask:\n \"\"\"Returns Frontend app instance.\"\"\"\n app = factory.create_app(\n __name__,\n __path__,\n settings_override,\n template_folder=\"frontend/templates\",\n static_folder=\"frontend/static\",\n )\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n load_semantics()\n\n return 
app\n\n\ndef frontend_route(bp: Blueprint, *args, **kwargs):\n \"\"\"Route in blueprint context.\"\"\"\n\n def decorator(f: Callable):\n @bp.route(*args, **kwargs)\n @wraps(f)\n def wrapper(*args, **kwargs):\n return f(*args, **kwargs)\n\n f.__name__ = str(id(f)) + f.__name__\n return f\n\n return decorator\n", "id": "4053430", "language": "Python", "matching_score": 2.7226104736328125, "max_stars_count": 2, "path": "stark_tx/app/app/frontend/__init__.py" }, { "content": "# Copyright 2022 Token Flow Insights\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\nimport os\nfrom typing import Optional, Dict\n\nfrom flask import Flask\n\nfrom .config import Config\nfrom .helpers import class_import, register_blueprints\nfrom .logger import setup_logging\n\nenv = os.getenv(\"ENV\", \"development\").capitalize()\nconfig_class = f\"app.config.{env}Config\"\nconfig: Config = class_import(config_class)\n\n\ndef create_app(\n package_name: str,\n package_path: str,\n settings_override: Optional[Dict] = None,\n **app_kwargs,\n) -> Flask:\n \"\"\"\n Returns a :class:`Flask` application instance\n :param package_name: application package name\n :param package_path: application package path\n :param settings_override: a dictionary of settings to override\n :param app_kwargs: additional app kwargs\n \"\"\"\n app = Flask(__name__, instance_relative_config=True, **app_kwargs)\n\n app.config.from_object(config)\n setup_logging(app=app)\n app.config.from_object(settings_override)\n\n register_blueprints(app, package_name, package_path)\n\n return app\n", "id": "2036738", "language": "Python", "matching_score": 0.022491058334708214, "max_stars_count": 2, "path": "stark_tx/app/app/factory.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom ethtx.models.semantics_model import FunctionSemantics, ParameterSemantics\n\n\nprecompiles = {\n 1: FunctionSemantics(\n \"\",\n \"ecrecover\",\n [\n ParameterSemantics(\"hash\", \"bytes32\", []),\n ParameterSemantics(\"v\", \"bytes8\", []),\n ParameterSemantics(\"r\", \"bytes32\", []),\n ParameterSemantics(\"s\", \"bytes32\", []),\n ],\n [ParameterSemantics(\"\", \"address\", [])],\n ),\n 2: FunctionSemantics(\n \"\",\n \"sha256\",\n [\n ParameterSemantics(\"data\", \"raw\", [])\n ],\n [ParameterSemantics(\"\", \"bytes32\", [])],\n ),\n 3: FunctionSemantics(\n \"\",\n \"ripemd160\",\n [\n ParameterSemantics(\"data\", \"raw\", [])\n ],\n [ParameterSemantics(\"\", \"bytes32\", [])],\n ),\n 4: FunctionSemantics(\n \"\",\n 
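A usage sketch of the frontend_route helper defined above; renaming f with its id() appears to exist so the same view function can be registered under several routes without Flask endpoint-name collisions. The route below is purely hypothetical.

from flask import Blueprint

from app.frontend import frontend_route

bp = Blueprint("example", __name__)

@frontend_route(bp, "/ping/")
def ping():
    return "pong", 200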
\"datacopy\",\n [\n ParameterSemantics(\"data\", \"raw\", [])\n ],\n [ParameterSemantics(\"\", \"raw\", [])],\n ),\n 5: FunctionSemantics(\n \"\",\n \"bigModExp\",\n [\n ParameterSemantics(\"base\", \"bytes32\", []),\n ParameterSemantics(\"exp\", \"bytes32\", []),\n ParameterSemantics(\"mod\", \"bytes32\", [])\n ],\n [ParameterSemantics(\"\", \"bytes32\", [])],\n ),\n 6: FunctionSemantics(\n \"\",\n \"bn256Add\",\n [\n ParameterSemantics(\"ax\", \"bytes32\", []),\n ParameterSemantics(\"ay\", \"bytes32\", []),\n ParameterSemantics(\"bx\", \"bytes32\", []),\n ParameterSemantics(\"by\", \"bytes32\", [])\n ],\n [ParameterSemantics(\"\", \"bytes32[2]\", [])],\n ),\n 7: FunctionSemantics(\n \"\",\n \"bn256ScalarMul\",\n [\n ParameterSemantics(\"x\", \"bytes32\", []),\n ParameterSemantics(\"y\", \"bytes32\", []),\n ParameterSemantics(\"scalar\", \"bytes32\", [])\n ],\n [ParameterSemantics(\"\", \"bytes32[2]\", [])],\n ),\n 8: FunctionSemantics(\n \"\",\n \"bn256Pairing\",\n [\n ParameterSemantics(\"input\", \"raw\", [])\n ],\n [ParameterSemantics(\"\", \"bytes32\", [])],\n ),\n}\n", "id": "3950937", "language": "Python", "matching_score": 2.354703664779663, "max_stars_count": 1, "path": "ethtx/semantics/solidity/precompiles.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ethtx.models.semantics_model import (\n EventSemantics,\n FunctionSemantics,\n ParameterSemantics,\n TransformationSemantics,\n)\n\nerc721_transfer_event = EventSemantics(\n \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\",\n False,\n \"Transfer\",\n [\n ParameterSemantics(\"from\", \"address\", [], True),\n ParameterSemantics(\"to\", \"address\", [], True),\n ParameterSemantics(\"tokenId\", \"uint256\", [], True),\n ],\n)\n\nerc721_transfer_event_transformation = {\n \"__input2__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input2__)\"\n )\n}\n\nerc721_approval_event = EventSemantics(\n \"0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925\",\n False,\n \"Approval\",\n [\n ParameterSemantics(\"owner\", \"address\", [], True),\n ParameterSemantics(\"approved\", \"address\", [], True),\n ParameterSemantics(\"tokenId\", \"uint256\", [], True),\n ],\n)\n\nerc721_approval_event_transformation = {\n \"__input2__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input2__)\"\n )\n}\n\nerc721_approvalForAll_event = EventSemantics(\n \"0x17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31\",\n False,\n \"ApprovalForAll\",\n [\n ParameterSemantics(\"owner\", \"address\", [], True),\n ParameterSemantics(\"operator\", \"address\", [], True),\n ParameterSemantics(\"approved\", \"bool\", [], False),\n ],\n)\n\nerc721_balanceOf_function = FunctionSemantics(\n \"0x70a08231\",\n \"balanceOf\",\n [ParameterSemantics(\"owner\", \"address\", [])],\n [ParameterSemantics(\"\", \"uint256\", [])],\n)\n\nerc721_ownerOf_function = FunctionSemantics(\n \"0x6352211e\",\n 
\"ownerOf\",\n [ParameterSemantics(\"tokenId\", \"uint256\", [])],\n [ParameterSemantics(\"\", \"address\", [])],\n)\n\nerc721_ownerOf_function_transformation = {\n \"__input0__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input0__)\"\n )\n}\n\nerc721_transferFrom_function = FunctionSemantics(\n \"0x23b872dd\",\n \"transferFrom\",\n [\n ParameterSemantics(\"from\", \"address\", []),\n ParameterSemantics(\"to\", \"address\", []),\n ParameterSemantics(\"tokenId\", \"uint256\", []),\n ],\n [],\n)\n\nerc721_transferFrom_function_transformation = {\n \"__input2__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input2__)\"\n )\n}\n\nerc721_safeTransferFrom_function = FunctionSemantics(\n \"0x42842e0e\",\n \"safeTransferFrom\",\n [\n ParameterSemantics(\"from\", \"address\", []),\n ParameterSemantics(\"to\", \"address\", []),\n ParameterSemantics(\"tokenId\", \"uint256\", []),\n ],\n [],\n)\n\nerc721_safeTransferFrom_function_transformation = {\n \"__input2__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input2__)\"\n )\n}\n\nerc721_safeTransferFrom_with_data_function = FunctionSemantics(\n \"0xb88d4fde\",\n \"safeTransferFrom\",\n [\n ParameterSemantics(\"from\", \"address\", []),\n ParameterSemantics(\"to\", \"address\", []),\n ParameterSemantics(\"tokenId\", \"uint256\", []),\n ParameterSemantics(\"data\", \"bytes\", [], dynamic=True),\n ],\n [],\n)\n\nerc721_safeTransferFrom_with_data_function_transformation = {\n \"__input2__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input2__)\"\n )\n}\n\nerc721_approve_function = FunctionSemantics(\n \"0x095ea7b3\",\n \"approve\",\n [\n ParameterSemantics(\"operator\", \"address\", []),\n ParameterSemantics(\"tokenId\", \"uint256\", []),\n ],\n [],\n)\n\nerc721_approve_function_transformation = {\n \"__input1__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input1__)\"\n )\n}\n\nerc721_setApprovalForAll_function = FunctionSemantics(\n \"0xa22cb465\",\n \"setApprovalForAll\",\n [\n ParameterSemantics(\"address\", \"address\", []),\n ParameterSemantics(\"approved\", \"bool\", []),\n ],\n [],\n)\n\nerc721_getApproved_function = FunctionSemantics(\n \"0x081812fc\",\n \"getApproved\",\n [ParameterSemantics(\"tokenId\", \"uint256\", [])],\n [ParameterSemantics(\"\", \"address\", [])],\n)\n\nerc721_getApproved_function_transformation = {\n \"__input0__\": TransformationSemantics(\n transformed_type=\"nft\", transformation=\"decode_nft(__input0__)\"\n )\n}\n\nerc721_isApprovedForAll_function = FunctionSemantics(\n \"0xe985e9c5\",\n \"isApprovedForAll\",\n [\n ParameterSemantics(\"owner\", \"address\", []),\n ParameterSemantics(\"operator\", \"address\", []),\n ],\n [ParameterSemantics(\"\", \"bool\", [])],\n)\n\nERC721_EVENTS = {\n erc721_transfer_event.signature: erc721_transfer_event,\n erc721_approval_event.signature: erc721_approval_event,\n erc721_approvalForAll_event.signature: erc721_approvalForAll_event,\n}\n\nERC721_FUNCTIONS = {\n erc721_balanceOf_function.signature: erc721_balanceOf_function,\n erc721_ownerOf_function.signature: erc721_ownerOf_function,\n erc721_transferFrom_function.signature: erc721_transferFrom_function,\n erc721_safeTransferFrom_function.signature: erc721_safeTransferFrom_function,\n erc721_safeTransferFrom_with_data_function.signature: erc721_safeTransferFrom_with_data_function,\n erc721_approve_function.signature: 
erc721_approve_function,\n erc721_setApprovalForAll_function.signature: erc721_setApprovalForAll_function,\n erc721_getApproved_function.signature: erc721_getApproved_function,\n erc721_isApprovedForAll_function.signature: erc721_isApprovedForAll_function,\n}\n\nERC721_TRANSFORMATIONS = {\n erc721_transfer_event.signature: erc721_transfer_event_transformation,\n erc721_approval_event.signature: erc721_approval_event_transformation,\n erc721_ownerOf_function.signature: erc721_ownerOf_function_transformation,\n erc721_transferFrom_function.signature: erc721_transferFrom_function_transformation,\n erc721_safeTransferFrom_function.signature: erc721_safeTransferFrom_function_transformation,\n erc721_safeTransferFrom_with_data_function.signature: erc721_safeTransferFrom_with_data_function_transformation,\n erc721_approve_function.signature: erc721_approve_function_transformation,\n erc721_getApproved_function.signature: erc721_getApproved_function_transformation,\n}\n", "id": "4090183", "language": "Python", "matching_score": 3.4081194400787354, "max_stars_count": 1, "path": "ethtx/semantics/standards/erc721.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ethtx.models.semantics_model import (\n EventSemantics,\n ParameterSemantics,\n TransformationSemantics,\n)\n\nlognote_event_v1 = EventSemantics(\n \"0xd3ff30f94bb4ebb4f3d773ea26b6efc7328b9766f99f19dff6f01392138be46d\",\n False,\n \"LogNote\",\n [\n ParameterSemantics(\"sig\", \"bytes4\", [], True),\n ParameterSemantics(\"arg1\", \"bytes32\", [], True),\n ParameterSemantics(\"arg2\", \"bytes32\", [], True),\n ParameterSemantics(\"arg3\", \"bytes32\", [], True),\n ParameterSemantics(\"data\", \"bytes\", [], False, True),\n ],\n)\n\nlognote_transformation_v1 = {\n \"sig\": TransformationSemantics(transformed_type=\"ignore\"),\n \"arg1\": TransformationSemantics(transformed_type=\"ignore\"),\n \"arg2\": TransformationSemantics(transformed_type=\"ignore\"),\n \"arg3\": TransformationSemantics(transformed_type=\"ignore\"),\n \"data\": TransformationSemantics(\n transformed_type=\"call\", transformation=\"decode_call(__contract__, data)\"\n ),\n}\n\nlognote_event_v2 = EventSemantics(\n \"0xd3d8bec38a91a5f4411247483bc030a174e77cda9c0351924c759f41453aa5e8\",\n False,\n \"LogNote\",\n [\n ParameterSemantics(\"sig\", \"bytes4\", [], True),\n ParameterSemantics(\"user\", \"address\", [], True),\n ParameterSemantics(\"arg1\", \"bytes32\", [], True),\n ParameterSemantics(\"arg2\", \"bytes32\", [], True),\n ParameterSemantics(\"data\", \"bytes\", [], False, True),\n ],\n)\n\nlognote_transformation_v2 = {\n \"sig\": TransformationSemantics(transformed_type=\"ignore\"),\n \"arg1\": TransformationSemantics(transformed_type=\"ignore\"),\n \"arg2\": TransformationSemantics(transformed_type=\"ignore\"),\n \"data\": TransformationSemantics(\n transformed_type=\"call\", transformation=\"decode_call(__contract__, data)\"\n ),\n}\n\nlognote_event_v3 = EventSemantics(\n 
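How the ERC721 tables above are consumed: SemanticsRepository._decode_standard_semantics checks that every known ERC721 event and function signature appears among the semantics decoded from a contract's ABI. The helper below restates that check as a standalone function for illustration.

from ethtx.semantics.standards.erc721 import ERC721_EVENTS, ERC721_FUNCTIONS

def looks_like_erc721(events: dict, functions: dict) -> bool:
    # Mirrors the all(...) membership tests used in _decode_standard_semantics.
    return (all(signature in events for signature in ERC721_EVENTS)
            and all(signature in functions for signature in ERC721_FUNCTIONS))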
\"0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31\",\n False,\n \"LogNote\",\n [\n ParameterSemantics(\"sig\", \"bytes4\", [], True),\n ParameterSemantics(\"guy\", \"address\", [], True),\n ParameterSemantics(\"foo\", \"bytes32\", [], True),\n ParameterSemantics(\"bar\", \"bytes32\", [], True),\n ParameterSemantics(\"wad\", \"uint256\", [], False),\n ParameterSemantics(\"fax\", \"bytes\", [], False, True),\n ],\n)\n\nlognote_transformation_v3 = {\n \"sig\": TransformationSemantics(transformed_type=\"ignore\"),\n \"foo\": TransformationSemantics(transformed_type=\"ignore\"),\n \"bar\": TransformationSemantics(transformed_type=\"ignore\"),\n \"wad\": TransformationSemantics(transformation=\"wad / 10**18\"),\n \"fax\": TransformationSemantics(\n transformed_type=\"call\", transformation=\"decode_call(__contract__, fax)\"\n ),\n}\n\nlogcall_event = EventSemantics(\n \"0x25fce1fe01d9b241fda40b2152ddd6f4ba063fcfb3c2c81dddf84ee20d3f341f\",\n False,\n \"LOG_CALL\",\n [\n ParameterSemantics(\"sig\", \"bytes4\", [], True),\n ParameterSemantics(\"caller\", \"address\", [], True),\n ParameterSemantics(\"data\", \"bytes\", [], False, True),\n ],\n)\n\nlogcall_transformation = {\n \"sig\": TransformationSemantics(transformed_type=\"ignore\"),\n \"data\": TransformationSemantics(\n transformed_type=\"call\", transformation=\"decode_call(__contract__, data)\"\n ),\n}\n\nanonymous_events = {\n lognote_event_v1.signature: lognote_transformation_v1,\n lognote_event_v2.signature: lognote_transformation_v2,\n lognote_event_v3.signature: lognote_transformation_v3,\n logcall_event.signature: logcall_transformation,\n}\n", "id": "12389106", "language": "Python", "matching_score": 0.7034555673599243, "max_stars_count": 1, "path": "ethtx/semantics/protocols/anonymous.py" }, { "content": "# Copyright 2021 DAI Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport importlib\nimport os\n\nfrom ..semantics.base import BaseType, Base\n\n\nclass Router:\n \"\"\"\n Semantics router.\n Returns all objects withs semantics to include.\n \"\"\"\n\n root_dir = os.path.dirname(__file__)\n root_module_name = \".\".join(__name__.split(\".\")[:-1])\n\n def __new__(cls) -> BaseType:\n return cls._get_semantics()\n\n @classmethod\n def _get_semantics(cls) -> BaseType:\n \"\"\"\n Get all available semantics.\n Match pattern:\n - .py file\n - object is a class type\n - object is a Base subclass\n \"\"\"\n rv = {}\n files = (\n semantic\n for semantic in glob.iglob(cls.root_dir + \"**/**\", recursive=True)\n if os.path.isfile(semantic)\n and \"__\" not in semantic\n and semantic.endswith(\".py\")\n )\n\n for filename in files:\n filename = filename.replace(\"/\", \".\").replace(\".py\", \"\")\n foo = importlib.import_module(\n f\"{cls.root_module_name}{filename.split(cls.root_module_name)[-1]}\"\n )\n for item in dir(foo):\n obj = getattr(foo, item)\n if isinstance(obj, type) and issubclass(obj, Base) and obj != Base:\n rv[obj.code_hash] = obj.contract_semantics\n\n return rv\n", "id": "972660", 
"language": "Python", "matching_score": 1.731093168258667, "max_stars_count": 1, "path": "ethtx/semantics/router.py" }, { "content": "import sys, os\n\nlines = open(os.path.dirname(__file__)+\"/../CHANGELOG.md\").readlines()[3:]\nwas_last_line_blank = False\nlog = \"\"\nfor line in lines:\n if line == \"\\n\":\n if was_last_line_blank:\n print(log)\n sys.exit(0)\n else: \n was_last_line_blank = True\n else:\n log += line\n was_last_line_blank = False\n", "id": "12436772", "language": "Python", "matching_score": 0.15401609241962433, "max_stars_count": 1, "path": "scripts/get_last_changelog_entry.py" } ]
2.022602
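The ERC-721 record above ends with lookup tables that key both function semantics and display transformations by 4-byte selector. A minimal lookup sketch is shown below; note that the `FunctionSemantics` and `TransformationSemantics` model classes are not part of this excerpt, so the `.name` and `.transformation` attribute names are assumptions based on the keyword arguments used above.

```python
# Hypothetical selector lookup against the tables defined in erc721.py above.
# Assumption: FunctionSemantics exposes its second positional field as `.name`,
# and TransformationSemantics exposes `.transformation` (the model classes are
# not shown in this excerpt).
from ethtx.semantics.standards.erc721 import ERC721_FUNCTIONS, ERC721_TRANSFORMATIONS

selector = "0x23b872dd"  # transferFrom(address, address, uint256), defined above

semantics = ERC721_FUNCTIONS[selector]
transformations = ERC721_TRANSFORMATIONS.get(selector, {})

print(semantics.name)                                 # transferFrom
print(transformations["__input2__"].transformation)   # decode_nft(__input2__)
```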
xImAnton
[ { "content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom .modulebase import MoodleModule, requires_resolved\nfrom .resources import MoodleResource\nfrom .util import filter\nfrom typing import Optional, List, TYPE_CHECKING\nif TYPE_CHECKING:\n from .moodle import MoodleCrawler\n\n\nclass CourseManager(MoodleModule):\n def __init__(self, moodle: MoodleCrawler):\n super(CourseManager, self).__init__(moodle)\n self.courses: Optional[List[MoodleCourse]] = None\n\n async def fetch(self):\n \"\"\"\n Fetches the Course Overview of the current Moodle Account.\n After calling this, CourseManager#courses is accessible.\n \"\"\"\n json = await self.moodle.api_request(\"core_enrol_get_users_courses\", userid=self.moodle.site_info.user_id,\n returnusercount=1)\n self.courses = []\n self._set_resolved()\n for course in json:\n self.courses.append(MoodleCourse(self.moodle, course))\n\n\nclass MoodleCourse(MoodleModule):\n IGNORE_FIELDS = {\"default\": [\"id\"],\n \"init\": [\"short_name\", \"full_name\", \"display_name\", \"id_number\", \"visible\",\n \"summary\", \"summary_format\", \"format\", \"show_grades\", \"language\",\n \"category\", \"progress\", \"completed\", \"start_date\", \"end_date\", \"marker\",\n \"last_access\", \"overview_files\"],\n \"resources\": [\"resources\"]}\n\n def __init__(self, moodle, data):\n super(MoodleCourse, self).__init__(moodle)\n self.id: int = data[\"id\"]\n try:\n self.short_name: str = data[\"shortname\"]\n self.full_name: str = data[\"fullname\"]\n self.display_name: str = data[\"displayname\"]\n self.id_number = data[\"idnumber\"]\n self.visible: bool = data[\"visible\"]\n self.summary: str = data[\"summary\"]\n self.summary_format: int = data[\"summaryformat\"]\n self.format = data[\"format\"]\n self.show_grades: bool = data[\"showgrades\"]\n self.language: str = data[\"lang\"]\n self.category: int = data[\"category\"]\n self.progress = data[\"progress\"]\n self.completed = data[\"completed\"]\n self.start_date: datetime = datetime.fromtimestamp(data[\"startdate\"])\n self.end_date: datetime = None if data[\"enddate\"] == 0 else datetime.fromtimestamp(data[\"enddate\"])\n self.marker = data[\"marker\"]\n self.last_access: datetime = datetime.fromtimestamp(data[\"lastaccess\"])\n self.overview_files: List[dict] = data[\"overviewfiles\"]\n self._set_resolved(\"init\")\n except KeyError:\n pass\n\n self.data: Optional[dict] = None\n self.resources: Optional[List[MoodleResource]] = None\n\n def __repr__(self) -> str:\n return f\"MoodleCourse[id={self.id}, display_name=\\\"{self.display_name}\\\"]\"\n\n async def fetch(self) -> None:\n data = {\n \"options[0][name]\": \"excludemodules\",\n \"options[0][value]\": 0,\n \"options[1][name]\": \"excludecontents\",\n \"options[1][value]\": 1,\n \"options[2][name]\": \"includestealthmodules\",\n \"options[2][value]\": 0\n }\n json = await self.moodle.api_request(\"core_course_get_contents\", courseid=self.id, **data)\n self.data = json\n self._set_resolved()\n\n async def fetch_resources(self) -> None:\n json = await self.moodle.api_request(\"mod_resource_get_resources_by_courses\", **{\"courseids[0]\": 14})\n self.resources = []\n self._set_resolved(\"resources\")\n for res in json[\"resources\"]:\n self.resources.append(MoodleResource(self.moodle, res))\n\n @requires_resolved(\"resources\")\n async def get_resource(self, id_) -> MoodleResource:\n return filter(self.resources, id=id_)\n", "id": "6830373", "language": "Python", "matching_score": 3.4557278156280518, "max_stars_count": 0, 
"path": "src/moodle/course.py" }, { "content": "from __future__ import annotations\n\nfrom .modulebase import MoodleModule\n\n\nclass SiteInfo(MoodleModule):\n \"\"\"\n Includes all important config values for moodle\n \"\"\"\n def __init__(self, moodle):\n super(SiteInfo, self).__init__(moodle)\n self.site_name: str = \"\"\n self.first_name: str = \"\"\n self.last_name: str = \"\"\n self.language: str = \"\"\n self.user_id: int = 0\n self.private_access_key: str = \"\"\n self.is_admin: bool = False\n self.functions: list = []\n\n async def fetch(self) -> None:\n json = await self.moodle.api_request(\"core_webservice_get_site_info\")\n self.site_name = json[\"sitename\"]\n self.first_name = json[\"firstname\"]\n self.last_name = json[\"lastname\"]\n self.language = json[\"lang\"]\n self.user_id = json[\"userid\"]\n self.private_access_key = json[\"userprivateaccesskey\"]\n self.is_admin = json[\"userissiteadmin\"]\n self.functions = json[\"functions\"]\n self._set_resolved()\n", "id": "798939", "language": "Python", "matching_score": 0.8041585683822632, "max_stars_count": 0, "path": "src/moodle/siteinfo.py" }, { "content": "import setuptools\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as f:\n long_desc = f.read()\n\n\nsetuptools.setup(\n name=\"moodle-xImAnton\",\n version=\"1.0.0\",\n author=\"xImAnton_\",\n description=\"A Python Wrapper for the Moodle Mobile API\",\n long_description=long_desc,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/xImAnton/moodlecrawler\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/xImAnton/moodlecrawler/issues\"\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.8\"\n)\n", "id": "886971", "language": "Python", "matching_score": 0.6592764258384705, "max_stars_count": 0, "path": "setup.py" }, { "content": "from __future__ import annotations\n\nimport functools\n\nimport aiohttp\n\nfrom .calendar import MoodleCalendar\nfrom .course import CourseManager\nfrom .siteinfo import SiteInfo\n\n\ndef requires_login(func):\n \"\"\"\n Decorator for Methods in MoodleCrawler that require the user to be logged in.\n \"\"\"\n @functools.wraps(func)\n async def wrapper(crawler: MoodleCrawler, *args, **kwargs):\n if not crawler.logged_in:\n raise ValueError(\"MoodleCrawler must be logged in to use this function\")\n return await func(crawler, *args, **kwargs)\n return wrapper\n\n\nclass MoodleCrawler:\n \"\"\"\n Base Class of the Moodle Crawler.\n \"\"\"\n def __init__(self, username, password, site_url):\n self._logged_in: bool = False\n self.username: str = username\n self.password: str = password\n self.site_url: str = site_url\n self.site_info: SiteInfo = SiteInfo(self)\n self.courses: CourseManager = CourseManager(self)\n self.token: str = \"\"\n self.private_token: str = \"\"\n\n async def login(self) -> None:\n \"\"\"\n Tries to login with the specified credentials\n \"\"\"\n data = {\n \"username\": self.username,\n \"password\": <PASSWORD>,\n \"service\": \"moodle_mobile_app\"\n }\n async with aiohttp.ClientSession() as s:\n async with s.post(self.site_url + \"/login/token.php\", data=data) as r:\n json = await r.json()\n if \"token\" not in json.keys():\n raise ValueError(\"login wasn't successful\")\n self._logged_in = True\n self.token, self.private_token = json[\"token\"], json[\"privatetoken\"]\n 
await self._load_modules()\n\n @requires_login\n async def _load_modules(self) -> None:\n \"\"\"\n Loads all Modules\n \"\"\"\n await self.site_info.fetch()\n await self.courses.fetch()\n\n @property\n def logged_in(self) -> bool:\n \"\"\"\n Whether the crawler is logged in or not.\n :return:\n \"\"\"\n return self._logged_in\n\n @requires_login\n async def fetch_calendar(self, year: int, month: int) -> MoodleCalendar:\n \"\"\"\n Fetches a Calendar for the specified month\n :param year: the year of the calendar\n :param month: the month of the calendar (1-12)\n :return: the fetched and resolved MoodleCalendar\n \"\"\"\n c = MoodleCalendar(self, year, month)\n await c.fetch()\n return c\n\n @requires_login\n async def api_request(self, function: str, path: str = \"/webservice/rest/server.php\", **kwargs) -> dict:\n \"\"\"\n Shorthand for making REST function calls to moodle.\n :param function: the function to execute\n :param path: the path where the webservice is running at\n :param kwargs: attributes to include in the post data\n :return: the json response of the server\n \"\"\"\n data = {\n \"moodlewssettingfilter\": True,\n \"moodlewssettingfileurl\": False,\n \"wsfunction\": function,\n \"wstoken\": self.token,\n **kwargs\n }\n async with aiohttp.ClientSession() as s:\n async with s.post(self.site_url + path + f\"?moodlewsrestformat=json&wsfunction={function}\", data=data) as r:\n return await r.json()\n", "id": "2529179", "language": "Python", "matching_score": 2.9031741619110107, "max_stars_count": 0, "path": "src/moodle/moodle.py" }, { "content": "from __future__ import annotations\n\nfrom .modulebase import MoodleModule\nfrom typing import Optional, TYPE_CHECKING\nif TYPE_CHECKING:\n from .moodle import MoodleCrawler\n\n\nclass MoodleCalendar(MoodleModule):\n \"\"\"\n Class that represents the Moodle Calendar and its Events.\n \"\"\"\n IGNORE_FIELDS = {\"default\": [\"year\", \"month\"]}\n\n def __init__(self, moodle: MoodleCrawler, year: int, month: int):\n \"\"\"\n :param moodle: the main moodle class\n :param year: the year that should be fetched\n :param month: the month that should be fetched\n \"\"\"\n super(MoodleCalendar, self).__init__(moodle)\n self.year: int = year\n self.month: int = month\n self.data: Optional[dict] = None\n\n async def fetch(self):\n \"\"\"\n Fetches the Calendar for the current moodle account.\n After calling this MoodleCalendar#data can be accessed.\n \"\"\"\n json = await self.moodle.api_request(\"core_calendar_get_calendar_monthly_view\", year=self.year, month=self.month, mini=1)\n self.data = json\n self._set_resolved()\n", "id": "6723802", "language": "Python", "matching_score": 3.5717666149139404, "max_stars_count": 0, "path": "src/moodle/calendar.py" }, { "content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, List, Dict, Optional, Any\n\nif TYPE_CHECKING:\n from .moodle import MoodleCrawler\nfrom asyncio import iscoroutinefunction\nimport functools\n\n\ndef requires_resolved(state=\"default\"):\n \"\"\"\n When using this as decorator in a method of a MoodleModule, the method can only be used when the given state is resolved.\n :param state: the state that has to be resolved to use this method\n \"\"\"\n def decorator(func):\n @functools.wraps(func)\n async def wrapper(module: MoodleModule, *args, **kwargs):\n if state not in module._resolve_state:\n raise ValueError(f\"{state} is not resolved in {module.__class__.__name__}\")\n return await func(module, *args, **kwargs)\n return wrapper\n return 
decorator\n\n\nclass MoodleModule:\n \"\"\"\n Base Class for all Moodle Modules.\n Manages resolving states.\n \"\"\"\n\n _IGNORE_FIELDS: List[str] = [\"__class__\", \"_resolve_state\", \"IGNORE_FIELDS\", \"moodle\", \"ALL\"]\n IGNORE_FIELDS: Dict[str, List[str]] = {}\n ALL: List[str] = []\n\n @classmethod\n def get_all(cls) -> List[str]:\n \"\"\"\n used to get all attributes specified in MoodleModule#IGNORE_FIELDS and MoodleModule#_IGNORE_FIELDS.\n When a attribute is not in this list, it is only accessible when the \"default\" state is resolved.\n :return: a list of all specified attributes\n \"\"\"\n cls.ALL = cls._IGNORE_FIELDS[:]\n for state in cls.IGNORE_FIELDS.values():\n cls.ALL.extend(state)\n return cls.ALL\n\n def __init__(self, moodle: MoodleCrawler):\n self._resolve_state = []\n self.moodle = moodle\n\n def _set_resolved(self, state=\"default\") -> None:\n \"\"\"\n Resolves a specified state.\n Should only be called inside a Subclass of MoodleModule.\n :param state: the state to set resolved\n \"\"\"\n if state not in self._resolve_state:\n self._resolve_state.append(state)\n\n async def fetch(self) -> None:\n \"\"\"\n Default Fetching Method of this Module\n :return:\n \"\"\"\n pass\n\n def __getattribute__(self, item) -> Optional[Any]:\n \"\"\"\n Makes attributes only accessible when their state is resolved.\n \"\"\"\n value = object.__getattribute__(self, item)\n if callable(value) or iscoroutinefunction(value):\n return value\n if item not in MoodleModule._IGNORE_FIELDS:\n all_ = self.ALL if len(self.ALL) > 0 else self.get_all()\n if item not in all_:\n return value\n # handle defined attributes\n for k, v in self.IGNORE_FIELDS.items():\n if k not in self._resolve_state and k != \"default\":\n raise ValueError(f\"\\\"{item}\\\" on {self.__class__.__name__} is not resolved\")\n if item in v:\n return value\n if \"default\" not in self._resolve_state and item not in all_:\n raise ValueError(f\"\\\"{item}\\\" on {self.__class__.__name__} is not resolved\")\n return value\n", "id": "9358440", "language": "Python", "matching_score": 2.015774965286255, "max_stars_count": 0, "path": "src/moodle/modulebase.py" }, { "content": "from typing import Union, List, Any, Optional, Iterable\n\n\ndef check(obj: Union[object, dict], check_as_dict: bool, **kwargs) -> bool:\n \"\"\"\n checks a specific object for attributes\n :param obj: the object to check\n :param check_as_dict: whether the values should be accessed as values of a dict or attributes of an object\n :param kwargs: the attributes to check\n :return: whether the query matches this object\n \"\"\"\n for k, v in kwargs.items():\n if check_as_dict:\n if obj[k] != v:\n return False\n else:\n if getattr(obj, k) != v:\n return False\n return True\n\n\ndef filter(seq: Iterable, check_as_dict: bool = False, only_one: bool = True, **kwargs) -> Optional[Union[List[Any], Any]]:\n \"\"\"\n Filters an iterable for items with specified attributes.\n :param seq: The sequence to filter\n :param check_as_dict: whether the attributes of the objects should be accessed as values of a dict or attributes of an object\n :param only_one: whether only one object or a list of all matching objects should be returned.\n :param kwargs: the attributes to check\n :return: Depending on only_one\n \"\"\"\n out = []\n for item in seq:\n if check(item, check_as_dict, **kwargs):\n if only_one:\n return item\n else:\n out.append(item)\n return None if only_one else out\n", "id": "2873178", "language": "Python", "matching_score": 0.5087446570396423, 
"max_stars_count": 0, "path": "src/moodle/util.py" }, { "content": "from __future__ import annotations\nimport os.path\nfrom asyncio.coroutines import iscoroutine\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom operator import attrgetter\nfrom typing import Dict, Any, List, TYPE_CHECKING\nif TYPE_CHECKING:\n from .moodle import MoodleCrawler\n\nimport aiofiles\nimport aiohttp\n\n\nclass MoodleResource:\n \"\"\"\n Represents a Moodle Resource that can have multiple files.\n \"\"\"\n\n def __init__(self, moodle: MoodleCrawler, data: Dict[str, Any]):\n self.moodle: MoodleCrawler = moodle\n\n self.id: int = data[\"id\"]\n self.course_id: int = data[\"course\"]\n self.name: str = data[\"name\"]\n self.content_files: List[ContentFile] = [ContentFile(self.moodle, x) for x in data[\"contentfiles\"]]\n self.revision: int = data[\"revision\"]\n self.modified: datetime = datetime.fromtimestamp(data[\"timemodified\"])\n\n async def get_latest_file(self) -> ContentFile:\n \"\"\"\n :return: the last file edited\n \"\"\"\n return min(self.content_files, key=attrgetter(\"modified\"))\n\n\nclass ContentFile:\n \"\"\"\n Represents a single file of a Resource\n \"\"\"\n\n def __init__(self, moodle: MoodleCrawler, data: Dict[str, Any]):\n self.moodle: MoodleCrawler = moodle\n\n self.file_name: str = data[\"filename\"]\n self.file_path: str = data[\"filepath\"]\n self.file_url: str = data[\"fileurl\"]\n self.modified: datetime = datetime.fromtimestamp(data[\"timemodified\"])\n self.mimetype: str = data[\"mimetype\"]\n self.is_external: bool = data[\"isexternalfile\"]\n\n async def download(self, out_steam, binary: bool = True) -> None:\n \"\"\"\n Downloads this resource and writes it to the specified stream.\n :param out_steam: the stream to write to output to\n :param binary: whether the data should not be decoded\n \"\"\"\n async with aiohttp.ClientSession() as s:\n async with s.get(f\"{self.file_url}?token={self.moodle.token}\") as r:\n data = await r.read()\n if not binary:\n data = data.decode()\n res = out_steam.write(data)\n if iscoroutine(res):\n await res\n\n async def save(self, path: str) -> str:\n \"\"\"\n Downloads and saves this file to the specified directory\n :param path: the directory to save the file at\n :return: the file output file path\n \"\"\"\n out_path = os.path.join(path, self.file_name)\n async with aiofiles.open(out_path, mode=\"wb\") as f:\n await self.download(f)\n return out_path\n\n async def get_modification_delta(self) -> timedelta:\n \"\"\"\n Calculates before what time this file was last modified.\n \"\"\"\n return datetime.now() - self.modified\n", "id": "6388210", "language": "Python", "matching_score": 2.0732924938201904, "max_stars_count": 0, "path": "src/moodle/resources.py" }, { "content": "from .moodle import MoodleCrawler\n", "id": "2462310", "language": "Python", "matching_score": 0.6066499948501587, "max_stars_count": 0, "path": "src/moodle/__init__.py" } ]
2.015775
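The `moodle` package in the record above is fully async: `login()` obtains a token and then loads the site info and course overview modules. A minimal usage sketch, with placeholder credentials and site URL, could look like this:

```python
# Minimal usage sketch for the MoodleCrawler shown above.
# The site URL and credentials are placeholders, not real values.
import asyncio

from moodle import MoodleCrawler


async def main():
    crawler = MoodleCrawler("student", "secret", "https://moodle.example.org")
    await crawler.login()  # also fetches site info and the course overview

    for course in crawler.courses.courses:
        print(course)  # MoodleCourse[id=..., display_name="..."]

    calendar = await crawler.fetch_calendar(2021, 9)
    print(calendar.data)


asyncio.run(main())
```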
zforkdump
[ { "content": "from twisted.trial import unittest\n\nfrom mock import Mock\nfrom stimpy import Statebox\n\ndef mockClock(maxTime=10):\n clock = Mock()\n\n returns = range(maxTime)\n def side_effect():\n return returns.pop(0)\n\n clock.side_effect = side_effect\n return clock\n\n\nclass StateboxTestCase(unittest.TestCase):\n def test_defaults(self):\n sb = Statebox(_clock=mockClock())\n self.assertEquals(sb.queue, [])\n self.assertEquals(sb.value, None)\n self.assertEquals(sb.last_modified, 0)\n\n\n def test_serializeEmpty(self):\n sb = Statebox(_clock=mockClock())\n self.assertEquals(sb.serialize(sort_keys=True),\n '{\"last_modified\": 0, \"queue\": [], \"value\": null}')\n\n\n def test_modifyValue(self):\n sb = Statebox(set(), _clock=mockClock())\n sb.modify(set.add, \"foo\")\n self.assertEquals(sb.value, set([\"foo\"]))\n self.assertEquals(sb.queue, [(1, set.add, (\"foo\",), {})])\n\n\n def test_mergeBoxes(self):\n clock = mockClock()\n sb1 = Statebox(set(), _clock=clock)\n sb2 = Statebox(set(), _clock=clock)\n\n sb1.modify(set.add, \"foo\")\n sb2.modify(set.add, \"bar\")\n sb2.modify(set.add, \"baz\")\n\n sb1.merge(sb2)\n\n self.assertEquals(sb1.value, set([\"foo\", \"bar\", \"baz\"]))\n self.assertEquals(sorted(sb1.queue),\n [(2, set.add, (\"foo\",), {}),\n (3, set.add, (\"bar\",), {}),\n (4, set.add, (\"baz\",), {})\n ])\n\n self.assertEquals(sb1.last_modified, 4)\n\n def test_serializeOps(self):\n sb = Statebox(set(), _clock=mockClock())\n sb.modify(set.add, \"foo\")\n\n self.assertEquals(sb.serialize(sort_keys=True),\n ('{\"last_modified\": 1, \"queue\": [[1, \"__builtin__.set.add\", [\"foo\"], {}]], '\n '\"value\": {\"__set__\": [\"foo\"]}}'))\n\n\n def test_unseralizeOps(self):\n sb = Statebox.unserialize(\n ('{\"last_modified\": 1, \"queue\": '\n '[[1, \"__builtin__.set.add\", [\"foo\"], {}]], '\n '\"value\": {\"__set__\": [\"foo\"]}}'))\n\n self.assertEquals(sb.value, set([\"foo\"]))\n self.assertEquals(sb.last_modified, 1)\n self.assertEquals(sb.queue, [(1, set.add, [\"foo\"], {})])\n", "id": "5067924", "language": "Python", "matching_score": 2.5111355781555176, "max_stars_count": 1, "path": "test_stimpy.py" }, { "content": "\"\"\"\nSTatebox In My PYthon.\n\"\"\"\n\njson = None\ntry:\n import simplejson\n json = simplejson\nexcept ImportError:\n import json as stdjson\n json = stdjson\n\nimport time\nimport inspect\nfrom twisted.python.reflect import namedAny\nfrom twisted.python.reflect import fullyQualifiedName as txFullyQualifiedName\n\ndef fullyQualifiedName(obj):\n if inspect.ismethoddescriptor(obj):\n objclass = fullyQualifiedName(obj.__objclass__)\n\n return '%s.%s' % (objclass, obj.__name__)\n return txFullyQualifiedName(obj)\n\n\ndef statebox_object_hook(dct):\n if '__set__' in dct:\n return set(dct['__set__'])\n\n return dct\n\n\ndef statebox_default_encoder(obj):\n if isinstance(obj, set):\n return {'__set__': list(obj)}\n\n return obj\n\n\nclass Statebox(object):\n def __init__(self, value=None, last_modified=None, queue=None,\n _clock=time.time):\n self._clock = _clock\n\n if last_modified is None:\n last_modified = self._clock()\n\n if queue is None:\n queue = []\n\n self._value = value\n self._last_modified = last_modified\n self._queue = queue\n\n\n def modify(self, op, *args, **kwargs):\n self._last_modified = now = self._clock()\n self._queue.append((now, op, args, kwargs))\n op(self._value, *args, **kwargs)\n\n\n def merge(self, *boxes):\n new_queue = []\n new_queue.extend(self._queue)\n\n for box in boxes:\n new_queue.extend(box.queue)\n\n for (t, op, args, 
kwargs) in sorted(new_queue):\n op(self._value, *args, **kwargs)\n self._last_modified = t\n\n self._queue = new_queue\n\n\n def expire(self, age):\n for op in self._queue:\n (t, _op, _args, _kwargs) = op\n if self._last_modified - age > t:\n self._queue.remove(op)\n\n\n def truncate(self, count):\n self._queue = list(sorted(self._queue))[:count]\n\n\n @property\n def last_modified(self):\n return self._last_modified\n\n\n @property\n def value(self):\n return self._value\n\n\n @property\n def queue(self):\n return self._queue\n\n\n @classmethod\n def unserialize(klass, json_str, _clock=time.time, **loadKwargs):\n raw = json.loads(json_str,\n object_hook=statebox_object_hook, **loadKwargs)\n\n return klass(raw['value'], raw['last_modified'],\n [(t, namedAny(op), args, kwargs)\n for (t, op, args, kwargs) in raw['queue']],\n _clock=_clock)\n\n\n def serialize(self, **dumpKwargs):\n return json.dumps({\n 'value': self.value,\n 'queue': [(t, fullyQualifiedName(op), args, kwargs)\n for (t, op, args, kwargs) in self._queue],\n 'last_modified': self.last_modified\n }, default=statebox_default_encoder, **dumpKwargs)\n", "id": "422600", "language": "Python", "matching_score": 1.6719483137130737, "max_stars_count": 1, "path": "stimpy.py" }, { "content": "from setuptools import setup\n\nsetup(\n name=\"stimpy\",\n version=\"0.0.1\",\n description=\"STatebox In My PYthon\",\n py_modules=[\"stimpy\"],\n install_requires=[\"Twisted\"]\n)\n", "id": "5717674", "language": "Python", "matching_score": 0.2770346403121948, "max_stars_count": 1, "path": "setup.py" } ]
1.671948
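Beyond the unit tests in the record above, the modify/merge/serialize cycle of `Statebox` can be sketched directly; everything below follows from `stimpy.py` and `test_stimpy.py` as shown, with no additional API assumed.

```python
# Sketch of the Statebox merge/serialize round trip exercised by the tests above.
from stimpy import Statebox

a = Statebox(set())
b = Statebox(set())

a.modify(set.add, "foo")
b.modify(set.add, "bar")

a.merge(b)                  # replays both op queues in timestamp order
print(a.value)              # {'foo', 'bar'}

payload = a.serialize(sort_keys=True)   # JSON string; sets encoded via a __set__ marker
restored = Statebox.unserialize(payload)
print(restored.value)       # {'foo', 'bar'}
```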
NamWoo
[ { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport asyncio\nimport argparse\nimport pathlib\n\nimport json\nimport socket\nimport webbrowser\n\nimport version\nimport dart_fss as dart\n\n\nfrom halo import Halo\nfrom flask import Flask, render_template, request, jsonify, make_response, send_from_directory\n\nver = version.ver\n\nif getattr(sys, 'frozen', False):\n template_folder = os.path.join(sys._MEIPASS, 'templates')\n favicon_folder = template_folder\n js_folder = os.path.join(template_folder, 'js')\n css_folder = os.path.join(template_folder, 'css')\n img_folder = os.path.join(template_folder, 'img')\n app = Flask(__name__, template_folder=template_folder)\nelse:\n favicon_folder = 'templates'\n js_folder = 'templates/js'\n css_folder = 'templates/css'\n img_folder = 'templates/img'\n app = Flask(__name__)\n\n\ndef get_config_path():\n if sys.platform == 'darwin':\n app_dir = os.path.join(os.path.expanduser('~/Library/Application Support'), 'dart-scraper')\n elif sys.platform == 'win32':\n app_dir = os.path.join(os.getenv('appdata'), 'dart-scraper')\n else:\n app_dir = os.path.join(os.path.expanduser('~'), '.dart-scraper')\n config_path = os.path.join(app_dir, 'dart-setting.json')\n return app_dir, config_path\n\n\[email protected]('/favicon.ico')\ndef favicon():\n return send_from_directory(favicon_folder, 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\[email protected]('/js/<path:path>')\ndef send_js(path):\n global js_folder\n return send_from_directory(js_folder, path)\n\n\[email protected]('/css/<path:path>')\ndef send_css(path):\n global css_folder\n return send_from_directory(css_folder, path)\n\n\[email protected]('/img/<path:path>')\ndef send_img(path):\n global img_folder\n return send_from_directory(img_folder, path)\n\n\ndef save_config_file(api_key):\n app_dir, config_path = get_config_path()\n if not os.path.exists(app_dir):\n app.logger.info(\"Save folder not found\")\n os.makedirs(app_dir)\n \n data = {'API_KEY': api_key}\n\n with open(config_path, 'w') as config_file:\n json.dump(data, config_file)\n\n\ndef read_config_file():\n app.logger.info('Reading Config file')\n _, config_path = get_config_path()\n if not os.path.exists(config_path):\n app.logger.info('Config file not found')\n return None\n\n with open(config_path, 'r') as config_file:\n data = json.load(config_file)\n\n return data['API_KEY']\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected]('/apikey', methods=['POST', 'GET'])\ndef key():\n data = {}\n if request.method == 'GET':\n api_key = read_config_file()\n data['api_key'] = api_key\n\n if request.method == 'POST':\n res_data = request.json\n api_key = res_data.get('api_key')\n if api_key and len(api_key) == 40:\n try:\n dart.dart_set_api_key(api_key)\n save_config_file(api_key)\n data['ret_code'] = 'success'\n except dart.errors.DartAPIError:\n data['ret_code'] = 'error'\n else:\n data['ret_code'] = 'error'\n return jsonify(data)\n\n\[email protected]('/version')\ndef version():\n global ver\n ret_code = {\n 'version': ver,\n 'ret_code':'success'\n }\n return jsonify(ret_code)\n\n\[email protected]('/company')\ndef company():\n data = request.json\n if data is None:\n name = ''\n else:\n name = data.get('name', '')\n crps = crp_list.find_by_name(name)\n \n crps_list = []\n for crp in crps:\n crp_data = {'code': crp.crp_cd, 'name': crp.crp_nm}\n crps_list.append(crp_data)\n \n ret_code = {'ret_code': 'success', 'crp_list': crps_list}\n return 
jsonify(ret_code)\n\[email protected]('/download' , methods=['POST'])\ndef download():\n ret_code = {}\n data = request.json\n api_key = data['api_key']\n if api_key is None:\n ret_code['msg'] = 'DART API KEY can not be None or empty'\n ret_code['ret_code'] = 'invalid'\n return jsonify(ret_code)\n\n crp_cd = data.get('crp_cd', None)\n if crp_cd is None:\n ret_code['msg'] = 'crp_cd can not be None or empty'\n ret_code['ret_code'] = 'invalid'\n return jsonify(ret_code)\n \n start_dt = data.get('start_dt', '20120101')\n end_dt = data.get('end_dt', None)\n path = data.get('path', None)\n \n crp = crp_list.find_by_crp_cd(crp_cd)\n if crp is None:\n ret_code['msg'] = 'Invalid crp_cd'\n ret_code['ret_code'] = 'error'\n return jsonify(ret_code)\n separate = data.get('separate', False)\n report_tp = data.get('report_tp', 'annual')\n report_tp = report_tp.lower()\n \n # Make Folder\n pathlib.Path(path).mkdir(parents=True, exist_ok=True)\n\n filename = '{}_{}_{}.xlsx'.format(crp_cd, 'separate' if separate else 'consolidated', report_tp)\n fs = crp.get_financial_statement(start_dt=start_dt, end_dt=end_dt, separate=separate, report_tp=report_tp)\n \n fs.save(path=path,filename=filename)\n ret_code['ret_code'] = 'success'\n ret_code['msg'] = 'Successfully added to download list'\n return jsonify(ret_code)\n\[email protected]('/path', methods=['GET'])\ndef path():\n ret_code = {}\n ret_code['path'] = os.path.join(os.getcwd(),'fsdata')\n ret_code['ret_code'] = 'success'\n return jsonify(ret_code)\n\n# @app.after_request\n# def after_request(response):\n# response.headers.add('Access-Control-Allow-Origin', '*')\n# response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n# response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n# return response\n\n\nparser = argparse.ArgumentParser(description=' Dart-Scraper')\nparser.add_argument('-p', '--port', type=int, help=\"Dart-Scraper port\")\nargs = vars(parser.parse_args())\nport = args.get('port')\nif port is None:\n port = 5000\n \nspinner = Halo(text='Downloading list of companies', spinner='dots')\nspinner.start()\ncrp_list = dart.get_crp_list()\nspinner.stop()\n\nurl = \"http://127.0.0.1:{}\".format(port)\nwebbrowser.open_new(url)\ncli = sys.modules['flask.cli']\ncli.show_server_banner = lambda *x: None\napp.config['JSON_AS_ASCII'] = False\napp.run(host='127.0.0.1', port=port)\n\n", "id": "2476001", "language": "Python", "matching_score": 2.806828498840332, "max_stars_count": 3, "path": "dart_scraper.py" }, { "content": "import os\nfrom dearpygui.core import *\nfrom dearpygui.simple import *\n\ninclude_directories = []\n\n\nclass IncludeNode:\n\n count = 0\n level = 0\n levels = {0: 0}\n root = \"\"\n\n def __init__(self, name, level):\n self.raw_name = name\n self.name = name + \"##\" + str(IncludeNode.count)\n self.links = []\n self.level = level\n IncludeNode.levels[level] = 0\n\n IncludeNode.count += 1\n\n found = False\n for directory in include_directories:\n if os.path.exists(directory + name):\n self.file_path = directory + name\n found = True\n break\n if not found:\n print(\"could not find file: \", name)\n\n self.parse_node()\n\n def parse_node(self):\n\n lines = []\n with open(self.file_path, \"r\") as file:\n lines = file.readlines()\n\n for line in lines:\n if line.__contains__('#include \"'):\n split_line = line.split('\"')\n if split_line[1] not in get_item_configuration(\"Ignore\")[\"items\"]:\n add_file_if_new(split_line[1])\n self.links.append(IncludeNode(split_line[1], 
self.level+1))\n\n def start_tree(self):\n IncludeNode.root = self.raw_name\n self.create_ui_node(self.name)\n\n def create_ui_node(self, parent):\n\n y = IncludeNode.levels[self.level]\n IncludeNode.levels[self.level] += 80\n\n with node(self.name, x_pos=250*self.level, y_pos=y, parent=\"Editor\"):\n if self.level > 0:\n with node_attribute(self.name + \"-in\"):\n add_text(\"from\")\n if len(self.links) > 0:\n with node_attribute(self.name + \"-out\", output=True):\n add_text(\"Includes:\")\n\n add_node_link(\"Editor\", parent + \"-out\", self.name + \"-in\")\n\n for i in range(0, len(self.links)):\n self.links[i].create_ui_node(self.name)\n\n\ndef callback(sender):\n delete_item(\"Editor\", children_only=True)\n IncludeNode.count = 0\n IncludeNode.level = 0\n IncludeNode.levels = {0: 0}\n\n if sender is not None:\n selection = get_value(sender)\n file = get_item_configuration(sender)[\"items\"][selection]\n else:\n file = IncludeNode.root\n\n inode = IncludeNode(file, 0)\n inode.start_tree()\n\n\ndef ignore():\n\n file = get_value(\"Files\")\n if file is not None:\n items = get_item_configuration(\"Files\")[\"items\"]\n ignore_files = get_item_configuration(\"Ignore\")[\"items\"]\n ignore_files.append(items[file])\n configure_item(\"Ignore\", items=ignore_files)\n del items[file]\n configure_item(\"Files\", items=items)\n callback(None)\n\n\ndef add_file_if_new(file):\n\n items = get_item_configuration(\"Files\")[\"items\"]\n new_items = [file]\n for item in items:\n if item != file:\n new_items.append(item)\n\n configure_item(\"Files\", items=new_items)\n\n\ndef unignore():\n file = get_value(\"Ignore\")\n if file is not None:\n items = get_item_configuration(\"Ignore\")[\"items\"]\n files = get_item_configuration(\"Files\")[\"items\"]\n files.append(items[file])\n configure_item(\"Files\", items=files)\n del items[file]\n configure_item(\"Ignore\", items=items)\n callback(None)\n\n\ndef search_directory():\n\n header_files = []\n cpp_files = []\n other_files = []\n include_directories.clear()\n for dirName, subdirList, fileList in os.walk(get_value(\"Root\")):\n include_directories.append(dirName + \"/\")\n for fname in fileList:\n if fname.endswith(\".h\"):\n header_files.append(fname)\n elif fname.endswith(\".cpp\"):\n cpp_files.append(fname)\n else:\n other_files.append(fname)\n\n configure_item(\"Headers\", items=header_files)\n configure_item(\"Cpp\", items=cpp_files)\n configure_item(\"Other\", items=other_files)\n show_item(\"All Files\")\n\n\nwith window(\"All Files\", x_pos=75, y_pos=65, show=False, no_resize=True, autosize=True):\n add_listbox(\"Headers\", width=200, num_items=15, callback=callback)\n add_same_line()\n add_listbox(\"Cpp\", width=200, num_items=15, callback=callback)\n add_same_line()\n add_listbox(\"Other\", width=200, num_items=15, callback=callback)\n\nwith window(\"Ignore Options\", x_pos=75, y_pos=400, show=False, no_resize=True, autosize=True):\n\n with group(\"g2\"):\n add_button(\"Ignore File\", callback=ignore)\n add_same_line()\n add_button(\"Unignore File\", callback=unignore)\n add_listbox(\"Ignore\", width=200, num_items=15)\n add_same_line()\n add_listbox(\"Files\", width=200, num_items=15)\n\nwith window(\"Main\"):\n\n with menu_bar(\"mb\"):\n with menu(\"Settings\"):\n add_menu_item(\"List Files\", callback=lambda: show_item(\"All Files\"))\n add_menu_item(\"Ignore Settings\", callback=lambda: show_item(\"Ignore Options\"))\n\n with menu(\"Tools\"):\n add_menu_item(\"Show Logger\", callback=show_logger)\n add_menu_item(\"Show About\", 
callback=show_about)\n add_menu_item(\"Show Metrics\", callback=show_metrics)\n add_menu_item(\"Show Documentation\", callback=show_documentation)\n add_menu_item(\"Show Debug\", callback=show_debug)\n add_menu_item(\"Show Style Editor\", callback=show_style_editor)\n\n add_input_text(\"Root\", default_value=\"C:\\\\dev\\\\DearPyGui\\\\DearPyGui\\\\\")\n add_same_line()\n add_button(\"Initialize\", callback=search_directory)\n\n with node_editor(\"Editor\"):\n pass\n\nset_main_window_title(\"Header Analyzer\")\nstart_dearpygui(primary_window=\"Main\")\n\n", "id": "2314283", "language": "Python", "matching_score": 1.380849003791809, "max_stars_count": 1, "path": "Tools/header_analyzer.py" }, { "content": "from setuptools import setup, find_packages, Distribution\nfrom codecs import open\nfrom os import path\n\n# import readme content\nwith open(\"../docs/README.md\", encoding='utf-8') as f:\n long_description = f.read()\n\n# use info file created by BuildPythonWheel.py\nwith open(\"distinfo.txt\", encoding='utf-8') as file:\n lines = file.readlines()\n DEARPYGUI_VERSION = lines[0].rstrip(\"\\n\")\n\nclass BinaryDistribution(Distribution):\n \"\"\"Distribution which always forces a binary package with platform name\"\"\"\n def has_ext_modules(foo):\n return True\n\nsetup(\n name='dearpygui', # Required\n version=DEARPYGUI_VERSION, # Required\n author=\"<NAME> and <NAME>\", # Optional\n author_email=\"<EMAIL>\", # Optional\n description='DearPyGui: A simple Python GUI Toolkit', # Required\n long_description=long_description, # Optional\n long_description_content_type='text/markdown', # Optional\n url='https://github.com/hoffstadt/DearPyGui', # Optional\n license = 'MIT',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows :: Windows 10',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Software Development :: User Interfaces',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required\n package_data={ # Optional\n 'dearpygui': [\"core.so\", \"core.pyd\", \"core.pyi\", \"simple.py\", \"contexts.py\",\n \"themes.py\", \"demo.py\", \"vcruntime140_1.dll\"],\n },\n distclass=BinaryDistribution\n)\n", "id": "2963697", "language": "Python", "matching_score": 1.0836808681488037, "max_stars_count": 0, "path": "Distribution/setup.py" }, { "content": "import dearpygui.core as dpg\nimport dearpygui.simple as smpl\nfrom dearpygui.demo import show_demo\n\ndpg.add_font(\"google\", \"../../Resources/NotoSerifCJKjp-Medium.otf\", 20)\ndpg.set_font(\"google\", 20)\n\ndpg.add_texture_container(id=\"mvTextureContainer\")\n\nshow_demo()\n\nsmpl.start_dearpygui()", "id": "3894367", "language": "Python", "matching_score": 0.39946603775024414, "max_stars_count": 0, "path": "DearSandbox/sandbox.py" } ]
1.232265
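Once the Flask app in `dart_scraper.py` above is running locally, its `/download` route accepts a JSON body with the fields read in that handler. A hypothetical client call is sketched below; the `requests` dependency, API key, and corp code are placeholders and not part of the record above.

```python
# Hypothetical client call against the local /download route of dart_scraper.py.
# Assumptions: the server runs on the default port 5000, `requests` is installed,
# and the API key / corp code values below are placeholders.
import requests

payload = {
    "api_key": "x" * 40,    # 40-character DART API key (placeholder)
    "crp_cd": "005930",     # corp code to download (placeholder)
    "start_dt": "20120101",
    "end_dt": None,
    "path": "./fsdata",
    "separate": False,
    "report_tp": "annual",
}

resp = requests.post("http://127.0.0.1:5000/download", json=payload)
print(resp.json())          # e.g. {"ret_code": "success", "msg": "..."}
```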
aschueth
[ { "content": "# Copyright (c) 2017 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\nr\"\"\"Tests the operation of MetPy's unit support code.\"\"\"\n\nfrom distutils.version import LooseVersion\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pint\nimport pytest\n\nfrom metpy.testing import assert_array_almost_equal, assert_array_equal\nfrom metpy.testing import assert_nan, set_agg_backend # noqa: F401\nfrom metpy.units import check_units, concatenate, pandas_dataframe_to_unit_arrays, units\n\n\ndef test_concatenate():\n \"\"\"Test basic functionality of unit-aware concatenate.\"\"\"\n result = concatenate((3 * units.meter, 400 * units.cm))\n assert_array_equal(result, np.array([3, 4]) * units.meter)\n assert not isinstance(result.m, np.ma.MaskedArray)\n\n\ndef test_concatenate_masked():\n \"\"\"Test concatenate preserves masks.\"\"\"\n d1 = units.Quantity(np.ma.array([1, 2, 3], mask=[False, True, False]), 'degC')\n result = concatenate((d1, 32 * units.degF))\n\n truth = np.ma.array([1, np.inf, 3, 0])\n truth[1] = np.ma.masked\n\n assert_array_almost_equal(result, units.Quantity(truth, 'degC'), 6)\n assert_array_equal(result.mask, np.array([False, True, False, False]))\n\n\[email protected](pint.__version__ == '0.9', reason=('Currently broken upstream (see '\n 'pint#751'))\[email protected]_image_compare(tolerance=0, remove_text=True)\ndef test_axhline():\n r\"\"\"Ensure that passing a quantity to axhline does not error.\"\"\"\n fig, ax = plt.subplots()\n ax.axhline(930 * units('mbar'))\n ax.set_ylim(900, 950)\n ax.set_ylabel('')\n return fig\n\n\[email protected](pint.__version__ == '0.9', reason=('Currently broken upstream (see '\n 'pint#751'))\[email protected]_image_compare(tolerance=0, remove_text=True)\ndef test_axvline():\n r\"\"\"Ensure that passing a quantity to axvline does not error.\"\"\"\n fig, ax = plt.subplots()\n ax.axvline(0 * units('degC'))\n ax.set_xlim(-1, 1)\n ax.set_xlabel('')\n return fig\n\n\n#\n# Tests for unit-checking decorator\n#\n\n\ndef unit_calc(temp, press, dens, mixing, unitless_const):\n r\"\"\"Stub calculation for testing unit checking.\"\"\"\n pass\n\n\ntest_funcs = [\n check_units('[temperature]', '[pressure]', dens='[mass]/[volume]',\n mixing='[dimensionless]')(unit_calc),\n check_units(temp='[temperature]', press='[pressure]', dens='[mass]/[volume]',\n mixing='[dimensionless]')(unit_calc),\n check_units('[temperature]', '[pressure]', '[mass]/[volume]',\n '[dimensionless]')(unit_calc)]\n\n\[email protected]('func', test_funcs, ids=['some kwargs', 'all kwargs', 'all pos'])\ndef test_good_units(func):\n r\"\"\"Test that unit checking passes good units regardless.\"\"\"\n func(30 * units.degC, 1000 * units.mbar, 1.0 * units('kg/m^3'), 1, 5.)\n\n\ntest_params = [((30 * units.degC, 1000 * units.mb, 1 * units('kg/m^3'), 1, 5 * units('J/kg')),\n {}, [('press', '[pressure]', 'millibarn')]),\n ((30, 1000, 1.0, 1, 5.), {}, [('press', '[pressure]', 'none'),\n ('temp', '[temperature]', 'none'),\n ('dens', '[mass]/[volume]', 'none')]),\n ((30, 1000 * units.mbar),\n {'dens': 1.0 * units('kg / m'), 'mixing': 5 * units.m, 'unitless_const': 2},\n [('temp', '[temperature]', 'none'),\n ('dens', '[mass]/[volume]', 'kilogram / meter'),\n ('mixing', '[dimensionless]', 'meter')])]\n\n\[email protected](sys.version_info < (3, 3), reason='Unit checking requires Python >= 3.3')\[email protected]('func', test_funcs, ids=['some kwargs', 'all kwargs', 'all 
pos'])\[email protected]('args,kwargs,bad_parts', test_params,\n ids=['one bad arg', 'all args no units', 'mixed args'])\ndef test_bad(func, args, kwargs, bad_parts):\n r\"\"\"Test that unit checking flags appropriate arguments.\"\"\"\n with pytest.raises(ValueError) as exc:\n func(*args, **kwargs)\n\n message = str(exc.value)\n assert func.__name__ in message\n for param in bad_parts:\n assert '`{}` requires \"{}\" but given \"{}\"'.format(*param) in message\n\n # Should never complain about the const argument\n assert 'unitless_const' not in message\n\n\ndef test_pandas_units_simple():\n \"\"\"Simple unit attachment to two columns.\"\"\"\n df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])\n df_units = {'cola': 'kilometers', 'colb': 'degC'}\n res = pandas_dataframe_to_unit_arrays(df, column_units=df_units)\n cola_truth = np.array([1, 2, 3]) * units.km\n colb_truth = np.array([4, 5, 6]) * units.degC\n assert_array_equal(res['cola'], cola_truth)\n assert_array_equal(res['colb'], colb_truth)\n\n\[email protected](\"ignore:Pandas doesn't allow columns to be created\")\ndef test_pandas_units_on_dataframe():\n \"\"\"Unit attachment based on a units attribute to a dataframe.\"\"\"\n df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])\n df.units = {'cola': 'kilometers', 'colb': 'degC'}\n res = pandas_dataframe_to_unit_arrays(df)\n cola_truth = np.array([1, 2, 3]) * units.km\n colb_truth = np.array([4, 5, 6]) * units.degC\n assert_array_equal(res['cola'], cola_truth)\n assert_array_equal(res['colb'], colb_truth)\n\n\[email protected](\"ignore:Pandas doesn't allow columns to be created\")\ndef test_pandas_units_on_dataframe_not_all_united():\n \"\"\"Unit attachment with units attribute with a column with no units.\"\"\"\n df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])\n df.units = {'cola': 'kilometers'}\n res = pandas_dataframe_to_unit_arrays(df)\n cola_truth = np.array([1, 2, 3]) * units.km\n colb_truth = np.array([4, 5, 6])\n assert_array_equal(res['cola'], cola_truth)\n assert_array_equal(res['colb'], colb_truth)\n\n\ndef test_pandas_units_no_units_given():\n \"\"\"Ensure unit attachment fails if no unit information is given.\"\"\"\n df = pd.DataFrame(data=[[1, 4], [2, 5], [3, 6]], columns=['cola', 'colb'])\n with pytest.raises(ValueError):\n pandas_dataframe_to_unit_arrays(df)\n\n\ndef test_added_degrees_units():\n \"\"\"Test that our added degrees units are present in the registry.\"\"\"\n # Test equivalence of abbreviations/aliases to our defined names\n assert str(units('degrees_N').units) == 'degrees_north'\n assert str(units('degreesN').units) == 'degrees_north'\n assert str(units('degree_north').units) == 'degrees_north'\n assert str(units('degree_N').units) == 'degrees_north'\n assert str(units('degreeN').units) == 'degrees_north'\n assert str(units('degrees_E').units) == 'degrees_east'\n assert str(units('degreesE').units) == 'degrees_east'\n assert str(units('degree_east').units) == 'degrees_east'\n assert str(units('degree_E').units) == 'degrees_east'\n assert str(units('degreeE').units) == 'degrees_east'\n\n # Test equivalence of our defined units to base units\n assert units('degrees_north') == units('degrees')\n assert units('degrees_north').to_base_units().units == units.radian\n assert units('degrees_east') == units('degrees')\n assert units('degrees_east').to_base_units().units == units.radian\n\n\ndef test_gpm_unit():\n \"\"\"Test that the gpm unit does alias to meters.\"\"\"\n x = 1 * units('gpm')\n 
assert str(x.units) == 'meter'\n\n\ndef test_assert_nan():\n \"\"\"Test that assert_nan actually fails when not given a NaN.\"\"\"\n with pytest.raises(AssertionError):\n assert_nan(1.0 * units.m, units.inches)\n\n\ndef test_assert_nan_checks_units():\n \"\"\"Test that assert_nan properly checks units.\"\"\"\n with pytest.raises(AssertionError):\n assert_nan(np.nan * units.m, units.second)\n\n\[email protected](LooseVersion(pint.__version__) < LooseVersion('0.10'),\n reason='Custom preprocessors only available in Pint 0.10')\ndef test_percent_units():\n \"\"\"Test that percent sign units are properly parsed and interpreted.\"\"\"\n assert str(units('%').units) == 'percent'\n\n\[email protected](LooseVersion(pint.__version__) < LooseVersion('0.10'),\n reason='Custom preprocessors only available in Pint 0.10')\ndef test_udunits_power_syntax():\n \"\"\"Test that UDUNITS style powers are properly parsed and interpreted.\"\"\"\n assert units('m2 s-2').units == units.m ** 2 / units.s ** 2\n", "id": "2126962", "language": "Python", "matching_score": 2.0335440635681152, "max_stars_count": 1, "path": "tests/units/test_units.py" }, { "content": "# Copyright (c) 2018 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"\nxarray with MetPy Tutorial\n==========================\n\n`xarray <http://xarray.pydata.org/>`_ is a powerful Python package that provides N-dimensional\nlabeled arrays and datasets following the Common Data Model. While the process of integrating\nxarray features into MetPy is ongoing, this tutorial demonstrates how xarray can be used\nwithin the current version of MetPy. MetPy's integration primarily works through accessors\nwhich allow simplified projection handling and coordinate identification. Unit and calculation\nsupport is currently available in a limited fashion, but should be improved in future\nversions.\n\"\"\"\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport matplotlib.pyplot as plt\nimport xarray as xr\n\n# Any import of metpy will activate the accessors\nimport metpy.calc as mpcalc\nfrom metpy.testing import get_test_data\nfrom metpy.units import units\n\n#########################################################################\n# Getting Data\n# ------------\n#\n# While xarray can handle a wide variety of n-dimensional data (essentially anything that can\n# be stored in a netCDF file), a common use case is working with model output. Such model\n# data can be obtained from a THREDDS Data Server using the siphon package, but for this\n# tutorial, we will use an example subset of GFS data from <NAME> (September 5th,\n# 2017).\n\n# Open the netCDF file as a xarray Dataset\ndata = xr.open_dataset(get_test_data('irma_gfs_example.nc', False))\n\n# View a summary of the Dataset\nprint(data)\n\n#########################################################################\n# Preparing Data\n# --------------\n#\n# To make use of the data within MetPy, we need to parse the dataset for projection\n# information following the CF conventions. 
For this, we use the\n# ``data.metpy.parse_cf()`` method, which will return a new, parsed ``DataArray`` or\n# ``Dataset``.\n#\n# Additionally, we rename our data variables for easier reference.\n\n# To parse the full dataset, we can call parse_cf without an argument, and assign the returned\n# Dataset.\ndata = data.metpy.parse_cf()\n\n# If we instead want just a single variable, we can pass that variable name to parse_cf and\n# it will return just that data variable as a DataArray.\ndata_var = data.metpy.parse_cf('Temperature_isobaric')\n\n# If we want only a subset of variables, we can pass a list of variable names as well.\ndata_subset = data.metpy.parse_cf(['u-component_of_wind_isobaric',\n 'v-component_of_wind_isobaric'])\n\n# To rename variables, supply a dictionary between old and new names to the rename method\ndata = data.rename({\n 'Vertical_velocity_pressure_isobaric': 'omega',\n 'Relative_humidity_isobaric': 'relative_humidity',\n 'Temperature_isobaric': 'temperature',\n 'u-component_of_wind_isobaric': 'u',\n 'v-component_of_wind_isobaric': 'v',\n 'Geopotential_height_isobaric': 'height'\n})\n\n#########################################################################\n# Units\n# -----\n#\n# MetPy's DataArray accessor has a ``unit_array`` property to obtain a ``pint.Quantity`` array\n# of just the data from the DataArray (metadata is removed) and a ``convert_units`` method to\n# convert the the data from one unit to another (keeping it as a DataArray). For now, we'll\n# just use ``convert_units`` to convert our temperature to ``degC``.\n\ndata['temperature'] = data['temperature'].metpy.convert_units('degC')\n\n#########################################################################\n# Coordinates\n# -----------\n#\n# You may have noticed how we directly accessed the vertical coordinates above using their\n# names. However, in general, if we are working with a particular DataArray, we don't have to\n# worry about that since MetPy is able to parse the coordinates and so obtain a particular\n# coordinate type directly. There are two ways to do this:\n#\n# 1. Use the ``data_var.metpy.coordinates`` method\n# 2. Use the ``data_var.metpy.x``, ``data_var.metpy.y``, ``data_var.metpy.longitude``,\n# ``data_var.metpy.latitude``, ``data_var.metpy.vertical``, ``data_var.metpy.time``\n# properties\n#\n# The valid coordinate types are:\n#\n# - x\n# - y\n# - longitude\n# - latitude\n# - vertical\n# - time\n#\n# (Both approaches are shown below)\n#\n# The ``x``, ``y``, ``vertical``, and ``time`` coordinates cannot be multidimensional,\n# however, the ``longitude`` and ``latitude`` coordinates can (which is often the case for\n# gridded weather data in its native projection). 
Note that for gridded data on an\n# equirectangular projection, such as the GFS data in this example, ``x`` and ``longitude``\n# will be identical (as will ``y`` and ``latitude``).\n\n# Get multiple coordinates (for example, in just the x and y direction)\nx, y = data['temperature'].metpy.coordinates('x', 'y')\n\n# If we want to get just a single coordinate from the coordinates method, we have to use\n# tuple unpacking because the coordinates method returns a generator\nvertical, = data['temperature'].metpy.coordinates('vertical')\n\n# Or, we can just get a coordinate from the property\ntime = data['temperature'].metpy.time\n\n# To verify, we can inspect all their names\nprint([coord.name for coord in (x, y, vertical, time)])\n\n#########################################################################\n# Indexing and Selecting Data\n# ---------------------------\n#\n# MetPy provides wrappers for the usual xarray indexing and selection routines that can handle\n# quantities with units. For DataArrays, MetPy also allows using the coordinate axis types\n# mentioned above as aliases for the coordinates. And so, if we wanted 850 hPa heights,\n# we would take:\n\nprint(data['height'].metpy.sel(vertical=850 * units.hPa))\n\n#########################################################################\n# For full details on xarray indexing/selection, see\n# `xarray's documentation <http://xarray.pydata.org/en/stable/indexing.html>`_.\n\n#########################################################################\n# Projections\n# -----------\n#\n# Getting the cartopy coordinate reference system (CRS) of the projection of a DataArray is as\n# straightforward as using the ``data_var.metpy.cartopy_crs`` property:\n\ndata_crs = data['temperature'].metpy.cartopy_crs\nprint(data_crs)\n\n#########################################################################\n# The cartopy ``Globe`` can similarly be accessed via the ``data_var.metpy.cartopy_globe``\n# property:\n\ndata_globe = data['temperature'].metpy.cartopy_globe\nprint(data_globe)\n\n#########################################################################\n# Calculations\n# ------------\n#\n# Most of the calculations in `metpy.calc` will accept DataArrays by converting them\n# into their corresponding unit arrays. While this may often work without any issues, we must\n# keep in mind that because the calculations are working with unit arrays and not DataArrays:\n#\n# - The calculations will return unit arrays rather than DataArrays\n# - Broadcasting must be taken care of outside of the calculation, as it would only recognize\n# dimensions by order, not name\n#\n# As an example, we calculate geostropic wind at 500 hPa below:\n\nlat, lon = xr.broadcast(y, x)\nf = mpcalc.coriolis_parameter(lat)\ndx, dy = mpcalc.lat_lon_grid_deltas(lon, lat, initstring=data_crs.proj4_init)\nheights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]\nu_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)\nprint(u_geo)\nprint(v_geo)\n\n#########################################################################\n# Also, a limited number of calculations directly support xarray DataArrays or Datasets (they\n# can accept *and* return xarray objects). 
Right now, this includes\n#\n# - Derivative functions\n# - ``first_derivative``\n# - ``second_derivative``\n# - ``gradient``\n# - ``laplacian``\n# - Cross-section functions\n# - ``cross_section_components``\n# - ``normal_component``\n# - ``tangential_component``\n# - ``absolute_momentum``\n# - Smoothing functions\n# - ``smooth_gaussian``\n# - ``smooth_n_point``\n# - ``smooth_window``\n# - ``smooth_rectangular``\n# - ``smooth_circular``\n#\n# More details can be found by looking at the documentation for the specific function of\n# interest.\n\n#########################################################################\n# There is also the special case of the helper function, ``grid_deltas_from_dataarray``, which\n# takes a ``DataArray`` input, but returns unit arrays for use in other calculations. We could\n# rewrite the above geostrophic wind example using this helper function as follows:\n\nheights = data['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}]\nlat, lon = xr.broadcast(y, x)\nf = mpcalc.coriolis_parameter(lat)\ndx, dy = mpcalc.grid_deltas_from_dataarray(heights)\nu_geo, v_geo = mpcalc.geostrophic_wind(heights, f, dx, dy)\nprint(u_geo)\nprint(v_geo)\n\n#########################################################################\n# Plotting\n# --------\n#\n# Like most meteorological data, we want to be able to plot these data. DataArrays can be used\n# like normal numpy arrays in plotting code, which is the recommended process at the current\n# point in time, or we can use some of xarray's plotting functionality for quick inspection of\n# the data.\n#\n# (More detail beyond the following can be found at `xarray's plotting reference\n# <http://xarray.pydata.org/en/stable/plotting.html>`_.)\n\n# A very simple example example of a plot of 500 hPa heights\ndata['height'].metpy.loc[{'time': time[0], 'vertical': 500. * units.hPa}].plot()\nplt.show()\n\n#########################################################################\n\n# Let's add a projection and coastlines to it\nax = plt.axes(projection=ccrs.LambertConformal())\ndata['height'].metpy.loc[{'time': time[0],\n 'vertical': 500. * units.hPa}].plot(ax=ax, transform=data_crs)\nax.coastlines()\nplt.show()\n\n#########################################################################\n\n# Or, let's make a full 500 hPa map with heights, temperature, winds, and humidity\n\n# Select the data for this time and level\ndata_level = data.metpy.loc[{time.name: time[0], vertical.name: 500. 
* units.hPa}]\n\n# Create the matplotlib figure and axis\nfig, ax = plt.subplots(1, 1, figsize=(12, 8), subplot_kw={'projection': data_crs})\n\n# Plot RH as filled contours\nrh = ax.contourf(x, y, data_level['relative_humidity'], levels=[70, 80, 90, 100],\n colors=['#99ff00', '#00ff00', '#00cc00'])\n\n# Plot wind barbs, but not all of them\nwind_slice = slice(5, -5, 5)\nax.barbs(x[wind_slice], y[wind_slice],\n data_level['u'].metpy.unit_array[wind_slice, wind_slice].to('knots'),\n data_level['v'].metpy.unit_array[wind_slice, wind_slice].to('knots'),\n length=6)\n\n# Plot heights and temperature as contours\nh_contour = ax.contour(x, y, data_level['height'], colors='k', levels=range(5400, 6000, 60))\nh_contour.clabel(fontsize=8, colors='k', inline=1, inline_spacing=8,\n fmt='%i', rightside_up=True, use_clabeltext=True)\nt_contour = ax.contour(x, y, data_level['temperature'], colors='xkcd:deep blue',\n levels=range(-26, 4, 2), alpha=0.8, linestyles='--')\nt_contour.clabel(fontsize=8, colors='xkcd:deep blue', inline=1, inline_spacing=8,\n fmt='%i', rightside_up=True, use_clabeltext=True)\n\n# Add geographic features\nax.add_feature(cfeature.LAND.with_scale('50m'), facecolor=cfeature.COLORS['land'])\nax.add_feature(cfeature.OCEAN.with_scale('50m'), facecolor=cfeature.COLORS['water'])\nax.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='#c7c783', zorder=0)\nax.add_feature(cfeature.LAKES.with_scale('50m'), facecolor=cfeature.COLORS['water'],\n edgecolor='#c7c783', zorder=0)\n\n# Set a title and show the plot\nax.set_title('500 hPa Heights (m), Temperature (\\u00B0C), Humidity (%) at '\n + time[0].dt.strftime('%Y-%m-%d %H:%MZ').item())\nplt.show()\n\n#########################################################################\n# What Could Go Wrong?\n# --------------------\n#\n# Depending on your dataset and what you are trying to do, you might run into problems with\n# xarray and MetPy. Below are examples of some of the most common issues\n#\n# - Multiple coordinate conflict\n# - An axis not being available\n# - An axis not being interpretable\n# - Arrays not broadcasting in calculations\n#\n# **Coordinate Conflict**\n#\n# Code:\n#\n# ::\n#\n# x = data['Temperature'].metpy.x\n#\n# Error Message:\n#\n# ::\n#\n# /home/user/env/MetPy/metpy/xarray.py:305: UserWarning: More than\n# one x coordinate present for variable \"Temperature\".\n#\n# Fix:\n#\n# Manually assign the coordinates using the ``assign_coordinates()`` method on your DataArray,\n# or by specifying the ``coordinates`` argument to the ``parse_cf()`` method on your Dataset,\n# to map the ``time``, ``vertical``, ``y``, ``latitude``, ``x``, and ``longitude`` axes (as\n# applicable to your data) to the corresponding coordinates.\n#\n# ::\n#\n# data['Temperature'].assign_coordinates({'time': 'time', 'vertical': 'isobaric',\n# 'y': 'y', 'x': 'x'})\n# x = data['Temperature'].metpy.x\n#\n# or\n#\n# ::\n#\n# temperature = data.metpy.parse_cf('Temperature',\n# coordinates={'time': 'time', 'vertical': 'isobaric',\n# 'y': 'y', 'x': 'x'})\n# x = temperature.metpy.x\n#\n# **Axis Unavailable**\n#\n# Code:\n#\n# ::\n#\n# data['Temperature'].metpy.vertical\n#\n# Error Message:\n#\n# ::\n#\n# AttributeError: vertical attribute is not available.\n#\n# This means that your data variable does not have the coordinate that was requested, at\n# least as far as the parser can recognize. 
Verify that you are requesting a\n# coordinate that your data actually has, and if it still is not available,\n# you will need to manually specify the coordinates as discussed above.\n#\n# **Axis Not Interpretable**\n#\n# Code:\n#\n# ::\n#\n# x, y, ensemble = data['Temperature'].metpy.coordinates('x', 'y', 'ensemble')\n#\n# Error Message:\n#\n# ::\n#\n# AttributeError: 'ensemble' is not an interpretable axis\n#\n# This means that you are requesting a coordinate that MetPy is (currently) unable to parse.\n# While this means it cannot be recognized automatically, you can still obtain your desired\n# coordinate directly by accessing it by name. If you have a need for systematic\n# identification of a new coordinate type, we welcome pull requests for such new functionality\n# on GitHub!\n#\n# **Broadcasting in Calculations**\n#\n# Code:\n#\n# ::\n#\n# theta = mpcalc.potential_temperature(data['isobaric3'], data['temperature'])\n#\n# Error Message:\n#\n# ::\n#\n# ValueError: operands could not be broadcast together with shapes (9,31,81,131) (31,)\n#\n# This is a symptom of the incomplete integration of xarray with MetPy's calculations; the\n# calculations currently convert the DataArrays to unit arrays that do not recognize which\n# coordinates match with which. And so, we must do some manipulations.\n#\n# Fix 1 (xarray broadcasting):\n#\n# ::\n#\n# pressure, temperature = xr.broadcast(data['isobaric3'], data['temperature'])\n# theta = mpcalc.potential_temperature(pressure, temperature)\n#\n# Fix 2 (unit array broadcasting):\n#\n# ::\n#\n# theta = mpcalc.potential_temperature(\n# data['isobaric3'].metpy.unit_array[None, :, None, None],\n# data['temperature'].metpy.unit_array\n# )\n#\n", "id": "7401002", "language": "Python", "matching_score": 2.423119306564331, "max_stars_count": 1, "path": "tutorials/xarray_tutorial.py" }, { "content": "# Copyright (c) 2018 MetPy Developers.\n# Distributed under the terms of the BSD 3-Clause License.\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"Tools to help with mapping/geographic applications.\n\nCurrently this includes tools for working with CartoPy projections.\n\n\"\"\"\nimport cartopy.crs as ccrs\n\nfrom ..cbook import Registry\n\n\nclass CFProjection(object):\n \"\"\"Handle parsing CF projection metadata.\"\"\"\n\n # mapping from Cartopy to CF vocabulary\n _default_attr_mapping = [('false_easting', 'false_easting'),\n ('false_northing', 'false_northing'),\n ('central_latitude', 'latitude_of_projection_origin'),\n ('central_longitude', 'longitude_of_projection_origin')]\n\n projection_registry = Registry()\n\n def __init__(self, attrs):\n \"\"\"Initialize the CF Projection handler with a set of metadata attributes.\"\"\"\n self._attrs = attrs\n\n @classmethod\n def register(cls, name):\n \"\"\"Register a new projection to handle.\"\"\"\n return cls.projection_registry.register(name)\n\n @classmethod\n def build_projection_kwargs(cls, source, mapping):\n \"\"\"Handle mapping a dictionary of metadata to keyword arguments.\"\"\"\n return cls._map_arg_names(source, cls._default_attr_mapping + mapping)\n\n @staticmethod\n def _map_arg_names(source, mapping):\n \"\"\"Map one set of keys to another.\"\"\"\n return {cartopy_name: source[cf_name] for cartopy_name, cf_name in mapping\n if cf_name in source}\n\n @property\n def cartopy_globe(self):\n \"\"\"Initialize a `cartopy.crs.Globe` from the metadata.\"\"\"\n if 'earth_radius' in self._attrs:\n kwargs = {'ellipse': 'sphere', 'semimajor_axis': self._attrs['earth_radius'],\n 'semiminor_axis': 
self._attrs['earth_radius']}\n        else:\n            attr_mapping = [('semimajor_axis', 'semi_major_axis'),\n                            ('semiminor_axis', 'semi_minor_axis'),\n                            ('inverse_flattening', 'inverse_flattening')]\n            kwargs = self._map_arg_names(self._attrs, attr_mapping)\n\n        # WGS84 with semi_major==semi_minor is NOT the same as spherical Earth\n        # Also need to handle the case where we're not given any spheroid\n        kwargs['ellipse'] = None if kwargs else 'sphere'\n\n        return ccrs.Globe(**kwargs)\n\n    def to_cartopy(self):\n        \"\"\"Convert to a CartoPy projection.\"\"\"\n        globe = self.cartopy_globe\n        proj_name = self._attrs['grid_mapping_name']\n        try:\n            proj_handler = self.projection_registry[proj_name]\n        except KeyError:\n            raise ValueError('Unhandled projection: {}'.format(proj_name))\n\n        return proj_handler(self._attrs, globe)\n\n    def to_dict(self):\n        \"\"\"Get the dictionary of metadata attributes.\"\"\"\n        return self._attrs.copy()\n\n    def __str__(self):\n        \"\"\"Get a string representation of the projection.\"\"\"\n        return 'Projection: ' + self._attrs['grid_mapping_name']\n\n    def __getitem__(self, item):\n        \"\"\"Return a given attribute.\"\"\"\n        return self._attrs[item]\n\n    def __eq__(self, other):\n        \"\"\"Test equality (CFProjection with matching attrs).\"\"\"\n        return self.__class__ == other.__class__ and self.to_dict() == other.to_dict()\n\n    def __ne__(self, other):\n        \"\"\"Test inequality (not equal to).\"\"\"\n        return not self.__eq__(other)\n\n\n@CFProjection.register('geostationary')\ndef make_geo(attrs_dict, globe):\n    \"\"\"Handle geostationary projection.\"\"\"\n    attr_mapping = [('satellite_height', 'perspective_point_height'),\n                    ('sweep_axis', 'sweep_angle_axis')]\n    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)\n\n    # CartoPy can't handle central latitude for Geostationary (nor should it)\n    # Just remove it if it's 0.\n    if not kwargs.get('central_latitude'):\n        kwargs.pop('central_latitude', None)\n\n    # If sweep_angle_axis is not present, we should look for fixed_angle_axis and adjust\n    if 'sweep_axis' not in kwargs:\n        kwargs['sweep_axis'] = 'x' if attrs_dict['fixed_angle_axis'] == 'y' else 'y'\n\n    return ccrs.Geostationary(globe=globe, **kwargs)\n\n\n@CFProjection.register('lambert_conformal_conic')\ndef make_lcc(attrs_dict, globe):\n    \"\"\"Handle Lambert conformal conic projection.\"\"\"\n    attr_mapping = [('central_longitude', 'longitude_of_central_meridian'),\n                    ('standard_parallels', 'standard_parallel')]\n    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)\n    if 'standard_parallels' in kwargs:\n        try:\n            len(kwargs['standard_parallels'])\n        except TypeError:\n            kwargs['standard_parallels'] = [kwargs['standard_parallels']]\n    return ccrs.LambertConformal(globe=globe, **kwargs)\n\n\n@CFProjection.register('albers_conical_equal_area')\ndef make_aea(attrs_dict, globe):\n    \"\"\"Handle Albers Equal Area.\"\"\"\n    attr_mapping = [('central_longitude', 'longitude_of_central_meridian'),\n                    ('standard_parallels', 'standard_parallel')]\n    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)\n    if 'standard_parallels' in kwargs:\n        try:\n            len(kwargs['standard_parallels'])\n        except TypeError:\n            kwargs['standard_parallels'] = [kwargs['standard_parallels']]\n    return ccrs.AlbersEqualArea(globe=globe, **kwargs)\n\n\n@CFProjection.register('latitude_longitude')\ndef make_latlon(attrs_dict, globe):\n    \"\"\"Handle plain latitude/longitude mapping.\"\"\"\n    # TODO: Really need to use Geodetic to pass the proper globe\n    return ccrs.PlateCarree()\n\n\n@CFProjection.register('mercator')\ndef 
make_mercator(attrs_dict, globe):\n    \"\"\"Handle Mercator projection.\"\"\"\n    attr_mapping = [('latitude_true_scale', 'standard_parallel'),\n                    ('scale_factor', 'scale_factor_at_projection_origin')]\n    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)\n\n    # Work around the fact that CartoPy <= 0.16 can't handle the easting/northing\n    # or central_latitude in Mercator\n    if not kwargs.get('false_easting'):\n        kwargs.pop('false_easting', None)\n    if not kwargs.get('false_northing'):\n        kwargs.pop('false_northing', None)\n    if not kwargs.get('central_latitude'):\n        kwargs.pop('central_latitude', None)\n\n    return ccrs.Mercator(globe=globe, **kwargs)\n\n\n@CFProjection.register('stereographic')\ndef make_stereo(attrs_dict, globe):\n    \"\"\"Handle generic stereographic projection.\"\"\"\n    attr_mapping = [('scale_factor', 'scale_factor_at_projection_origin')]\n    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)\n\n    return ccrs.Stereographic(globe=globe, **kwargs)\n\n\n@CFProjection.register('polar_stereographic')\ndef make_polar_stereo(attrs_dict, globe):\n    \"\"\"Handle polar stereographic projection.\"\"\"\n    attr_mapping = [('central_longitude', 'straight_vertical_longitude_from_pole'),\n                    ('true_scale_latitude', 'standard_parallel'),\n                    ('scale_factor', 'scale_factor_at_projection_origin')]\n    kwargs = CFProjection.build_projection_kwargs(attrs_dict, attr_mapping)\n\n    return ccrs.Stereographic(globe=globe, **kwargs)\n", "id": "1128283", "language": "Python", "matching_score": 1.4359405040740967, "max_stars_count": 1, "path": "src/metpy/plots/mapping.py" } ]
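The ``CFProjection`` class in the mapping module above dispatches on the CF ``grid_mapping_name`` attribute through a decorator-based registry (the ``Registry`` it imports from ``..cbook``). Below is a minimal, dependency-free sketch of that pattern, not MetPy's actual implementation: the ``Registry`` class here is a simplified stand-in, and the handler returns a plain dict of keyword arguments instead of a cartopy CRS so the sketch runs without cartopy installed.

class Registry:
    """Simplified stand-in for the registry helper mapping.py relies on (assumption)."""

    def __init__(self):
        self._handlers = {}

    def register(self, name):
        """Decorator that stores a handler under the given CF grid_mapping_name."""
        def decorator(func):
            self._handlers[name] = func
            return func
        return decorator

    def __getitem__(self, name):
        return self._handlers[name]


projection_registry = Registry()


@projection_registry.register('lambert_conformal_conic')
def make_lcc_kwargs(attrs):
    # Map CF attribute names onto the keyword names a CRS constructor would expect.
    return {'central_longitude': attrs.get('longitude_of_central_meridian', 0.0),
            'standard_parallels': attrs.get('standard_parallel', (33.0, 45.0))}


def to_projection_kwargs(attrs):
    """Look up the handler by grid_mapping_name, mirroring the to_cartopy() dispatch above."""
    try:
        handler = projection_registry[attrs['grid_mapping_name']]
    except KeyError:
        raise ValueError('Unhandled projection: {}'.format(attrs['grid_mapping_name']))
    return handler(attrs)


print(to_projection_kwargs({'grid_mapping_name': 'lambert_conformal_conic',
                            'longitude_of_central_meridian': -95.0}))

Registering a new projection is then just another decorated function, which is why the real module can add handlers (geostationary, Mercator, stereographic, ...) without touching the dispatch code.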
2.033544
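The xarray tutorial in the record above ends with a broadcasting pitfall: MetPy calculations receive plain unit arrays, so a 1-D pressure coordinate will not line up with a 4-D temperature field by name, and "Fix 2" inserts singleton axes by hand. The sketch below reproduces that fix with only numpy and pint; it is not the tutorial's code. The Poisson formula (with an assumed kappa of roughly R_d/c_p) stands in for ``mpcalc.potential_temperature`` so it runs without MetPy, and the array shapes are invented to match the tutorial's error message.

import numpy as np
import pint

units = pint.UnitRegistry()

# Shapes mirror the tutorial's error message: temperature is (time, pressure, y, x),
# while the pressure coordinate is 1-D along axis 1.
temperature = np.full((9, 31, 81, 131), 273.15) * units.kelvin
pressure = np.linspace(1000.0, 100.0, 31) * units.hPa

# (9, 31, 81, 131) and (31,) do not broadcast by position; inserting singleton axes
# lines pressure up with axis 1 of temperature, exactly as in the tutorial's "Fix 2".
p0 = 1000.0 * units.hPa
kappa = 0.286  # assumed constant, roughly R_d / c_p for dry air
theta = temperature * (p0 / pressure[None, :, None, None]) ** kappa

print(theta.shape)  # (9, 31, 81, 131)
print(theta.units)  # kelvin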
docknetwork
[ { "content": "from abc import abstractmethod, ABC\n\nfrom cost.models import CostCalculator\n\n\nclass BidCalculator(ABC):\n \"\"\"Abstract class to implement bid calculators for different scenarios.\"\"\"\n\n def __init__(self, cost_calculator: CostCalculator, roi_factor: int):\n self.cost_calculator = cost_calculator\n self.roi_factor = roi_factor\n\n @abstractmethod\n def calculate(self) -> float:\n \"\"\"Calculate bid in USD.\"\"\"\n pass\n\n\nclass SampleBidCalculator(BidCalculator):\n\n def calculate(self) -> float:\n return self.roi_factor * self.cost_calculator.calculate()\n", "id": "5329324", "language": "Python", "matching_score": 2.8395071029663086, "max_stars_count": 5, "path": "bidding/models.py" }, { "content": "from unittest import mock\n\nfrom bidding.models import SampleBidCalculator\nfrom cost.models import EthereumCostCalculator\n\n\nclass TestSampleBidding:\n\n @mock.patch('cost.models.EthereumCostCalculator.calculate', return_value=50)\n def test_calculate(self, _):\n cost_calculator = EthereumCostCalculator(\n cryptocompare_api_key='some_api_key',\n job_data={'recipients': [{'name': \"john\"}, {'name': \"ben\"}, {'name': \"lio\"}]}\n )\n bid_calculator_x5000 = SampleBidCalculator(\n cost_calculator=cost_calculator,\n roi_factor=5000,\n )\n assert bid_calculator_x5000.calculate() == cost_calculator.calculate() * 5000\n", "id": "5621721", "language": "Python", "matching_score": 2.8527894020080566, "max_stars_count": 5, "path": "tests/test_bidding.py" }, { "content": "from unittest import mock\n\nfrom cost.models import EthereumCostCalculator\n\n\nclass TestEthereumCosts:\n ETH_GASSTATION_JSON = {'fast': 60.0, 'fastest': 200.0, 'safeLow': 10.0, 'average': 10.0,\n 'block_time': 13.048780487804878, 'blockNum': 8356493, 'speed': 0.8607744113116175,\n 'safeLowWait': 3.8, 'avgWait': 3.8, 'fastWait': 0.5, 'fastestWait': 0.4}\n CRYPTOCOMPARE_JSON = {'USD': 184.6}\n\n @mock.patch('cost.models.EthereumCostCalculator._get_remote_json', return_value=ETH_GASSTATION_JSON)\n def test_single_tx_wei_cost(self, _):\n calc = EthereumCostCalculator(cryptocompare_api_key='some_api_key', job_data={})\n single_tx_cost = calc._get_single_tx_wei_cost()\n assert single_tx_cost == 25000000000000\n assert isinstance(single_tx_cost, float)\n\n @mock.patch('cost.models.EthereumCostCalculator._get_remote_json', return_value=CRYPTOCOMPARE_JSON)\n def test_eth_usd_price(self, _):\n calc = EthereumCostCalculator(cryptocompare_api_key='some_api_key', job_data={})\n eth_usd_price = calc._get_eth_usd_price()\n assert eth_usd_price == 184.6\n assert isinstance(eth_usd_price, float)\n\n @mock.patch('cost.models.EthereumCostCalculator._get_eth_gas_station_data', return_value=ETH_GASSTATION_JSON)\n @mock.patch('cost.models.EthereumCostCalculator._get_eth_usd_price', return_value=184.6)\n def test_get_fixed_costs(self, _, __):\n calc = EthereumCostCalculator(cryptocompare_api_key='some_api_key', job_data={})\n assert calc._get_fixed_costs() == 0.004615\n\n @mock.patch('cost.models.EthereumCostCalculator._get_eth_gas_station_data', return_value=ETH_GASSTATION_JSON)\n @mock.patch('cost.models.EthereumCostCalculator._get_eth_usd_price', return_value=184.6)\n def test_get_variable_costs(self, _, __):\n calc = EthereumCostCalculator(\n cryptocompare_api_key='some_api_key',\n job_data={'recipients': [{'name': \"john\"}, {'name': \"ben\"}, {'name': \"lio\"}]}\n )\n assert calc._get_variable_costs() == 3\n\n @mock.patch('cost.models.EthereumCostCalculator._get_eth_gas_station_data', 
return_value=ETH_GASSTATION_JSON)\n @mock.patch('cost.models.EthereumCostCalculator._get_eth_usd_price', return_value=184.6)\n def test_calculate(self, _, __):\n calc = EthereumCostCalculator(\n cryptocompare_api_key='some_api_key',\n job_data={'recipients': [{'name': \"john\"}, {'name': \"ben\"}, {'name': \"lio\"}]}\n )\n usd_cost = calc.calculate()\n assert usd_cost == 3.004615\n", "id": "10736248", "language": "Python", "matching_score": 4.686378002166748, "max_stars_count": 5, "path": "tests/test_cost.py" }, { "content": "from abc import abstractmethod, ABC\nfrom typing import Dict\n\nimport requests\n\n\nclass CostCalculator(ABC):\n \"\"\"Abstract class to implement cost calculators for different scenarios.\"\"\"\n\n def __init__(self, job_data: Dict):\n self.job_data = job_data\n\n @abstractmethod\n def calculate(self) -> float:\n \"\"\"Calculate final cost in USD.\"\"\"\n pass\n\n\nclass EthereumCostCalculator(CostCalculator):\n DEFAULT_GAS_LIMIT_FOR_SINGLE_TX = 25000\n GAS_STATION_URL = 'https://ethgasstation.info/json/ethgasAPI.json'\n TO_WEI_FACTOR = 100000000\n WEI_TO_ETH_FACTOR = 1 / 1000000000000000000\n\n def __init__(self, cryptocompare_api_key: str, *args, **kwargs):\n self.CRYPTOCOMPARE_URL = f'https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD&api_key={cryptocompare_api_key}'\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def _get_remote_json(url: str) -> Dict:\n \"\"\"Retrieve a remote json.\"\"\"\n return requests.get(url).json()\n\n def _get_eth_gas_station_data(self) -> Dict:\n \"\"\"Retrieve json from Ethgasstation\"\"\"\n return self._get_remote_json(self.GAS_STATION_URL)\n\n def _get_single_tx_wei_cost(self) -> float:\n \"\"\"Calculate the wei cost of a single anchoring TX on Ethereum.\"\"\"\n gas_price = float(self._get_eth_gas_station_data()['average'])\n wei_gas_price = gas_price * self.TO_WEI_FACTOR\n return wei_gas_price * self.DEFAULT_GAS_LIMIT_FOR_SINGLE_TX\n\n def _get_eth_usd_price(self) -> float:\n \"\"\"Retrieve ETH cost in USD\"\"\"\n return float(self._get_remote_json(self.CRYPTOCOMPARE_URL)['USD'])\n\n def _get_fixed_costs(self) -> float:\n \"\"\"Calculate fixed costs for any given job to be anchored on Ethereum blockchain\"\"\"\n return self._get_single_tx_wei_cost() * self.WEI_TO_ETH_FACTOR * self._get_eth_usd_price()\n\n def _get_variable_costs(self) -> int:\n \"\"\"Calculate costs based on the time/energy/money this job will take to process.\"\"\"\n return len(self.job_data['recipients']) # TODO: implement properly, right now simply $1/recipient\n\n def calculate(self) -> float:\n return self._get_fixed_costs() + self._get_variable_costs()\n", "id": "10034544", "language": "Python", "matching_score": 0.5865468978881836, "max_stars_count": 5, "path": "cost/models.py" }, { "content": "import base64\nimport json\nfrom functools import partial\nfrom typing import Dict\nfrom flask import Response\nfrom werkzeug.test import Client\n\nfrom blockcerts.const import DEFAULT_ENCODING\n\n\nclass JsonFlaskClient:\n \"\"\"\n A tool for making tests easier, built as a wrapper around FlaskClient.\n\n It adds common JSON-related operations to calls and performs some of the well-known\n calls to the application views.\n \"\"\"\n\n def __init__(self, client: Client):\n self.client = client\n\n # register `self.<verb>` methods for common HTTP verbs\n for verb in ['get', 'post', 'put', 'delete']:\n setattr(self, verb, partial(self._make_call, verb))\n\n def _make_call(\n self,\n verb: str,\n url: str,\n data: Dict = None,\n email: str = \"\",\n 
password: str = \"\",\n content_type='application/json',\n ) -> Response:\n action_fn = getattr(self.client, verb)\n data = data or dict()\n json_data = json.dumps(data)\n headers = self._make_headers(email, password) if email else {}\n return action_fn(\n url,\n data=json_data,\n headers=headers,\n content_type=content_type,\n )\n\n @staticmethod\n def _make_headers(email=\"\", password=\"\") -> Dict:\n \"\"\"Add optional auth headers.\"\"\"\n return {\n 'Authorization': 'Basic ' + base64.b64encode(\n bytes(email + \":\" + password, DEFAULT_ENCODING)\n ).decode(DEFAULT_ENCODING)\n }\n\n\n# Dummy classes for requests.Response objects:\n\nclass Response200:\n def __init__(self):\n self.status_code = 200\n self.text = 'OK'\n\n\nclass Response201:\n def __init__(self):\n self.status_code = 201\n self.text = 'Created'\n\n\nclass Response300:\n def __init__(self):\n self.status_code = 300\n self.text = 'Multiple Choices'\n\n\nclass Response400:\n def __init__(self):\n self.status_code = 400\n self.text = 'Bad Request'\n\n\nclass Response500:\n def __init__(self):\n self.status_code = 500\n self.text = 'Internal Server Error'\n", "id": "509814", "language": "Python", "matching_score": 1.7745521068572998, "max_stars_count": 5, "path": "tests/helpers.py" }, { "content": "import re\nfrom typing import Any\n\nimport voluptuous\nfrom flask import jsonify, Response\nfrom werkzeug.exceptions import HTTPException\n\n\ndef register_errors(app):\n @app.errorhandler(404)\n def handle_not_found_error(error: Exception) -> Response:\n return ResourceNotFound(details=str(error)).jsonify()\n\n @app.errorhandler(500)\n def handle_validation_error(error: Exception) -> Response:\n return ServerError(details=str(error)).jsonify()\n\n @app.errorhandler(AppError)\n def handle_invalid_usage(error: Exception) -> Response:\n return error.jsonify()\n\n @app.errorhandler(voluptuous.MultipleInvalid)\n @app.errorhandler(voluptuous.Invalid)\n def handle_validation_error(error: Exception) -> Response:\n return ValidationError(details=str(error)).jsonify()\n\n\nclass AppError(HTTPException):\n code = 400\n\n def __init__(self, key: str = None, details: Any = None, code: int = None):\n super(AppError, self).__init__()\n self.key = key\n if code:\n self.code = code\n self.details = details\n\n @classmethod\n def slugify_exception_name(cls):\n return re.sub(r'(?<=[a-z])(?=[A-Z])', '-', cls.__name__).lower()\n\n def get_response(self, environ=None):\n return self.jsonify()\n\n def jsonify(self):\n error_obj = {\n 'error': self.slugify_exception_name(),\n 'key': self.key,\n 'details': self.details,\n }\n\n res = jsonify(error_obj)\n res.status_code = self.code\n\n return res\n\n\nclass ValidationError(AppError):\n pass\n\n\nclass ObjectExists(AppError):\n code = 409\n\n\nclass Unauthorized(AppError):\n code = 401\n\n\nclass Forbidden(AppError):\n code = 403\n\n\nclass ResourceNotFound(AppError):\n code = 404\n\n\nclass ServerError(AppError):\n code = 500\n", "id": "2635005", "language": "Python", "matching_score": 0.33026614785194397, "max_stars_count": 5, "path": "flaskapp/errors.py" }, { "content": "from typing import Dict\n\nfrom flask import Config\n\n# All config passed to `create_app()` is filtered through this list. That means that if you wish to\n# introduce a new config variable, you need to add it here, otherwise it will not be available. 
\n# Each triple represents a config variable: (<name>, <type>, <default value>).\nCONFIG_VARS = [\n ('TESTING', bool, False),\n ('DEBUG', bool, False),\n ('ETH_PUBLIC_KEY', str, None),\n ('ETH_PRIVATE_KEY', str, None),\n ('ETH_KEY_CREATED_AT', str, None),\n ('ETH_NODE_URL_ROPSTEN', str, None),\n ('ETH_NODE_URL_MAINNET', str, None),\n ('ETHERSCAN_API_TOKEN', str, None),\n]\n\n_global_config = None\n\n\ndef parse_config(config_data: Dict) -> Dict:\n config = {}\n for var_name, var_type, default_value in CONFIG_VARS:\n value = config_data.get(var_name, default_value)\n value = _convert(value, var_type)\n config[var_name] = value\n return config\n\n\ndef set_config(config: Config) -> None:\n global _global_config\n _global_config = config\n\n\ndef get_config() -> Config:\n return _global_config\n\n\ndef _convert(value, var_type):\n if type(value) is var_type or value is None:\n return value\n\n if var_type in [str, int, float]:\n return var_type(value)\n\n if var_type is bool:\n if isinstance(value, str):\n return value.lower() in ['true', 'yes', 'y', '1', 't']\n else:\n return bool(value)\n\n raise NotImplementedError('Unknown config var type', var_type)\n", "id": "11029188", "language": "Python", "matching_score": 2.0522446632385254, "max_stars_count": 5, "path": "flaskapp/config.py" }, { "content": "from typing import Dict\n\nfrom flask import Flask\n\nfrom blockcerts.misc import write_private_key_file\nfrom flaskapp.config import parse_config, set_config\nfrom flaskapp.errors import register_errors\nfrom flaskapp.routes import setup_routes\n\n\ndef create_app(config_data: Dict) -> Flask:\n app = Flask(__name__)\n\n app.config.update(parse_config(config_data))\n set_config(app.config)\n register_errors(app)\n setup_routes(app)\n write_private_key_file(app.config.get('ETH_PRIVATE_KEY'))\n return app\n", "id": "11112720", "language": "Python", "matching_score": 1.7623764276504517, "max_stars_count": 5, "path": "flaskapp/app.py" }, { "content": "import logging\nimport os\n\nfrom flaskapp.app import create_app\n\nlog = logging.getLogger(__name__)\napp = create_app(config_data=os.environ)\n\nif __name__ == '__main__':\n log.info('Serving requests')\n app.run(host='0.0.0.0', port=80)\n", "id": "1672134", "language": "Python", "matching_score": 0.04548090696334839, "max_stars_count": 5, "path": "wsgi.py" }, { "content": "from cert_issuer.simple import SimplifiedCertificateBatchIssuer\n\n\ndef test_simplfied_issuing_process(config, unsigned_certs, write_private_key_file):\n \"\"\"Please note this test actually anchors to Ropsten so you need internet access and funds in the given account.\"\"\"\n simple_certificate_batch_issuer = SimplifiedCertificateBatchIssuer(config, unsigned_certs)\n tx_id, signed_certs = simple_certificate_batch_issuer.issue()\n one_cert_id = list(signed_certs.keys())[0]\n merkle_root = signed_certs[one_cert_id]['signature']['merkleRoot']\n print(f'Check https://ropsten.etherscan.io/tx/{tx_id} to confirm it contains the merkle root \"{merkle_root}\"')\n assert tx_id\n assert merkle_root == 'cffe57bac8b8f47df9f5bb89e88dda893774b45b77d6600d5f1836d309505b61'\n\n\ndef test_simplfied_issuing_process_with_private_key(config_priv, unsigned_certs):\n \"\"\"Please note this test actually anchors to Ropsten so you need internet access and funds in the given account.\"\"\"\n simple_certificate_batch_issuer = SimplifiedCertificateBatchIssuer(config_priv, unsigned_certs)\n tx_id, signed_certs = simple_certificate_batch_issuer.issue()\n one_cert_id = list(signed_certs.keys())[0]\n 
merkle_root = signed_certs[one_cert_id]['signature']['merkleRoot']\n print(f'Check https://ropsten.etherscan.io/tx/{tx_id} to confirm it contains the merkle root \"{merkle_root}\"')\n assert tx_id\n assert merkle_root == 'cffe57bac8b8f47df9f5bb89e88dda893774b45b77d6600d5f1836d309505b61'\n", "id": "7260399", "language": "Python", "matching_score": 2.0232057571411133, "max_stars_count": 5, "path": "blockcerts/issuer/tests/test_no_fs.py" }, { "content": "import copy\nimport os\nfrom typing import Generator, Dict, Tuple\n\nfrom cert_core import Chain\nfrom cert_issuer.merkle_tree_generator import MerkleTreeGenerator\nfrom cert_schema import normalize_jsonld\nfrom eth_account.datastructures import AttributeDict\nfrom web3 import Web3\n\nfrom blockcerts.issuer.cert_issuer.helpers import _get_random_from_csv\n\n\nclass SimplifiedCertificateBatchIssuer:\n \"\"\"\n Class to issue blockcerts without relying on filesystem usage.\n\n Please note that it currently only supports anchoring to Ethereum.\n \"\"\"\n\n def __init__(self, config: 'AttrDict', unsigned_certs: dict):\n # 1- Prepare config and unsigned certs (These come from my latest changes in cert-tools\n self.config = config\n self.config.original_chain = self.config.chain\n self.config.chain = Chain.parse_from_chain(self.config.chain)\n\n self.path_to_secret = os.path.join(config.usb_name, config.key_file)\n\n self.unsigned_certs = unsigned_certs\n self.cert_generator = self._create_cert_generator()\n\n # 2- Calculate Merkle Tree and Root\n self.merkle_tree_generator = MerkleTreeGenerator()\n self.merkle_tree_generator.populate(self.cert_generator)\n self.merkle_root = self.merkle_tree_generator.get_blockchain_data()\n\n def issue(self) -> Tuple[str, Dict]:\n \"\"\"Anchor the merkle root in a blockchain transaction and add the tx id and merkle proof to each cert.\"\"\"\n tx_id = self._broadcast_transaction()\n signed_certs = self._add_proof_to_certs(tx_id)\n return tx_id, signed_certs\n\n def _add_proof_to_certs(self, tx_id) -> Dict:\n \"\"\"Add merkle proof to the JSON of the certificates.\"\"\"\n proof_generator = self.merkle_tree_generator.get_proof_generator(tx_id, self.config.chain)\n signed_certs = copy.deepcopy(self.unsigned_certs)\n for _, cert in signed_certs.items():\n proof = next(proof_generator)\n cert['signature'] = proof\n return signed_certs\n\n def _broadcast_transaction(self) -> str:\n \"\"\"Broadcast the tx used to anchor a merkle root to a given blockchain.\"\"\"\n self.transaction_handler = SimplifiedEthereumTransactionHandler(\n chain=self.config.original_chain.split('_')[1],\n path_to_secret=self.path_to_secret,\n private_key=self.config.get('eth_private_key'),\n recommended_max_cost=self.config.gas_price * self.config.gas_limit,\n account_from=self.config.get('eth_public_key') or self.config.issuing_address,\n )\n tx_id = self.transaction_handler.issue_transaction(self.merkle_root)\n return tx_id\n\n def _create_cert_generator(self) -> Generator:\n \"\"\"Return a generator of jsonld-normalized unsigned certs.\"\"\"\n for uid, cert in self.unsigned_certs.items():\n normalized = normalize_jsonld(cert, detect_unmapped_fields=False)\n yield normalized.encode('utf-8')\n\n\nclass SimplifiedEthereumTransactionHandler:\n \"\"\"Class to handle anchoring to the Ethereum network.\"\"\"\n\n def __init__(\n self,\n chain: str,\n path_to_secret: str,\n private_key: str,\n recommended_max_cost: int,\n account_from: str,\n account_to: str = '0xdeaDDeADDEaDdeaDdEAddEADDEAdDeadDEADDEaD',\n max_retry=3\n\n ):\n self.max_retry = 
max_retry\n self.account_from = account_from\n self.account_to = account_to\n self.path_to_secret = path_to_secret\n\n self.eth_node_url = self._get_node_url(chain)\n\n self.web3 = Web3(Web3.HTTPProvider(self.eth_node_url))\n assert self.web3.isConnected()\n\n self._ensure_balance(recommended_max_cost)\n self.private_key = private_key or self._read_private_key()\n\n def issue_transaction(self, merkle_root: str, gas_price: int = 20000000000, gas_limit: int = 25000) -> str:\n \"\"\"Broadcast a transaction with the merkle root as data and return the transaction id.\"\"\"\n for i in range(self.max_retry):\n signed_tx = self._get_signed_tx(merkle_root, gas_price, gas_limit, i)\n try:\n tx_hash = self.web3.eth.sendRawTransaction(signed_tx.rawTransaction)\n tx_id = self.web3.toHex(tx_hash)\n return tx_id\n except Exception as e:\n if i >= self.max_retry - 1:\n raise\n continue\n\n def _get_signed_tx(self, merkle_root: str, gas_price: int, gas_limit: int, try_count: int) -> AttributeDict:\n \"\"\"Prepare a raw transaction and sign it with the private key.\"\"\"\n nonce = self.web3.eth.getTransactionCount(self.account_from)\n tx_info = {\n 'nonce': nonce,\n 'to': self.account_to,\n 'value': 0,\n 'gas': gas_limit,\n 'gasPrice': gas_price,\n 'data': merkle_root,\n }\n if try_count:\n tx_info['nonce'] = tx_info['nonce'] + try_count\n tx_info['gas'] = self._factor_in_new_try(tx_info['gas'], try_count)\n tx_info['gasPrice'] = self._factor_in_new_try(tx_info['gasPrice'], try_count)\n signed_tx = self.web3.eth.account.sign_transaction(tx_info, self.private_key)\n return signed_tx\n\n @staticmethod\n def _factor_in_new_try(number, try_count) -> int:\n \"\"\"Increase the given number with 10% with each try.\"\"\"\n factor = float(f\"1.{try_count}\")\n return int(number * factor)\n\n def _read_private_key(self) -> str:\n \"\"\"Read private key from file.\"\"\"\n with open(self.path_to_secret) as key_file:\n key = key_file.read().strip()\n return key\n\n def _ensure_balance(self, recommended_max_cost) -> None:\n \"\"\"Make sure that the Ethereum account's balance is enough to cover the tx costs.\"\"\"\n assert self.web3.eth.getBalance(self.account_from) >= recommended_max_cost\n\n @staticmethod\n def _get_node_url(chain: str) -> str:\n \"\"\"Returns the url to a node for the chosen chain. 
It is possible to provide multiple values in the envvar.\"\"\"\n if chain == 'mainnet':\n return _get_random_from_csv(os.environ.get('ETH_NODE_URL_MAINNET'))\n elif chain == 'ropsten':\n return _get_random_from_csv(os.environ.get('ETH_NODE_URL_ROPSTEN'))\n", "id": "4565741", "language": "Python", "matching_score": 4.726688385009766, "max_stars_count": 5, "path": "blockcerts/issuer/cert_issuer/simple.py" }, { "content": "import copy\nfrom datetime import datetime\nfrom typing import List\n\nfrom attrdict import AttrDict\nfrom cert_core import to_certificate_model\nfrom cert_verifier.verifier import verify_certificate\nfrom web3 import Web3\nfrom web3.exceptions import TransactionNotFound\n\nfrom blockcerts.const import HTML_DATE_FORMAT, PLACEHOLDER_RECIPIENT_NAME, PLACEHOLDER_RECIPIENT_EMAIL, \\\n PLACEHOLDER_ISSUING_DATE, PLACEHOLDER_ISSUER_LOGO, PLACEHOLDER_ISSUER_SIGNATURE_FILE, PLACEHOLDER_EXPIRATION_DATE, \\\n PLACEHOLDER_CERT_TITLE, PLACEHOLDER_CERT_DESCRIPTION, ETH_PRIVATE_KEY_PATH, ETH_PRIVATE_KEY_FILE_NAME, \\\n HTML_PLACEHOLDERS, RECIPIENT_NAME_KEY, RECIPIENT_EMAIL_KEY, RECIPIENT_ADDITIONAL_FIELDS_KEY, RECIPIENT_EXPIRES_KEY\nfrom blockcerts.issuer.cert_issuer.simple import SimplifiedCertificateBatchIssuer\nfrom blockcerts.tools.cert_tools.create_v2_certificate_template import create_certificate_template\nfrom blockcerts.tools.cert_tools.instantiate_v2_certificate_batch import create_unsigned_certificates_from_roster\nfrom flaskapp.config import get_config\nfrom flaskapp.errors import ValidationError\n\n\ndef write_private_key_file(private_key: str) -> None:\n \"\"\"Write the given ETH Private Key to the default key file.\"\"\"\n with open(f\"{ETH_PRIVATE_KEY_PATH}/{ETH_PRIVATE_KEY_FILE_NAME}\", mode='w') as private_key_file:\n private_key_file.write(private_key)\n\n\ndef get_display_html_for_recipient(recipient: AttrDict, template: AttrDict, issuer: AttrDict) -> str:\n \"\"\"Take the template's displayHtml and replace placeholders in it.\"\"\"\n expiration = recipient.get(RECIPIENT_ADDITIONAL_FIELDS_KEY, {}).get(RECIPIENT_EXPIRES_KEY) or template.get(\n 'expires_at') or 'None'\n result = copy.deepcopy(template.display_html)\n replacements = [\n (PLACEHOLDER_RECIPIENT_NAME, recipient.get(RECIPIENT_NAME_KEY)),\n (PLACEHOLDER_RECIPIENT_EMAIL, recipient.get(RECIPIENT_EMAIL_KEY)),\n (PLACEHOLDER_ISSUING_DATE, datetime.utcnow().strftime(HTML_DATE_FORMAT)),\n (PLACEHOLDER_ISSUER_LOGO, str(issuer.logo_file)),\n (PLACEHOLDER_ISSUER_SIGNATURE_FILE, issuer.signature_file),\n (PLACEHOLDER_EXPIRATION_DATE, expiration),\n (PLACEHOLDER_CERT_TITLE, template.title),\n (PLACEHOLDER_CERT_DESCRIPTION, template.description),\n ]\n for key in recipient.get(RECIPIENT_ADDITIONAL_FIELDS_KEY, {}).keys():\n if not key == RECIPIENT_EXPIRES_KEY:\n replacements.append(\n (f\"%{key.upper()}%\", recipient.get(RECIPIENT_ADDITIONAL_FIELDS_KEY, {}).get(key))\n )\n for key, value in replacements:\n if value:\n result = result.replace(key, value)\n return result\n\n\ndef issue_certificate_batch(issuer_data: AttrDict, template_data: AttrDict, recipients_data: List,\n job_data: AttrDict) -> List:\n \"\"\"Issue a batch of certificates and return them as a list.\"\"\"\n job_config = get_job_config(job_data)\n ensure_valid_issuer_data(issuer_data)\n ensure_valid_template_data(template_data)\n tools_config = get_tools_config(issuer_data, template_data, job_config)\n issuer_config = get_issuer_config(job_data, job_config)\n recipients = format_recipients(recipients_data, template_data, issuer_data)\n template = 
create_certificate_template(tools_config)\n unsigned_certs = create_unsigned_certificates_from_roster(\n template,\n recipients,\n False,\n tools_config.additional_per_recipient_fields,\n tools_config.hash_emails\n )\n simple_certificate_batch_issuer = SimplifiedCertificateBatchIssuer(issuer_config, unsigned_certs)\n tx_id, signed_certs = simple_certificate_batch_issuer.issue()\n return tx_id, signed_certs\n\n\ndef get_job_config(job_data: AttrDict) -> AttrDict:\n \"\"\"Returns the overall config modified by inputs in the job section\"\"\"\n config = get_config()\n config = AttrDict(dict((k.lower(), v) for k, v in config.items()))\n if job_data.get('eth_public_key') and job_data.get('eth_private_key') and job_data.get('eth_key_created_at'):\n config.eth_public_key = job_data.eth_public_key\n config.eth_private_key = job_data.eth_private_key\n config.eth_key_created_at = job_data.eth_key_created_at\n return AttrDict(config)\n\n\ndef get_tools_config(issuer: AttrDict, template: AttrDict, job_config: AttrDict) -> AttrDict:\n if template.get('expires_at'):\n template.additional_global_fields = template.additional_global_fields + (\n {\"path\": \"$.expires\", \"value\": template.get('expires_at')},\n )\n return AttrDict(\n no_files=True,\n issuer_logo_file=issuer.logo_file,\n cert_image_file=template.image,\n issuer_url=issuer.main_url,\n issuer_intro_url=issuer.intro_url,\n issuer_email=issuer.email,\n issuer_name=issuer.name,\n issuer_id=issuer.id,\n certificate_description=template.description,\n certificate_title=template.title,\n criteria_narrative=template.criteria_narrative,\n hash_emails=False,\n revocation_list_uri=issuer.revocation_list,\n issuer_public_key=job_config.get('eth_public_key'),\n badge_id=str(template.id),\n issuer_signature_lines=issuer.signature_lines,\n issuer_signature_file=issuer.signature_file,\n additional_global_fields=template.additional_global_fields,\n additional_per_recipient_fields=template.additional_per_recipient_fields,\n display_html=template.display_html,\n public_key_created_at=job_config.get('eth_key_created_at'),\n )\n\n\ndef get_issuer_config(job: AttrDict, job_config: AttrDict) -> AttrDict:\n eth_public_key = job_config.get('eth_public_key').split(':')[1] if \":\" in job_config.get(\n 'eth_public_key') else job_config.get('eth_public_key')\n return AttrDict(\n issuing_address=eth_public_key,\n chain=job.blockchain,\n usb_name=ETH_PRIVATE_KEY_PATH,\n key_file=ETH_PRIVATE_KEY_FILE_NAME,\n eth_private_key=job_config.get('eth_private_key'),\n unsigned_certificates_dir=\"\",\n blockchain_certificates_dir=\"\",\n work_dir=\"\",\n safe_mode=False,\n gas_price=job.gas_price,\n gas_limit=job.gas_limit,\n api_token=\"\",\n )\n\n\ndef ensure_valid_issuer_data(issuer: AttrDict) -> None:\n \"\"\"Validate the issuer object has all needed properties.\"\"\"\n if not issuer.logo_file:\n raise ValidationError('issuer needs a logo file before it is able to issue')\n\n\ndef ensure_valid_template_data(template: AttrDict) -> None:\n \"\"\"Validate the template object has all needed properties.\"\"\"\n if not template.image:\n raise ValidationError('template needs an image file before it can be used to issue')\n\n\ndef format_recipients(recipients_data: List, template_data: AttrDict, issuer_data: AttrDict) -> List:\n \"\"\"Replace placeholders with the right data the given template uses them in display_html.\"\"\"\n if any(word in template_data.display_html for word in HTML_PLACEHOLDERS):\n for recipient in recipients_data:\n 
recipient[RECIPIENT_ADDITIONAL_FIELDS_KEY]['displayHtml'] = get_display_html_for_recipient(\n recipient, template_data, issuer_data\n )\n return recipients_data\n\n\ndef get_tx_receipt(chain: str, tx_id: str) -> dict:\n \"\"\"\n Get a tx receipt given its hash.\n\n :param chain: One of mainnet or ropsten\n :param tx_id: Transaction hash.\n :return: dict with tx receipt.\n \"\"\"\n config = get_config()\n if chain.lower() == 'mainnet':\n provider = config.get('ETH_NODE_URL_MAINNET')\n elif chain.lower() == 'ropsten':\n provider = config.get('ETH_NODE_URL_ROPSTEN')\n else:\n raise ValidationError(f\"Unknown chain '{chain}'.\")\n\n if not provider:\n raise ValidationError(f\"Node url for chain '{chain}' not found in config.\")\n\n web3 = Web3(Web3.HTTPProvider(provider))\n assert web3.isConnected()\n\n try:\n receipt = web3.eth.getTransactionReceipt(tx_id)\n except TransactionNotFound:\n return None\n\n return _safe_hex_attribute_dict(receipt)\n\n\ndef _safe_hex_attribute_dict(hex_attrdict: AttrDict) -> dict:\n \"\"\"Convert any 'AttributeDict' type found to 'dict'.\"\"\"\n parsed_dict = dict(hex_attrdict)\n for key, val in parsed_dict.items():\n if 'dict' in str(type(val)).lower():\n parsed_dict[key] = _safe_hex_attribute_dict(val)\n elif 'HexBytes' in str(type(val)):\n parsed_dict[key] = val.hex()\n return parsed_dict\n\n\ndef verify_cert(cert_json):\n \"\"\"Run verification on the given cert, return a tuple with (overall_result, individual_results)\"\"\"\n config = get_config()\n certificate_model = to_certificate_model(certificate_json=cert_json)\n result = verify_certificate(\n certificate_model,\n dict(etherscan_api_token=config.get('ETHERSCAN_API_TOKEN', ''))\n )\n all_steps_passed = all(d.get('status') == 'passed' for d in result)\n return all_steps_passed, result\n", "id": "3799206", "language": "Python", "matching_score": 3.7611005306243896, "max_stars_count": 5, "path": "blockcerts/misc.py" }, { "content": "from attrdict import AttrDict\nfrom flask import jsonify, request\nfrom voluptuous import Schema, REMOVE_EXTRA\n\nfrom blockcerts.const import ISSUER_SCHEMA, TEMPLATE_SCHEMA, RECIPIENT_SCHEMA, JOB_SCHEMA\nfrom blockcerts.misc import issue_certificate_batch, get_tx_receipt, verify_cert\nfrom flaskapp.config import get_config\n\n\ndef setup_routes(app):\n @app.route('/ping')\n def ping_route():\n return jsonify({'reply': 'pong'})\n\n @app.route('/issue', methods=['POST'])\n def issue_certs():\n issuing_job_schema = Schema(\n {\n 'issuer': ISSUER_SCHEMA,\n 'template': TEMPLATE_SCHEMA,\n 'recipients': RECIPIENT_SCHEMA,\n 'job': JOB_SCHEMA,\n },\n required=True,\n extra=REMOVE_EXTRA,\n )\n payload = issuing_job_schema(request.get_json())\n tx_id, signed_certs = issue_certificate_batch(\n AttrDict(payload['issuer']),\n AttrDict(payload['template']),\n [AttrDict(rec) for rec in payload['recipients']],\n AttrDict(payload['job']),\n )\n return jsonify(dict(\n tx_id=tx_id,\n signed_certificates=list(signed_certs.values())\n ))\n\n @app.route('/config', methods=['GET'])\n def public_config():\n config = get_config()\n return jsonify(\n dict(\n ETH_PUBLIC_KEY=config.get('ETH_PUBLIC_KEY'),\n ETH_KEY_CREATED_AT=config.get('ETH_KEY_CREATED_AT'),\n )\n )\n\n @app.route('/tx/<chain>/<tx_id>', methods=['GET'])\n def tx_receipt(chain, tx_id):\n receipt = get_tx_receipt(chain, tx_id)\n if receipt:\n return jsonify(dict(receipt)), 200\n return f\"Tx '{tx_id}' not found in chain '{chain}'.\", 404\n\n @app.route('/verify', methods=['POST'])\n def verify():\n payload = request.get_json()\n results = 
verify_cert(payload)\n        return jsonify(dict(\n            verified=results[0],\n            steps=results[1]\n        ))\n", "id": "1348569", "language": "Python", "matching_score": 2.830878973007202, "max_stars_count": 5, "path": "flaskapp/routes.py" }, { "content": "from unittest import mock\n\nimport pytest\nfrom flask import url_for\n\nfrom blockcerts.const import RECIPIENT_NAME_KEY, RECIPIENT_EMAIL_KEY\nfrom blockcerts.misc import issue_certificate_batch, format_recipients, get_tx_receipt\n\n\ndef test_issuing(app, issuer, template, three_recipients, job):\n    tx_id, issued_certs = issue_certificate_batch(issuer, template, three_recipients, job)\n    assert isinstance(issued_certs, dict)\n    assert len(issued_certs) == 3\n\n\ndef test_issuing_custom_keypair(app, issuer, template, three_recipients, job_custom_keypair_1):\n    tx_id, issued_certs = issue_certificate_batch(issuer, template, three_recipients, job_custom_keypair_1)\n    assert isinstance(issued_certs, dict)\n    assert len(issued_certs) == 3\n\n\ndef test_verify(app, json_client, issued_cert):\n    response = json_client.post(\n        url_for('verify', _external=True),\n        data=issued_cert\n    )\n    results = response.json\n    assert isinstance(results, dict)\n    assert isinstance(results['steps'], list)\n    assert results['verified'] is True\n\n\ndef test_verify_tampered(app, json_client, issued_cert):\n    issued_cert['recipientProfile']['name'] = 'Some Malicious Player Name'\n    response = json_client.post(\n        url_for('verify', _external=True),\n        data=issued_cert\n    )\n    results = response.json\n    assert isinstance(results, dict)\n    assert isinstance(results['steps'], list)\n    assert results['verified'] is False\n\n\ndef test_issuing_endpoint(app, issuer, template, three_recipients, job_custom_keypair_2, json_client):\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job_custom_keypair_2\n        )\n    )\n    assert isinstance(response.json, dict)\n    signed_certificates = response.json['signed_certificates']\n    assert isinstance(signed_certificates, list)\n    assert len(signed_certificates) == 3\n    assert 'expires' not in signed_certificates[0].keys()\n    assert 'expires' not in signed_certificates[1].keys()\n    assert 'expires' not in signed_certificates[2].keys()\n\n\ndef test_issuing_endpoint_empty_recipients(app, issuer, template, job, json_client):\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=[],\n            job=job\n        )\n    )\n    assert response.status_code == 400\n    assert response.json['details'] == \"length of value must be at least 1 for dictionary value @ data['recipients']\"\n\n\n@pytest.mark.parametrize(\"missing_key\",\n                         [\"name\", \"main_url\", \"id\", \"email\", \"logo_file\", \"revocation_list\", \"intro_url\",\n                          \"signature_lines\", \"signature_file\"])\ndef test_issuing_endpoint_issuer_missing_field(app, issuer, template, three_recipients, job_custom_keypair_1,\n                                               json_client, missing_key):\n    issuer.pop(missing_key)\n    assert missing_key not in issuer.keys()\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job_custom_keypair_1\n        )\n    )\n    assert response.status_code == 400\n    assert response.json['details'] == f\"required key not provided @ data['issuer']['{missing_key}']\"\n\n\n@pytest.mark.parametrize(\"missing_key\",\n                         [\"id\", \"title\", \"description\", \"criteria_narrative\", \"image\", \"additional_global_fields\",\n
                          \"additional_per_recipient_fields\", \"display_html\"])\ndef test_issuing_endpoint_template_missing_field(app, issuer, template, three_recipients, job_custom_keypair_2,\n                                                 json_client,\n                                                 missing_key):\n    template.pop(missing_key)\n    assert missing_key not in template.keys()\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job_custom_keypair_2\n        )\n    )\n    assert response.status_code == 400\n    assert response.json['details'] == f\"required key not provided @ data['template']['{missing_key}']\"\n\n\ndef test_issuing_endpoint_job_missing_field(app, issuer, template, three_recipients, job, json_client):\n    job.pop('blockchain')\n    assert 'blockchain' not in job.keys()\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job\n        )\n    )\n    assert response.status_code == 400\n    assert response.json['details'] == f\"required key not provided @ data['job']['blockchain']\"\n\n\n@pytest.mark.parametrize(\"missing_key\", [\"name\", \"identity\", \"pubkey\", \"additional_fields\"])\ndef test_issuing_endpoint_recipient_missing_field(app, issuer, template, three_recipients, job_custom_keypair_1,\n                                                  json_client,\n                                                  missing_key):\n    three_recipients[0].pop(missing_key)\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job_custom_keypair_1\n        )\n    )\n    assert response.status_code == 400\n    assert response.json['details'] == f\"required key not provided @ data['recipients'][0]['{missing_key}']\"\n\n\ndef test_issuing_endpoint_with_expiration(app, issuer, template, three_recipients, job_custom_keypair_2, json_client):\n    template.expires_at = \"2028-02-07T23:52:16.636+00:00\"\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job_custom_keypair_2\n        )\n    )\n\n    assert isinstance(response.json, dict)\n    signed_certificates = response.json['signed_certificates']\n    assert isinstance(signed_certificates, list)\n    assert len(signed_certificates) == 3\n    assert signed_certificates[0]['expires']\n    assert signed_certificates[1]['expires']\n    assert signed_certificates[2]['expires']\n\n\ndef test_recipient_specific_html_creation(app, issuer, template, three_recipients, json_client):\n    template.expires_at = \"2028-02-07T23:52:16.636+00:00\"\n    template.display_html = '\"%RECIPIENT_NAME%\" - \"%RECIPIENT_EMAIL%\" - \"%ISSUING_DATE%\" \"%ISSUER_LOGO%\" ' \\\n                            '\"%ISSUER_SIGNATURE_FILE%\" \"%EXPIRATION_DATE%\" \"%CERT_TITLE%\" \"%CERT_DESCRIPTION%\"'\n    for recipient in three_recipients:\n        assert not recipient['additional_fields']['displayHtml']\n    recipients = format_recipients(three_recipients, template, issuer)\n    for recipient in recipients:\n        assert recipient['additional_fields']['displayHtml'].startswith(f'\"{recipient.get(RECIPIENT_NAME_KEY)}\" - '\n                                                                        f'\"{recipient.get(RECIPIENT_EMAIL_KEY)}\" - \"')\n        assert recipient['additional_fields']['displayHtml'].endswith(f'\"{template.expires_at}\" \"{template.title}\" '\n                                                                      f'\"{template.description}\"')\n\n\ndef test_recipient_specific_html_creation_additional_fields(app, issuer, template, three_recipients, json_client):\n    template.additional_per_recipient_fields = (\n
        template.additional_per_recipient_fields[0],\n        {\"path\": \"$.expires\", \"value\": \"\", \"csv_column\": \"expires\"},\n        {\"path\": \"$.some.custom.path\", \"value\": \"\", \"csv_column\": \"custom_field_1\"},\n        {\"path\": \"$.some.other.custom.path\", \"value\": \"\", \"csv_column\": \"custom_field_2\"}\n    )\n    three_recipients[0].additional_fields = {\n        'expires': '2018-01-07T23:52:16.636+00:00', 'custom_field_1': 'foo1', 'custom_field_2': 'bar1'\n    }\n    three_recipients[1].additional_fields = {\n        'expires': '2028-02-07T23:52:16.636+00:00', 'custom_field_1': 'foo2', 'custom_field_2': 'bar2'\n    }\n    three_recipients[2].additional_fields = {\n        'expires': '2038-03-07T23:52:16.636+00:00', 'custom_field_1': 'foo3', 'custom_field_2': 'bar3'\n    }\n    template.expires_at = \"3028-02-07T23:52:16.636+00:00\"  # This should not matter because of the per-recipient ones\n\n    template.display_html = '\"%RECIPIENT_NAME%\" - ' \\\n                            '\"%EXPIRATION_DATE%\" - ' \\\n                            '\"%SOME_WRONG_PLACEHOLDER%\" - ' \\\n                            '\"%CUSTOM_FIELD_1%\" - ' \\\n                            '\"%CUSTOM_FIELD_2%\"'\n\n    for recipient in three_recipients:\n        assert not recipient['additional_fields'].get('displayHtml')\n\n    recipients = format_recipients(three_recipients, template, issuer)\n\n    for recipient in recipients:\n        assert recipient['additional_fields']['displayHtml'] == f'\"{recipient.get(RECIPIENT_NAME_KEY)}\" - ' \\\n                                                                f'\"{recipient[\"additional_fields\"][\"expires\"]}\" - ' \\\n                                                                f'\"%SOME_WRONG_PLACEHOLDER%\" - ' \\\n                                                                f'\"{recipient[\"additional_fields\"][\"custom_field_1\"]}\" - ' \\\n                                                                f'\"{recipient[\"additional_fields\"][\"custom_field_2\"]}\"'\n\n\ndef test_issuing_endpoint_custom_keys(app, issuer, template, three_recipients, json_client, job, job_custom_keypair_1,\n                                      job_custom_keypair_2):\n    response = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job\n        )\n    )\n\n    assert isinstance(response.json, dict)\n    signed_certificates = response.json['signed_certificates']\n    assert isinstance(signed_certificates, list)\n    assert len(signed_certificates) == 3\n\n    response_1 = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job_custom_keypair_1\n        )\n    )\n\n    assert isinstance(response_1.json, dict)\n    signed_certificates_1 = response_1.json['signed_certificates']\n    assert isinstance(signed_certificates_1, list)\n    assert len(signed_certificates_1) == 3\n\n    response_2 = json_client.post(\n        url_for('issue_certs', _external=True),\n        data=dict(\n            issuer=issuer,\n            template=template,\n            recipients=three_recipients,\n            job=job_custom_keypair_2\n        )\n    )\n\n    assert isinstance(response_2.json, dict)\n    signed_certificates_2 = response_2.json['signed_certificates']\n    assert isinstance(signed_certificates_2, list)\n    assert len(signed_certificates_2) == 3\n\n    assert signed_certificates[0]['verification']['publicKey'] != signed_certificates_1[0]['verification'][\n        'publicKey'] != signed_certificates_2[0]['verification']['publicKey']\n\n\ndef test_tx_receipt(app):\n    tx_receipt = get_tx_receipt('ropsten', '0x36d7c25a79b3a32f0bfa59547f837f62ced399a8a700a6f00147ddd5339b2505')\n    assert tx_receipt\n    assert isinstance(tx_receipt, dict)\n\n\ndef test_tx_receipt_endpoint(app, json_client):\n    response = json_client.get('/tx/ropsten/0x36d7c25a79b3a32f0bfa59547f837f62ced399a8a700a6f00147ddd5339b2505')\n    assert response\n    assert response.status_code == 200\n    assert response.json['blockHash'] == '0x09f1b0e57f5e6a84280084d39da157cf806b28d090e78159d5e24041d8d93fe2'\n\n\n@mock.patch('flaskapp.routes.get_tx_receipt', 
return_value=None)\ndef test_tx_receipt_endpoint_missing_tx(app, json_client):\n response = json_client.get('/tx/ropsten/0x36d7c25a79b3a32f0bfa59547f837f62ced399a8a700a6f00147ddd5339b2505')\n assert not response.json\n assert response.status_code == 404\n\n\ndef test_issuing_endpoint_with_per_recipient_expiration(app, issuer, template, three_recipients, job_custom_keypair_1,\n json_client):\n three_recipients[0].additional_fields = {'expires': '2018-01-07T23:52:16.636+00:00'}\n three_recipients[1].additional_fields = {'expires': '2028-02-07T23:52:16.636+00:00'}\n three_recipients[2].additional_fields = {'expires': '2038-03-07T23:52:16.636+00:00'}\n template.additional_per_recipient_fields = (\n template.additional_per_recipient_fields[0],\n {\"path\": \"$.expires\", \"value\": \"\", \"csv_column\": \"expires\"}\n )\n\n response = json_client.post(\n url_for('issue_certs', _external=True),\n data=dict(\n issuer=issuer,\n template=template,\n recipients=three_recipients,\n job=job_custom_keypair_1\n )\n )\n\n assert isinstance(response.json, dict)\n signed_certificates = response.json['signed_certificates']\n assert isinstance(signed_certificates, list)\n assert len(signed_certificates) == 3\n\n assert signed_certificates[0]['expires'] == three_recipients[0].additional_fields['expires']\n assert signed_certificates[1]['expires'] == three_recipients[1].additional_fields['expires']\n assert signed_certificates[2]['expires'] == three_recipients[2].additional_fields['expires']\n", "id": "5322553", "language": "Python", "matching_score": 2.9176628589630127, "max_stars_count": 5, "path": "tests/test_issuing.py" }, { "content": "from flask import url_for\n\n\ndef test_issuing_endpoint(app, json_client):\n response = json_client.get(\n url_for('public_config', _external=True),\n )\n assert 'ETH_KEY_CREATED_AT' in response.json.keys()\n assert 'ETH_PUBLIC_KEY' in response.json.keys()\n", "id": "941058", "language": "Python", "matching_score": 0.9004089832305908, "max_stars_count": 5, "path": "tests/test_system.py" }, { "content": "#!/usr/bin/env python\n'''\nGenerates the issuer file (.json) thar represents the issues which is needed for issuing and validating certificates.\n\nCurrently, just not check for inputs' validity (e.g. 
valid address, URLs, etc.)\n'''\nimport json\nimport os\nimport sys\n\nimport configargparse\nfrom cert_schema import *\n\nfrom cert_tools import helpers\n\nISSUER_TYPE = 'Profile'\n\nOPEN_BADGES_V2_CONTEXT_JSON = OPEN_BADGES_V2_CANONICAL_CONTEXT\nBLOCKCERTS_V2_CONTEXT_JSON = BLOCKCERTS_V2_CANONICAL_CONTEXT\n\n\ndef generate_issuer_file(config, issuer):\n    issuer_json = generate_issuer(config)\n\n    output_handle = open(config.output_file, 'w') if (\n            hasattr(config, 'output_file') and config.output_file) else sys.stdout\n\n    output_handle.write(json.dumps(issuer_json, indent=2))\n\n    if output_handle is not sys.stdout:\n        output_handle.close()\n\n\ndef generate_issuer(config):\n    if hasattr(config, 'public_key_created') and config.public_key_created:\n        issued_on = config.public_key_created\n    else:\n        issued_on = helpers.create_iso8601_tz()\n    context = [OPEN_BADGES_V2_CONTEXT_JSON, BLOCKCERTS_V2_CONTEXT_JSON]\n    issuer = {\n        '@context': context,\n        'id': config.issuer_id,\n        'url': config.issuer_url,\n        'name': config.issuer_name,\n        'email': config.issuer_email,\n        'image': helpers.get_b64encoded_image(config, 'issuer_logo_file'),\n        'publicKey': [{'id': config.issuer_public_key, \"created\": issued_on}],\n        'revocationList': config.revocation_list_uri,\n        'type': ISSUER_TYPE\n    }\n    if config.intro_url:\n        issuer['introductionUrl'] = config.intro_url\n    return issuer\n\n\ndef get_config():\n    cwd = os.getcwd()\n    p = configargparse.getArgumentParser(default_config_files=[os.path.join(cwd, 'conf.ini')])\n    p.add('-c', '--my-config', required=True, is_config_file=True, help='config file path')\n    p.add_argument('--no_files', action='store_true', help='avoid using files as intermediate or final results')\n    p.add_argument('--data_dir', type=str, help='where data files are located')\n    p.add_argument('-k', '--issuer_public_key', type=str, required=True,\n                   help='The key(s) an issuer uses to sign Assertions. See https://openbadgespec.org/#Profile for more details')\n    p.add_argument('--public_key_created', type=str,\n                   help='ISO8601-formatted date the issuer public key should be considered active')\n    p.add_argument('-r', '--revocation_list_uri', type=str, required=True,\n                   help='URI of the Revocation List used for marking revocation. See https://openbadgespec.org/#Profile for more details')\n    p.add_argument('-d', '--issuer_id', type=str, required=True,\n                   help='the issuer\'s publicly accessible identification file; i.e. 
URL of the file generated by this tool')\n p.add_argument('-u', '--issuer_url', type=str, help='the issuer\\'s main URL address')\n p.add_argument('-n', '--issuer_name', type=str, help='the issuer\\'s name')\n p.add_argument('-e', '--issuer_email', type=str, help='the issuer\\'s email')\n p.add_argument('-m', '--issuer_logo_file', type=str, help='the issuer\\' logo image')\n p.add_argument('-i', '--intro_url', required=False, type=str, help='the issuer\\'s introduction URL address')\n p.add_argument('-o', '--output_file', type=str, help='the output file to save the issuer\\'s identification file')\n args, _ = p.parse_known_args()\n args.abs_data_dir = os.path.abspath(os.path.join(cwd, args.data_dir))\n\n return args\n\n\ndef main():\n conf = get_config()\n issuer = generate_issuer(conf)\n generate_issuer_file(conf, issuer)\n\n\nif __name__ == \"__main__\":\n main()\n", "id": "11483874", "language": "Python", "matching_score": 1.8539899587631226, "max_stars_count": 5, "path": "blockcerts/tools/cert_tools/create_v2_issuer.py" }, { "content": "from cert_issuer import *\nfrom cert_tools import *\n", "id": "1268934", "language": "Python", "matching_score": 0, "max_stars_count": 5, "path": "blockcerts/tools/__init__.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom subprocess import PIPE, Popen\nfrom typing import Dict, List, NewType, Tuple, Union\n\nResult = NewType(\"Result\", dict)\n\nPINNED_VERSION = [2, 0, 0]\nPINNED_CURVE = \"sr25519\"\n\n\nclass SubstrateKeyManager:\n def __init__(self):\n self.check_args = [\"subkey\", \"--version\"]\n self.sk_generate_args = [\"subkey\", \"generate\"]\n self.sk_sign_args = []\n self.sk_verify_args = []\n self.verify_messagee = b\"Signature verifies correctly.\\n\"\n assert self._check_subkey()\n\n @staticmethod\n def _shproc(cmd_args: List) -> Tuple:\n ''' generic subprocess handler '''\n out, err = b\"\", b\"\"\n try:\n proc = Popen(cmd_args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n except FileNotFoundError as exc:\n err = \"Target {} not found: {}. Make sure it is spelled correclty and installed.\"\n err.format(cmd_args[0], exc.args).encode(\"utf8\")\n except Exception as exc:\n # TODO: raise straight up ?\n err = \"Target {} not found: {}. 
Make sure it is spelled correclty and installed.\"\n err.format(cmd_args[0], exc.args).encode(\"utf8\")\n\n return (out, err)\n\n def _verify_version(self, reference_version: List, version_bstring: bytes) -> bool:\n ''' subkey version checker '''\n _v = version_bstring.decode().strip(\"\\n\").split(\" \")[1].split(\".\")\n _v[:] = [int(i) for i in _v]\n return _v == reference_version\n\n def _check_subkey(self, ) -> Union[bool, Exception]:\n out, err = self._shproc(self.check_args)\n if not out:\n raise Exception(err)\n\n if not self._verify_version(PINNED_VERSION, out):\n msg = \"Invalid subkey version {} but expect {}\".format(out, PINNED_VERSION)\n raise Exception(msg)\n\n return True\n\n def sk_generate(self, chain: str = \"substrate\") -> Tuple:\n ''' generate a random substrate account for the specified chain '''\n # assert self._check_subkey()\n cmd_args = self.sk_generate_args\n cmd_args.insert(1, chain)\n cmd_args.insert(1, \"--network\")\n\n out, err = self._shproc(cmd_args)\n if err:\n return {}, err\n\n res = {}\n for s in out.decode().strip(\" \").split(\"\\n\"):\n if s:\n if s.startswith(\"Secret phrase\"):\n _ = s.split(\"Secret phrase \")\n res[\"Secret phrase\"] = _[1].strip(\"`\").rstrip(\" is account :``\")\n else:\n _ = s.split(\":\")\n res[_[0].strip(\" \")] = _[1].strip(\" \")\n return (res, err)\n\n def sk_sign(self, payload: str, seed: str) -> Tuple:\n ''' seed can be private key or mnemonic '''\n\n if not payload:\n return b\"\", b\"Need payloadto sign message\"\n\n if not seed:\n return b\"\", b\"Need seed or mnemonic to sign message\"\n\n proc = Popen([\"echo\", payload], stdout=PIPE)\n proc.wait()\n proc = Popen(['subkey', \"sign\", seed], stdin=proc.stdout, stdout=PIPE, stderr=PIPE)\n proc.wait()\n out, err = proc.communicate()\n if not err:\n out = out.decode().strip(\"\\n\")\n out.encode()\n return out, err\n\n def sk_verify(self, payload: str, signature: str, seed: str) -> Tuple:\n ''' seed can be private key or mnemonic '''\n if seed is None:\n return False, b\"Need seed or mnemonic to verify message\"\n\n proc = Popen([\"echo\", payload], stdout=PIPE)\n proc.wait()\n proc = Popen(['subkey', \"verify\", signature, seed], stdin=proc.stdout, stdout=PIPE, stderr=PIPE)\n proc.wait()\n out, err = proc.communicate()\n if out != self.verify_messagee:\n out = False\n else:\n out = True\n return out, err\n\n\n# tests\ndef test_sh_proc():\n out, err = SubstrateKeyManager._shproc([\"ls\"])\n assert not err\n\n out, err = SubstrateKeyManager._shproc([\"abcdec\"])\n assert not out\n assert err.startswith(\"Target\")\n\n\ndef test_generate():\n pass\n\n\ndef test_sign():\n skm = SubstrateKeyManager()\n g_out, g_err = skm.sk_generate()\n assert not g_err\n\n payload = \"hello, py test\"\n seed = g_out['Secret seed']\n mnemonic = g_out['Secret phrase']\n\n out, err = skm.sk_sign(\"\", seed)\n assert not out\n assert err.decode().startswith(\"Need payload\")\n\n out, err = skm.sk_sign(payload, \"\")\n assert not out\n assert err.decode().startswith(\"Need seed\")\n\n out, err = skm.sk_sign(payload, seed)\n assert not err\n assert len(out) == 128\n\n out, err = skm.sk_sign(payload, mnemonic)\n assert not err\n assert len(out) == 128\n\n\ndef test_verify():\n skm = SubstrateKeyManager()\n g_out, g_err = skm.sk_generate()\n assert not g_err\n\n payload = \"hello, py test\"\n seed = g_out['Secret seed']\n mnemonic = g_out['Secret phrase']\n\n out, err = skm.sk_sign(payload, seed)\n out, err = skm.sk_verify(payload, out, seed)\n assert not err\n assert out\n\n out, err = 
skm.sk_sign(payload, mnemonic)\n out, err = skm.sk_verify(payload, out, mnemonic)\n assert not err\n assert out\n", "id": "6027093", "language": "Python", "matching_score": 1.5525280237197876, "max_stars_count": 0, "path": "sk_wrapper.py" }, { "content": "#!/usr/bin/env python3\n\n# This script is run on a target machine. It expects to be in the \"./run\"\n# directory. It uses the config from run_config.yml to run a vasaplaten node.\n\nimport yaml\nimport pathlib\nimport subprocess\nimport threading\nimport time\nimport tempfile\n\n\nclass Validate:\n def __init__(self, parsed_toml):\n \"\"\"\n Take a dict parsed from toml and ensure it contains the correct fields\n with the correct types using own class members as a template\n results in a validated instance of Class with fields populated.\n \"\"\"\n\n fields = [f for f in dir(self) if not f.startswith(\n \"__\") and not f.endswith(\"__\")]\n\n if rawconf is None:\n raise \"Config is empty!\"\n for key in rawconf.keys():\n if key not in fields:\n raise Exception(\"Unexpected key, \\\"{}\\\"\".format(key))\n for field in fields:\n if field not in rawconf.keys():\n raise Exception(\"\\\"{}\\\" not in config\".format(field))\n\n for field in fields:\n validate_field = getattr(self.__class__, field)\n val = parsed_toml[field]\n validate_field(val)\n setattr(self, field, val)\n\n\nclass Config(Validate):\n \"\"\"\n Each element in this class is a named validator function. Validator\n functions are named callables that trow adescriptive exception on\n validation error.\n \"\"\"\n\n # List of bootstrap nodes to connect to\n def bootstrap(obj):\n if (\n type(obj) is not list or\n any(type(e) is not str for e in obj)\n ):\n raise Exception(\"bootstrap must be a list of strings\")\n\n # Private key used in p2p, edsa hex or None\n def p2p_secret_key(obj):\n allowed = \"<KEY>\"\n if obj is None:\n return\n if (\n type(obj) is not str or\n len(obj) != 64 or\n any(c not in allowed for c in obj)\n ):\n raise Exception(\"p2p_secret_key string must be a 64 character hex \"\n \"string or null\")\n\n # name, Chain to run (dev/local/ved)\n def chain(obj):\n options = [\"dev\", \"local\", \"ved\"]\n if obj not in options:\n raise Exception(\"chain must be one of \" + str(options))\n\n # Private Aura key, recovery phrase or None\n def aura_secret_key(obj):\n valid = type(obj) is str or obj is None\n if not valid:\n raise Exception(\"aura_secret_key must be either as string or null\")\n\n # Private Grandpa key, recovery phrase or None\n def grandpa_secret_key(obj):\n valid = type(obj) is str or obj is None\n if not valid:\n raise Exception(\"grandpa_secret_key must be either as string or \"\n \"null\")\n\n # Where to store chain state and secret keys. 
null indicates a temporary\n # directory should be used.\n def chain_storage_base_dir(obj):\n if obj is not None and type(obj) is not str:\n raise Exception(\"chain_storage_base_dir must be a path or null\")\n\n # port on which to listen for rpc over http\n def http_rpc_port(obj):\n if (\n type(obj) is not int or\n obj <= 0 or\n obj > 65535\n ):\n raise Exception(\"http_rpc_port must be an integer such that 0 < \"\n \"port <= 65535\")\n\n\ndef insert_sk(suri, keycode, typeflag, http_rpc_port):\n \"\"\"\n Add a secret keyphrase to the node keystore.\n \"\"\"\n subkey_exe = (pathlib.Path(__file__).parent / \"subkey\").resolve(True)\n PIPE = subprocess.PIPE\n start = time.time()\n timeout = 10\n command = [\n subkey_exe,\n typeflag,\n \"insert\",\n suri,\n keycode,\n f\"http://localhost:{http_rpc_port}\"\n ]\n\n assert typeflag in [\"--secp256k1\", \"--ed25519\", \"--sr25519\"]\n\n print(\"setting \" + keycode + \" key with command\", command)\n\n while time.time() < timeout + start:\n p = subprocess.run(command, stdout=PIPE, stderr=PIPE)\n if p.stderr != b\"\":\n raise Exception(p.stderr)\n if b\"ConnectionRefused\" not in p.stdout and not p.stdout == b\"\":\n raise Exception(\"unexpected output from subkey\\n\" + str(p.stdout))\n if p.stdout == b\"\":\n print(\"added key to keystore\")\n return\n raise Exception(\"timeout while trying to add \" +\n keycode + \" key to keystore\")\n\n\ndef vasaplatsen(config: Config):\n with tempfile.TemporaryDirectory() as tmp:\n vasaplatsen_exe = (pathlib.Path(__file__).parent /\n \"vasaplatsen\").resolve(True)\n base_storage_path = (\n tmp if config.chain_storage_base_dir is None\n else config.chain_storage_base_dir\n )\n command = [vasaplatsen_exe, \"--chain\", config.chain]\n command += [\"--base-path\", base_storage_path]\n command += [\"--rpc-port\", str(config.http_rpc_port)]\n for nodeaddr in config.bootstrap:\n command += [\"--bootnodes\", nodeaddr]\n if config.p2p_secret_key is not None:\n command += [\"--node-key\", config.p2p_secret_key]\n if (\n config.aura_secret_key is not None or\n config.grandpa_secret_key is not None\n ):\n command += [\"--validator\"]\n\n print(\"starting vasaplatsen with command\", command)\n child = threading.Thread(target=lambda: (subprocess.run(command)))\n child.start()\n if config.aura_secret_key is not None:\n insert_sk(config.aura_secret_key, \"aura\",\n \"--sr25519\", config.http_rpc_port)\n if config.grandpa_secret_key is not None:\n insert_sk(config.grandpa_secret_key, \"gran\",\n \"--ed25519\", config.http_rpc_port)\n child.join()\n\n\nif __name__ == \"__main__\":\n rundir = pathlib.Path(__file__).parent\n rawconf = yaml.safe_load(open(rundir / \"run_config.yml\"))\n config = Config(rawconf)\n vasaplatsen(config)\n", "id": "11096956", "language": "Python", "matching_score": 2.419322967529297, "max_stars_count": 0, "path": "run/run.py" }, { "content": "#!/usr/bin/env python3\n\nimport logging\nimport uuid\nfrom typing import List\n\nimport boto3\nimport click\nimport paramiko as paramiko\nimport yaml\nfrom botocore.exceptions import ClientError\n\nCOMMAND_DOWNLOAD = 'wget https://raw.githubusercontent.com/docknetwork/substrate-scaffold/master/install/download_run_dir.bash'\nCOMMAND_START = 'nohup bash download_run_dir.bash master run >/dev/null 2>&1'\nCOMMAND_KILL = \"pkill vasaplatsen\"\nCOMMAND_CLEAN = \"rm -rf download_run_dir.bash* substrate-scaffold vasaplatsen\"\n\n\[email protected]()\ndef main() -> None:\n \"\"\"Infrastructure tools for Docknetwork node owners\"\"\"\n 
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(asctime)s: %(message)s')\n pass\n\n\ndef execute_commands_on_linux_instances(config: dict, commands: List[str], instance_ips: List[str]):\n \"\"\"SSh to and run commands on remote linux instances\n\n :param config: dict with config\n :param commands: a list of strings, each one a command to execute on the instances\n :param instance_ids: a list of instance_id strings, of the instances on which to execute the command\n :return: the response from the send_command function (check the boto3 docs for ssm client.send_command() )\n \"\"\"\n for instance_ip in instance_ips:\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n logging.info(f\"Connecting to {instance_ip}...\")\n client.connect(\n hostname=instance_ip,\n username=config['SSH_USER'],\n password=config['SSH_PASS'],\n look_for_keys=False\n )\n except Exception as e:\n logging.error(\n f\"Could not connect to {instance_ip}. Please make sure that port 22 is open in the instance and \"\n f\"the ssh credentials in config.yml are correct before retrying.\"\n )\n continue\n\n for command in commands:\n logging.info(f\"Running {command} on {instance_ip}...\")\n try:\n client.exec_command(command)\n except Exception as e:\n logging.error(f\"Could not run '{command}' on '{instance_ip}'.\")\n logging.error(e)\n continue\n client.close()\n\n\ndef create_ec2_instances(ec2_client, image_id: str, instance_type: str, keypair_name: str, max_count: int = 1):\n \"\"\"Launch an EC2 instance. Wait until it's running before returning.\n\n :param ec2_client: boto3 client for EC2\n :param image_id: ID of AMI to launch, such as 'ami-XXXX'\n :param instance_type: string, such as 't2.micro'\n :param keypair_name: string, name of the key pair\n :return Dictionary containing information about the instance. 
If error,\n \"\"\"\n\n logging.info(f\"Creating {max_count} EC2 instance(s)...\")\n try:\n reservation = ec2_client.run_instances(\n ImageId=image_id,\n InstanceType=instance_type,\n KeyName=keypair_name,\n MinCount=1,\n MaxCount=max_count,\n TagSpecifications=[\n {\n 'ResourceType': 'instance', 'Tags':\n [\n {\"Key\": \"Name\", \"Value\": f\"Dockchain - {str(uuid.uuid4())}\"},\n {\"Key\": \"Purpose\", \"Value\": \"Dockchain Test\"}\n ]\n }\n ]\n )\n except ClientError as e:\n logging.error(e)\n raise\n\n for instance in reservation['Instances']:\n waiter = ec2_client.get_waiter('instance_running')\n waiter.wait(InstanceIds=[instance['InstanceId']])\n logging.info(f\"Successfully created EC2 instance with id '{instance['InstanceId']}'.\")\n\n return instance\n\n\ndef get_client(config, type):\n \"\"\"Get a boto3 client of the given type.\"\"\"\n return boto3.client(\n type,\n region_name=config['REGION_NAME'],\n aws_access_key_id=config['ACCESS_KEY_ID'],\n aws_secret_access_key=config['SECRET_ACCESS_KEY'],\n )\n\n\ndef get_resource(config, type):\n \"\"\"Get a boto3 resource of the given type.\"\"\"\n return boto3.resource(\n type,\n region_name=config['REGION_NAME'],\n aws_access_key_id=config['ACCESS_KEY_ID'],\n aws_secret_access_key=config['SECRET_ACCESS_KEY'],\n )\n\n\ndef get_running_instances(config):\n \"\"\"Get EC2 instances running as Docknetwork nodes.\"\"\"\n logging.info('Getting running instances...')\n ec2_resource = get_resource(config, 'ec2')\n dock_running_instances = ec2_resource.instances.filter(\n Filters=[\n {'Name': 'instance-state-name', 'Values': ['running']},\n {'Name': 'tag:Purpose', 'Values': ['Dockchain Test']}\n ]\n )\n return dock_running_instances\n\n\ndef create_keypair(config, ec2_client, key_file_name) -> None:\n \"\"\"Try to create a keypair with the given name. 
Don't fail on duplication errors.\"\"\"\n try:\n response = ec2_client.create_key_pair(KeyName=config['KEY_PAIR_NAME'])\n with open(key_file_name, 'w') as keyfile:\n keyfile.write(response['KeyMaterial'])\n except ClientError as e:\n if not e.response['Error']['Code'] == 'InvalidKeyPair.Duplicate':\n raise\n\n\ndef load_config_file(path: str = \"config.yml\") -> dict:\n \"\"\"Load yml config file.\"\"\"\n config = None\n with open(path, 'r') as ymlfile:\n config = yaml.safe_load(ymlfile)\n return config\n\n\ndef print_formatted_instances(running_instances) -> None:\n \"\"\"Prints a table with Instance Number, Id and Public IP address for the given instances\"\"\"\n BASIC_FORMAT = \"{:^3} {:^20} {:^20} {:^20}\"\n headers = ['#', 'ID', 'Public IPv4', 'Launch Datetime']\n print(BASIC_FORMAT.format(*headers))\n print(BASIC_FORMAT.format(*[\"-\" * len(i) for i in headers]))\n for i, instance in enumerate(running_instances):\n print(BASIC_FORMAT.format(\n *[i + 1, instance.id, instance.public_ip_address, instance.launch_time.strftime(\"%Y/%m/%d %H:%M:%S\")])\n )\n\n\[email protected]()\[email protected]('count', default=1)\ndef start(count: int) -> None:\n \"\"\"Create EC2 instances and set them up as Docknetwork nodes\"\"\"\n config = load_config_file()\n key_file_name = f\"{config['KEY_PAIR_NAME']}.pem\"\n ec2_client = get_client(config, 'ec2')\n create_keypair(config, ec2_client, key_file_name)\n\n create_ec2_instances(\n ec2_client,\n config['AMI_IMAGE_ID'],\n config['INSTANCE_TYPE'],\n config['KEY_PAIR_NAME'],\n max_count=count\n )\n\n input(\n \"Please visit the AWS console and enable inbound tcp traffic from any source for ports 22 and 30333 on your \"\n \"newly created instance(s) before hitting Enter:\"\n )\n\n instance_ips = [i.public_ip_address for i in get_running_instances(config)]\n if not instance_ips:\n raise Exception('ERROR: No instances with public IPs found. Exiting.')\n try:\n execute_commands_on_linux_instances(\n config,\n [\n COMMAND_DOWNLOAD,\n COMMAND_START\n ],\n instance_ips\n )\n except Exception as e:\n logging.error(\"Something went wrong.\")\n raise\n\n logging.info(f\"Successfully launched Docknetwork node(s) at: {instance_ips}\")\n logging.info('Done!')\n\n\[email protected]()\ndef list() -> None:\n \"\"\"List my EC2 instances running as Docknetwork nodes\"\"\"\n config = load_config_file()\n running_instances = get_running_instances(config)\n print_formatted_instances(running_instances)\n logging.info('Done!')\n\n\[email protected]()\ndef restart() -> None:\n \"\"\"Restart the Docknetwork process inside the running instances\"\"\"\n config = load_config_file()\n instance_ips = [i.public_ip_address for i in get_running_instances(config)]\n if not instance_ips:\n raise Exception('ERROR: No instances with public IPs found. Exiting.')\n try:\n execute_commands_on_linux_instances(\n config,\n [\n COMMAND_KILL,\n COMMAND_CLEAN,\n COMMAND_DOWNLOAD,\n COMMAND_START\n ],\n instance_ips\n )\n except Exception as e:\n logging.error(\"Something went wrong.\")\n raise\n logging.info('Done!')\n\n\[email protected]()\ndef stop() -> None:\n \"\"\"Stop the Docknetwork process & leave the EC2 instances running\"\"\"\n config = load_config_file()\n instance_ips = [i.public_ip_address for i in get_running_instances(config)]\n if not instance_ips:\n raise Exception('ERROR: No instances with public IPs found. 
Exiting.')\n try:\n execute_commands_on_linux_instances(\n config,\n [\n COMMAND_KILL\n ],\n instance_ips\n )\n except Exception as e:\n logging.error(\"Something went wrong.\")\n raise\n logging.info('Done!')\n\n\[email protected]()\ndef terminate() -> None:\n \"\"\"Terminate the EC2 instances created to run Docknetwork nodes\"\"\"\n config = load_config_file()\n instance_ids = [i.id for i in get_running_instances(config)]\n if not instance_ids:\n raise Exception('ERROR: No running EC2 instances found. Exiting.')\n ec2 = get_resource(config=config, type='ec2')\n for instance_id in instance_ids:\n try:\n logging.info(f\"Terminating {instance_id}...\")\n instance = ec2.Instance(instance_id)\n instance.terminate()\n except Exception as e:\n logging.error(\"Something went wrong.\")\n raise\n logging.info('Done!')\n\n\nif __name__ == '__main__':\n main()\n", "id": "5379434", "language": "Python", "matching_score": 1.1780272722244263, "max_stars_count": 0, "path": "infra/dockinfra.py" }, { "content": "from setuptools import setup\n\nsetup(\n name='dockinfra',\n version='0.1',\n py_modules=['dockinfra'],\n install_requires=[\n 'awscli',\n 'boto3',\n 'paramiko',\n 'Click',\n ],\n entry_points='''\n [console_scripts]\n dockinfra=dockinfra:main\n ''',\n)\n", "id": "8907784", "language": "Python", "matching_score": 0.6031321883201599, "max_stars_count": 0, "path": "infra/setup.py" } ]
1.85399
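A minimal usage sketch for the SubstrateKeyManager wrapper in the sk_wrapper.py record above, mirroring its own tests. It assumes the pinned subkey 2.0.0 binary is on PATH (the class checks this itself) and that sk_wrapper.py is importable; nothing else is taken from outside the record.

# assumes subkey 2.0.0 on PATH and sk_wrapper.py on the import path
from sk_wrapper import SubstrateKeyManager

skm = SubstrateKeyManager()                         # raises if subkey is missing or the wrong version
account, err = skm.sk_generate(chain="substrate")   # dict with "Secret seed", "Secret phrase", address fields
signature, err = skm.sk_sign("hello, py test", account["Secret seed"])
verified, err = skm.sk_verify("hello, py test", signature, account["Secret seed"])
print(verified)                                     # True when the signature verifies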
Poojayadav25
[ { "content": "import pandas as pd\r\nmessages=pd.read_csv('smsspamcollection\\SMSSpamCollection', sep='\\t', names=['label','message'])\r\n\r\nimport re\r\nimport nltk\r\nnltk.download('stopwords')\r\n\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\nps = PorterStemmer()\r\n\r\ncorpus=[]\r\nfor i in range(0, len(messages)):\r\n review=re.sub('[^a-zA-Z]',' ',messages['message'][i])\r\n review=review.lower()\r\n review=review.split()\r\n \r\n review=[ps.stem(word) for word in review if not word in stopwords.words('english')]\r\n review=' '.join(review)\r\n corpus.append(review)\r\n \r\n \r\n## creating bag of words\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\ncv=CountVectorizer(max_features=5000)\r\nx=cv.fit_transform(corpus).toarray()\r\n\r\ny=pd.get_dummies(messages['label'])\r\ny=y.iloc[:,1].values\r\n\r\n#train test splitfrom sklearn.metrics import accuracy_score\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train,x_test,y_train,y_test=train_test_split(x ,y,test_size=0.20,random_state=0)\r\n\r\n#training the model\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nspam_detect_model=MultinomialNB().fit(x_train,y_train)\r\n\r\ny_pred=spam_detect_model.predict(x_test)\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\ncomfusion_m=confusion_matrix(y_test,y_pred)\r\n\r\n\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy=accuracy_score(y_test,y_pred)\r\n\r\n", "id": "9591281", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "smsspam.py" }, { "content": "import pandas as pd\r\nmessages=pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\\t',names=['label','message'])\r\n\r\n\r\n#clean data and preprocessing \r\nimport re\r\nimport nltk\r\n\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import precision_recall_fscore_support as score\r\n\r\nps = PorterStemmer()\r\nlm = WordNetLemmatizer()\r\ncorpus = []\r\nfor i in range(0, len(messages)):\r\n review = re.sub('[^a-zA-Z]', ' ', messages['message'][i])\r\n review = review.lower()\r\n review = review.split()\r\n \r\n #Stemming \r\n review = [ps.stem(word) for word in review if not word in stopwords.words('english')]\r\n \r\n #lemmatization\r\n #review = [lm.lemmatize(word) for word in review if not word in stopwords.words('english')]\r\n \r\n review = ' '.join(review)\r\n corpus.append(review)\r\n \r\n# creating the bag of words model\r\n#from sklearn.feature_extraction.text import CountVectorizer\r\n#cv = CountVectorizer(max_features=5000)\r\n\r\n# Creating the TF IDF\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\ncv = TfidfVectorizer()\r\n\r\nx = cv.fit_transform(corpus).toarray()\r\ny=pd.get_dummies(messages['label'])\r\ny=y.iloc[:,1].values\r\n\r\n\r\n#test train split\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.20, random_state = 0)\r\n\r\n\r\n# Training model using Navie bayes classifier\r\n#from sklearn.naive_bayes import MultinomialNB\r\n#spam_detect_model = MultinomialNB().fit(x_train, y_train)\r\n\r\n\r\n#Random forest\r\nrf = RandomForestClassifier(n_estimators=100,max_depth=None,n_jobs=1)\r\nspam_detect_model = rf.fit(x_train,y_train)\r\n# Now, let's see the predictions. 
I would be using predict function and calculating Precision, Recall , f- score, and Accuracy measure also.\r\n\r\ny_pred = spam_detect_model.predict(x_test)\r\n\r\n#confusion matrix\r\nfrom sklearn.metrics import confusion_matrix\r\nconfusion_m = confusion_matrix(y_test, y_pred)\r\n\r\n#Accuracy and ROC curve\r\nfrom sklearn.metrics import accuracy_score, plot_roc_curve\r\n#print('Accuracy :', accuracy_score(y_test, y_pred) * 100)\r\n\r\nplot_roc_curve(spam_detect_model, x_test, y_test)\r\n\r\nprecision,recall,fscore,support =score(y_test,y_pred,pos_label=1, average ='binary')\r\nprint('Precision : {} / Recall : {} / fscore : {} / Accuracy: {}'.format(round(precision,3),round(recall,3),round(fscore,3),round((y_pred==y_test).sum()/len(y_test) * 100,8)))\r\n", "id": "12397195", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "smsspamclassifier.py" } ]
0
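A short scoring sketch for the spam-classifier scripts above: it reuses the fitted TF-IDF vectorizer and random-forest model from smsspamclassifier.py on a new message, applying the same cleaning as the training corpus. The helper name predict_spam is illustrative only, and the sketch assumes ps, cv and spam_detect_model are still in scope after running that script.

# assumes ps, cv and spam_detect_model from smsspamclassifier.py are in scope
import re
from nltk.corpus import stopwords

def predict_spam(text):
    # same cleaning as the training corpus: strip non-letters, lowercase, stem, drop stopwords
    words = re.sub('[^a-zA-Z]', ' ', text).lower().split()
    words = [ps.stem(w) for w in words if w not in stopwords.words('english')]
    features = cv.transform([' '.join(words)]).toarray()
    return spam_detect_model.predict(features)[0]   # 1 = spam under the get_dummies encoding above

print(predict_spam('WINNER!! Claim your free prize now'))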
OAboyewa
[ { "content": "import unittest\nfrom translator import english_to_french, french_to_english\n\nclass TestMachineTranslation(unittest.TestCase):\n\n def test_french_to_english_null(self):\n self.assertEqual(french_to_english(''), None)\n\n def test_english_to_french_null(self):\n self.assertEqual(english_to_french(''), None)\n\n def test_french_to_english_0(self):\n self.assertNotEqual(french_to_english('Hello'), 'Bonjour')\n \n def test_french_to_english_1(self):\n self.assertEqual(french_to_english('Bonjour'), 'Hello')\n\n def test_english_to_french_0(self):\n self.assertNotEqual(english_to_french('Bonjour'), 'Hello')\n\n def test_english_to_french_1(self):\n self.assertEqual(english_to_french('Hello'), 'Bonjour')\n\nif __name__ == '__main__':\n unittest.main()", "id": "12531449", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "final_project/machinetranslation/tests/tests.py" }, { "content": "\"\"\"machinetranslation\"\"\"\nimport json\nimport os\nfrom ibm_watson import LanguageTranslatorV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napikey = os.environ['apikey']\nurl = os.environ['url']\n\ndef english_to_french(english_text):\n \"\"\"Translates english to french\"\"\"\n authenticator = IAMAuthenticator(apikey)\n language_translator = LanguageTranslatorV3(version='2022-03-13', authenticator=authenticator)\n\n language_translator.set_service_url(url)\n\n if english_text == '':\n return None\n\n translation = language_translator.translate(text=english_text, model_id='en-fr').get_result()\n french_text = json.loads(json.dumps(translation))['translations'][0]['translation']\n\n return french_text\n\ndef french_to_english(french_text):\n \"\"\"Translates french to english\"\"\"\n authenticator = IAMAuthenticator(apikey)\n language_translator = LanguageTranslatorV3(version='2022-03-13', authenticator=authenticator)\n\n language_translator.set_service_url(url)\n\n if french_text == '':\n return None\n\n translation = language_translator.translate(text=french_text, model_id='fr-en').get_result()\n english_text = json.loads(json.dumps(translation))['translations'][0]['translation']\n\n return english_text\n", "id": "6196635", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "final_project/machinetranslation/translator.py" } ]
0
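A quick usage sketch for the machinetranslation module above, matching the behaviour its unit tests assert. It assumes apikey and url for the IBM Watson Language Translator service are set in the environment (or a .env file), exactly as translator.py expects.

# assumes apikey/url are set in the environment as translator.py expects
from translator import english_to_french, french_to_english

print(english_to_french('Hello'))    # 'Bonjour', per the tests above
print(french_to_english('Bonjour'))  # 'Hello'
print(english_to_french(''))         # None for empty input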
StuartNewcombe
[ { "content": "#Changes:\n# - added paths variables for ease of editing\n# - put code into functions\n\nimport pandas as pd\nimport matplotlib.pylab as plt\n\nanimal_rescue_hdfs_path = '/training/animal_rescue.csv'\noutput_chart_path = '/home/cdsw/rap-in-practice-python/4 Reusable Functions/cats_cost_by_year.png'\n\n#----------------------Level 1----------------------\n\ndef run_pipeline(input_path, output_path):\n rescue_incidents = read_data_to_pandas(input_path)\n rescue_cleaned = clean_data(rescue)\n cats_filtered = filter_data(rescue_cleaned)\n cats_by_year = get_cats_per_year(cats_filtered)\n cats_by_year_chart = make_chart(cats_by_year)\n save_chart(cats_by_year_chart, output_path)\n return 'Finished'\n\n#----------------------Level 2----------------------\n\ndef read_data_to_pandas(input_path):\n spark = create_spark_connection()\n rescue_spark = spark.read.csv(input_path, \n header=True, \n inferSchema=True)\n rescue_pandas = rescue_spark.toPandas()\n spark.stop()\n return rescue_pandas\n\n\ndef clean_data(rescue):\n rescue['AnimalGroupParent'] = rescue['AnimalGroupParent'].str.title()\n rescue_cleaned = df.dropna()\n return rescue_cleaned\n\n\ndef filter_data(rescue_cleaned):\n cats_filtered = rescue_cleaned[(rescue_cleaned['CalYear'] < 2019) &\\\n (rescue_cleaned['AnimalGroupParent'] == \"Cat\")]\n return cats_filtered\n\n\ndef get_cats_per_year(cats_filtered):\n aggregation_dictionary = {'IncidentNotionalCost(£)': 'sum'}\n return cats_filtered.groupby(['CalYear']).agg(aggregation_dictionary)\n\n\ndef make_chart(cats_by_year):\n chart = (\n (cats_by_year/1000)\n .plot(kind='bar', legend=None)\n .get_figure()\n )\n plt.title('London fire brigade cost of cat incidents by year')\n plt.ylabel('Cost (£ thousands)')\n plt.xlabel('Year')\n return chart\n\n\ndef save_chart(chart, output_path):\n chart.savefig(output_path, dpi = 300)\n \n\n#----------------------Level 3----------------------\n\ndef create_spark_connection():\n spark = (\n SparkSession.builder.appName(\"my-spark-app2\")\n .config(\"spark.executor.memory\", \"1500m\")\n .config(\"spark.executor.cores\", 2)\n .config(\"spark.dynamicAllocation.enabled\", 'true')\n .config('spark.dynamicAllocation.maxExecutors', 4)\n .config('spark.shuffle.service.enabled','true')\n .enableHiveSupport()\n .getOrCreate()\n )\n return spark\n\n\n#----------------------Call pipeline----------------------\n\nrun_pipeline(animal_rescue_hdfs_path, output_chart_path)", "id": "5684109", "language": "Python", "matching_score": 5.836752891540527, "max_stars_count": 16, "path": "20191217_RAP_in_DAP/4 Reusable Functions/pipeline.py" }, { "content": "#Changes: \r\n# - stripped out code that did not contribute to output\r\n# - more appropriate names\r\n# - readme file\r\n\r\n#imports\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\nfrom pyspark.sql import SparkSession\r\n\r\n#set up spark connection\r\nspark = (\r\n SparkSession.builder.appName(\"my-spark-app2\")\r\n .config(\"spark.executor.memory\", \"1500m\")\r\n .config(\"spark.executor.cores\", 2)\r\n .config(\"spark.dynamicAllocation.enabled\", 'true')\r\n .config('spark.dynamicAllocation.maxExecutors', 4)\r\n .config('spark.shuffle.service.enabled','true')\r\n .enableHiveSupport()\r\n .getOrCreate()\r\n)\r\n\r\n#import data\r\nrescue_spark = spark.read.csv(\r\n \"/training/animal_rescue.csv\", \r\n header=True, inferSchema=True, \r\n)\r\n\r\n#DECISION: the dataset is 1.8MB, use pandas\r\nrescue_incidents = rescue_spark.toPandas()\r\nspark.stop()\r\n\r\n#DECISION: 
drop nulls\r\nrescue_incidents = rescue_incidents.dropna()\r\n\r\n#DECISION: drop 2019\r\nrescue_incidents = rescue_incidents[rescue_incidents['CalYear'] < 2019]\r\n\r\n#Need to clean animal type variable\r\nrescue_incidents['AnimalGroupParent'] = rescue_incidents['AnimalGroupParent'].str.title()\r\n\r\n#DECISION: our report will be on the annual cost of cats for \r\n#the London Fire Brigade\r\n\r\ncat_incidents = rescue_incidents[rescue['AnimalGroupParent'] == 'Cat']\r\n\r\naggregation_dictionary = {'IncidentNotionalCost(£)': 'sum'}\r\n\r\ncats_chart = (\r\n (cat_incidents.groupby(['CalYear'])\r\n .agg(aggregation_dictionary)/1000)\r\n .plot(kind='bar', legend=False)\r\n .get_figure()\r\n)\r\n\r\nproject_path = '/home/cdsw/rap-in-practice-python/2 Clear Layout Structure/'\r\ncats_chart.savefig(project_path + 'cats_cost_by_year_chart.png', dpi = 300)", "id": "11786325", "language": "Python", "matching_score": 7.392566204071045, "max_stars_count": 16, "path": "20191217_RAP_in_DAP/2 Clear Layout Structure/pipeline.py" }, { "content": "#imports\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pylab as plt\r\nfrom pyspark.sql import SparkSession\r\n\r\n#set up spark connection\r\nspark = (\r\n SparkSession.builder.appName(\"my-spark-app2\")\r\n .config(\"spark.executor.memory\", \"1500m\")\r\n .config(\"spark.executor.cores\", 2)\r\n .config(\"spark.dynamicAllocation.enabled\", 'true')\r\n .config('spark.dynamicAllocation.maxExecutors', 4)\r\n .config('spark.shuffle.service.enabled','true')\r\n .enableHiveSupport()\r\n .getOrCreate()\r\n)\r\n\r\n#import data\r\nsdf = spark.read.csv(\r\n \"/training/animal_rescue.csv\", \r\n header=True, inferSchema=True, \r\n)\r\n\r\n#DECISION: the dataset is 1.8MB, use pandas\r\ndf = sdf.toPandas()\r\nspark.stop()\r\n\r\n#can we drop some columns?\r\ndf.columns\r\n#DECISION: don't need most geographical info or special service details\r\ndf.drop(['Easting_m', 'Northing_m', 'Easting_rounded', 'Northing_rounded',\r\n 'BoroughCode', 'Borough', 'StnGroundName','SpecialServiceType', \r\n 'WardCode', 'Ward','SpecialServiceTypeCategory'], \r\n axis=1, inplace=True)\r\ndf.columns\r\n\r\n#is incident number a unique column?\r\nlen(np.unique(df['IncidentNumber'])) == len(df)\r\n#True\r\n\r\n#are there any duplicates?\r\ndups = df.duplicated()\r\ndups.sum()\r\n#No\r\n\r\n#are there any missing values?\r\nnulls = df.isnull().sum()\r\nnulls[nulls>0]\r\nnulls.max() / len(df) * 100\r\n#0.6% of rows in 3 columns\r\n#DECISION: drop nulls\r\ndf = df.dropna()\r\nnulls_check = df.isnull().sum()\r\nnulls_check\r\n#Done\r\n\r\n#convert date column to datetime\r\ndf['DateTimeOfCall'] = pd.to_datetime(df['DateTimeOfCall'], \r\n infer_datetime_format=True)\r\n#span of dates?\r\ndf['DateTimeOfCall'].min()\r\n#start 1st Jan 2009\r\ndf['DateTimeOfCall'].max()\r\n#ends 1st Nov 2019\r\n\r\n#Not a full year in 2019, is there an obvious drop in counts?\r\nagg_dict = {'IncidentNumber':'count'}\r\ndf.groupby('CalYear').agg(agg_dict).plot(kind='bar')\r\n#yes, obvious drop\r\n#DECISION: drop 2019\r\ndf = df[df['CalYear'] < 2019]\r\n#check\r\ndf.groupby('CalYear').agg(agg_dict).plot(kind='bar')\r\n\r\n#how many different animal types are there?\r\nlen(np.unique(df['AnimalGroupParent']))\r\n#27\r\n\r\n#what are the most frequently rescued animals?\r\nagg_dict = {'IncidentNumber': 'count'}\r\nanimal_count = df.groupby(['AnimalGroupParent']).agg(agg_dict)\r\nanimal_count.sort_values('IncidentNumber', inplace=True, ascending=False)\r\nanimal_count[:15]\r\n#Cats, but, there are some 
cases of Cat and cat in the data- need to clean\r\ndf['AnimalGroupParent'] = df['AnimalGroupParent'].str.title()\r\nanimal_count = df.groupby(['AnimalGroupParent']).agg(agg_dict)\r\nanimal_count.sort_values('IncidentNumber', inplace=True, ascending=False)\r\nanimal_count[:15]\r\n\r\n#DECISION: our report will be on the annual cost of cats for \r\n#the London Fire Brigade\r\n\r\ncats = df[df['AnimalGroupParent'] == 'Cat']\r\nagg_dict = {'IncidentNotionalCost(£)': 'sum'}\r\n\r\ncats_chart = (\r\n (cats.groupby(['CalYear'])\r\n .agg(aggregation_dictionary)/1000)\r\n .plot(kind='bar', legend=False)\r\n .get_figure()\r\n)\r\n\r\nproject_path = '/home/cdsw/rap-in-practice-python/2 Clear Layout Structure/'\r\ncats_chart.savefig(project_path + 'chart.png', dpi = 300)", "id": "2376723", "language": "Python", "matching_score": 0.13341784477233887, "max_stars_count": 16, "path": "20191217_RAP_in_DAP/2 Clear Layout Structure/explore.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 10:23:43 2019\n\n@author: pughd\n\nUpdated after spec changes\n\"\"\"\n\n\n# Test class to hold our tests\nclass TestClass:\n\n \n # test when x > y and x + y < 10\n def test_x_gtr_y_x_add_y_lt_10(self):\n assert example_function(5,1, 9) == 0 \n \n \n # test when x > y and x + y > = 10\n def test_x_gtr_y_x_add_y_gte_10(self):\n assert example_function(10,9, 9) == 10\n \n #test that x<=y\n def test_x_lte_y(self):\n assert example_function(2,9,2) == -14\n\n\n #test string integers\n def test_str_ints(self):\n assert example_function(\"2\",\"9\", 2) == -14 \n # test neg string integers \n def test_neg_str_ints(self):\n assert example_function(\"-2\",\"-1\", 2) == 6\n\n #test str inputs\n def test_str_word_entries(self):\n assert example_function(\"test\",\"6\", 4) == -24 \n\ndef example_function(x,y,m):\n #isdigit only detects positive so use cast and catch\n x = is_int(x)\n y = is_int(y)\n m = is_int(m)\n \n \n if x > y and x + y < 10:\n return 0\n elif x > y and x + y >=10 :\n return 10\n else:\n return (x*x) - (m*y)\n \n \ndef is_int(s):\n try:\n x = int(s)\n return x\n except ValueError:\n return 0 \n", "id": "1296721", "language": "Python", "matching_score": 2.5385029315948486, "max_stars_count": 16, "path": "20191112_code_testing_overview/04_exercise2_test.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 10:23:43 2019\n\n@author: pughd\n\"\"\"\n\n\n\nclass TestClass:\n # test when x <= y\n def test_x_less_eq_y(self):\n assert example_function(2,4) == -4\n \n # test when x > y\n def test_x_gtr_y(self):\n assert example_function(5,4) == 9 \n \n \n\n\ndef example_function(x,y):\n if x > y:\n return 0\n else:\n return (x*x) - (2*y)\n", "id": "3737756", "language": "Python", "matching_score": 2.0581297874450684, "max_stars_count": 16, "path": "20191112_code_testing_overview/03_exercise1_test.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 09:38:59 2019\n\n@author: pughd\n\"\"\"\n\n# Create a class to hold our tests\n# These should be testing the edge cases\nclass TestClass:\n\n def test_eq_100(self):\n assert small_function(100,5) == 100\n \n def test_gtr_100(self):\n assert small_function(102,6) == 100\n \n def test_lt_100(self):\n assert small_function(2,89) == 91\n \n \n# Define our function\ndef small_function(x,y):\n if x< 100:\n return x + y\n else:\n return 100\n", "id": "8987304", "language": "Python", "matching_score": 2.7299745082855225, "max_stars_count": 16, 
"path": "20191112_code_testing_overview/01_example_pytest.py" }, { "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 10:28:55 2019\n\n@author: pughd\n\"\"\"\n\n\nfrom datetime import datetime\n\n\nclass TestClass:\n # Difficult to automate test this as depends on time of day!\n def test_morning1(self):\n assert time_of_day() == \"Night\"\n \n # Far easier to test \n def test_morning2(self):\n assert time_of_day_2(9) == \"Morning\"\n assert time_of_day_2(13) == \"Afternoon\"\n assert time_of_day_2(0) == \"Night\"\n assert time_of_day_2(19) == \"Evening\"\n\n\ndef time_of_day_2(hour):\n \n # Return approproiate description\n if hour >= 0 and hour < 6:\n return 'Night'\n elif hour >= 6 and hour < 12:\n return \"Morning\"\n elif hour >= 12 and hour < 18:\n return \"Afternoon\" \n else:\n return \"Evening\" \n\n\ndef time_of_day():\n # Get the current hour\n hour = datetime.now().hour\n \n # Return approproiate description\n if hour >= 0 and hour < 6:\n return 'Night'\n elif hour >= 6 and hour < 12:\n return \"Morning\"\n elif hour >= 12 and hour < 18:\n return \"Afternoon\" \n else:\n return \"Evening\"\n", "id": "6353390", "language": "Python", "matching_score": 0.9999768733978271, "max_stars_count": 16, "path": "20191112_code_testing_overview/02_example_time_test.py" } ]
2.538503
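A small driver sketch for the functions-based RAP pipeline in the records above, showing run_pipeline pointed at different paths. It assumes the same CDSW/Spark environment and the HDFS CSV named in the record; the output path here is hypothetical, and importing the module as written also triggers its own run_pipeline call at the bottom.

# assumes a CDSW/Spark environment with /training/animal_rescue.csv available on HDFS
import pipeline  # the "4 Reusable Functions" module shown above

pipeline.run_pipeline('/training/animal_rescue.csv',
                      '/home/cdsw/outputs/cats_cost_by_year.png')  # hypothetical output path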
data4goodlab
[ { "content": "import json\nimport logging\nimport os\nimport shutil\nimport sys\n\n\nfrom tqdm import tqdm\nimport requests\n\n\ndef download_file(url, output_path, exist_overwrite, min_size=0, verbose=True):\n # Todo handle requests.exceptions.ConnectionError\n if exist_overwrite or not os.path.exists(output_path):\n r = requests.get(url, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n size_read = 0\n if total_size - min_size > 0:\n with tqdm(\n total=total_size,\n unit='B',\n unit_scale=True,\n unit_divisor=1024,\n disable=not verbose\n ) as pbar:\n with open(output_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n size_read = min(total_size, size_read + 1024)\n pbar.update(len(chunk))\n\n", "id": "3665333", "language": "Python", "matching_score": 3.7983062267303467, "max_stars_count": 0, "path": "IMDb_dataset/utils.py" }, { "content": "import functools\nimport os\nimport pathlib\nimport re\nimport requests\nfrom tqdm import tqdm\nfrom turicreate import load_sframe\nfrom ScienceDynamics.datasets.configs import MAG_URL_DICT\n\ndef download_file(url, output_path, exist_overwrite=False, min_size=-1, verbose=True):\n # Todo handle requests.exceptions.ConnectionError\n if exist_overwrite or not os.path.exists(output_path):\n r = requests.get(url, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n size_read = 0\n if total_size > min_size:\n with tqdm(total=total_size, unit='B', unit_scale=True, unit_divisor=1024,desc=url, disable=not verbose) as pbar:\n with open(output_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n size_read = min(total_size, size_read + 1024)\n pbar.update(len(chunk))\n\n\ndef lazy_property(fn):\n \"\"\"\n Decorator that makes a property lazy-evaluated.\n \"\"\"\n attr_name = '_lazy_' + fn.__name__\n\n @property\n def _lazy_property(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fn(self))\n return getattr(self, attr_name)\n\n return _lazy_property\n\n\ndef save_sframe(sframe):\n def decorator_repeat(func):\n @functools.wraps(func)\n def wrapper_repeat(self, *args, **kwargs): \n sframe_path = pathlib.Path(self._sframe_dir).joinpath(sframe)\n if not sframe_path.exists():\n table_name = sframe.split(\".\")[0]\n if table_name in MAG_URL_DICT:\n url = MAG_URL_DICT[table_name]\n mag_file = self._dataset_dir / re.search(\".*files\\/(.*?)\\?\", url).group(1)\n if not pathlib.Path(mag_file).exists():\n download_file(url, mag_file)\n \n value = func(self, *args, **kwargs)\n value.save(str(sframe_path))\n else:\n value = load_sframe(str(sframe_path))\n return value\n\n return wrapper_repeat\n\n return decorator_repeat\n", "id": "9895877", "language": "Python", "matching_score": 2.19781494140625, "max_stars_count": 1, "path": "ScienceDynamics/datasets/utils.py" }, { "content": "import pathlib\nimport re\n\nfrom turicreate import SFrame\nfrom ScienceDynamics.datasets.configs import SJR_URLS, SJR_OPEN_URLS\nfrom ScienceDynamics.datasets.utils import download_file, save_sframe\nfrom ScienceDynamics.config import DATASETS_SJR_DIR\n\nclass SJR(object):\n def __init__(self, dataset_dir=None):\n if dataset_dir is None:\n dataset_dir = DATASETS_SJR_DIR\n self._dataset_dir = pathlib.Path(dataset_dir)\n self._dataset_dir.mkdir(exist_ok=True)\n self._sframe_dir = self._dataset_dir / \"sframes\"\n self._sframe_dir.mkdir(exist_ok=True)\n for y, url in SJR_URLS:\n sjr_file 
= self._dataset_dir / f'scimagojr {y}.csv'\n if not pathlib.Path(sjr_file).exists():\n download_file(url, sjr_file)\n for y, url in SJR_OPEN_URLS:\n sjr_file = self._dataset_dir / f'scimagojr_open {y}.csv'\n if not pathlib.Path(sjr_file).exists():\n download_file(url, sjr_file)\n\n def sjr_to_csv(self, regex):\n sjr_sf = SFrame()\n for p in self._dataset_dir.glob(regex):\n if p.suffix == \".csv\":\n y = int(re.match(r'.*([1-3][0-9]{3})', p.name).group(1))\n sf = SFrame.read_csv(str(p),delimiter=';')\n sf['Year'] = y\n sf = sf.rename({\"Total Docs. (%s)\" % y: \"Total Docs.\"})\n extra_cols = [\"Categories\"]\n for c in extra_cols:\n if c not in sf.column_names():\n sf[c] = ''\n sjr_sf = sjr_sf.append(sf)\n\n r_issn = re.compile('(\\\\d{8})')\n sjr_sf['Issn'] = sjr_sf['Issn'].apply(lambda i: r_issn.findall(i))\n return sjr_sf.stack('Issn', new_column_name='ISSN')\n\n @property\n @save_sframe(sframe=\"sjr.sframe\")\n def data(self):\n \"\"\"\n Creating the SJR SFrame from CSV files\n :note: please notice that each file name contains the SJR report year\n \"\"\"\n sjr_sf = self.sjr_to_csv(\"scimagojr [0-9][0-9][0-9][0-9].csv\")\n sjr_opens_sf =self.sjr_to_csv(\"scimagojr_open [0-9][0-9][0-9][0-9].csv\")\n sjr_opens_sf[\"Open\"] = 1\n return sjr_sf.join(sjr_opens_sf[[\"Sourceid\",\"Year\",\"Open\"]],on=[\"Sourceid\",\"Year\"], how=\"left\")\n\n", "id": "5494140", "language": "Python", "matching_score": 4.908206462860107, "max_stars_count": 1, "path": "ScienceDynamics/datasets/sjr.py" }, { "content": "import turicreate as tc\nimport os\nimport re\n\nfrom ScienceDynamics.config.configs import SJR_SFRAME, DATASETS_SJR_DIR\n\n\ndef create_sjr_sframe():\n \"\"\"\n Createing the SJR SFrame from CSV files\n :note: please notice that each file name contains the SJR report year\n \"\"\"\n sjr_sf = tc.SFrame()\n for p in os.listdir(DATASETS_SJR_DIR):\n if not p.endswith(\".csv\"):\n continue\n y = int(re.match(r'.*([1-3][0-9]{3})', p.split(os.path.sep)[-1]).group(1))\n sf = tc.SFrame.read_csv(\"%s/%s\" % (DATASETS_SJR_DIR, p))\n sf['Year'] = y\n sf = sf.rename({\"Total Docs. 
(%s)\" % y: \"Total Docs.\"})\n extra_cols = [\"Categories\"]\n for c in extra_cols:\n if c not in sf.column_names():\n sf[c] = ''\n sjr_sf = sjr_sf.append(sf)\n\n r_issn = re.compile('(\\\\d{8})')\n sjr_sf['Issn'] = sjr_sf['Issn'].apply(lambda i: r_issn.findall(i))\n sjr_sf = sjr_sf.stack('Issn', new_column_name='ISSN')\n sjr_sf.save(SJR_SFRAME)\n", "id": "5840055", "language": "Python", "matching_score": 1.2685202360153198, "max_stars_count": 1, "path": "ScienceDynamics/sframe_creators/create_sjr_sframe.py" }, { "content": "import pathlib\nimport re\n\nfrom ScienceDynamics.datasets.aminer import Aminer\nfrom ScienceDynamics.datasets.microsoft_academic_graph import MicrosoftAcademicGraph\nfrom ScienceDynamics.datasets.sjr import SJR\nfrom ScienceDynamics.datasets.utils import save_sframe\nimport turicreate.aggregate as agg\n\n\nclass JoinedDataset(object):\n def __init__(self, dataset_dir, sjr_path=None, aminer_path=None, mag_path=None):\n self._dataset_dir = pathlib.Path(dataset_dir)\n self._dataset_dir.mkdir(exist_ok=True)\n self._sframe_dir = self._dataset_dir / \"sframes\"\n self._sframe_dir.mkdir(exist_ok=True)\n if sjr_path is None:\n sjr_path = self._dataset_dir / \"SJR\"\n if aminer_path is None:\n aminer_path = self._dataset_dir / \"Aminer\"\n if mag_path is None:\n mag_path = self._dataset_dir / \"MAG\"\n self.aminer = Aminer(aminer_path)\n self.mag = MicrosoftAcademicGraph(mag_path)\n self.sjr = SJR(sjr_path)\n\n @property\n @save_sframe(sframe=\"PapersAMinerMagJoin.sframe\")\n def aminer_mag_links_by_doi(self):\n \"\"\"\n Create Links Sframe that match papers from the MAG dataset\n with papers from the AMiner dataset based on the papers DOI\n :return:\n \"\"\"\n extended_papers = self.mag.extended_papers\n g1 = extended_papers.groupby('Paper Document Object Identifier (DOI)', {'Count': agg.COUNT()})\n s1 = set(g1[g1['Count'] > 1]['Paper Document Object Identifier (DOI)'])\n extended_papers = extended_papers[\n extended_papers['Paper Document Object Identifier (DOI)'].apply(lambda doi: doi not in s1)]\n extended_papers.materialize()\n\n aminer = self.aminer.data\n g2 = aminer.groupby('doi', {'Count': agg.COUNT()})\n s2 = set(g2[g2['Count'] > 1]['doi'])\n aminer = aminer[aminer['doi'].apply(lambda doi: doi not in s2)]\n aminer.materialize()\n\n aminer_mag = extended_papers.join(aminer, {'Paper Document Object Identifier (DOI)': 'doi'})\n aminer_mag['title_len'] = aminer_mag['title'].apply(lambda t: len(t))\n aminer_mag['title_len2'] = aminer_mag['Original paper title'].apply(lambda t: len(t))\n aminer_mag = aminer_mag[aminer_mag['title_len'] > 0]\n aminer_mag = aminer_mag[aminer_mag['title_len2'] > 0]\n\n aminer_mag = aminer_mag.rename({\"Paper ID\": \"MAG Paper ID\", \"id\": \"Aminer Paper ID\"})\n return aminer_mag.remove_columns(['title_len', 'title_len2'])\n\n def aminer_mag_sjr(self, year):\n \"\"\"\n Creates a unified SFrame of AMiner, MAG, and the SJR datasets\n :param year: year to use for SJR data\n :return: SFrame with AMiner, MAG, and SJR data\n :rtype: SFrame\n \"\"\"\n sf = self.aminer_mag_links_by_doi\n sf = sf[sf['issn'] != None]\n sf = sf[sf['issn'] != 'null']\n sf.materialize()\n r = re.compile(r\"(\\d+)-(\\d+)\")\n sf['issn_str'] = sf['issn'].apply(lambda i: \"\".join(r.findall(i)[0]) if len(r.findall(i)) > 0 else None)\n sf = sf[sf['issn_str'] != None]\n sjr_sf = self.sjr.data\n sjr_sf = sjr_sf[sjr_sf['Year'] == year]\n return sf.join(sjr_sf, on={'issn_str': \"ISSN\"})\n", "id": "1181629", "language": "Python", "matching_score": 4.664179801940918, 
"max_stars_count": 1, "path": "ScienceDynamics/datasets/joined_dataset.py" }, { "content": "import sys\n\nfrom ScienceDynamics.config.configs import AMINER_PAPERS_SFRAME, AMINER_TXT_FILES, AMINER_MAG_JOIN_SFRAME, \\\n EXTENDED_PAPERS_SFRAME, SJR_SFRAME\nfrom ScienceDynamics.config.log_config import logger\n\nimport turicreate as tc\nimport turicreate.aggregate as agg\nimport os\nimport re\n\nsys.path.extend([\"..\"])\n\n\ndef create_aminer_sframe():\n \"\"\"\n Create AMiner Papers sFrame from the AMiner text files. After creating the SFrame, it is save to AMINER_PAPERS_SFRAME\n \"\"\"\n logger.info(\"Creating AMiner Papers SFrame\")\n if os.path.isdir(AMINER_PAPERS_SFRAME):\n return\n\n sf = tc.SFrame.read_json(AMINER_TXT_FILES, orient='lines')\n sf.save(AMINER_PAPERS_SFRAME)\n\n\ndef create_aminer_mag_links_by_doi_sframe():\n \"\"\"\n Create Links Sframe that match papers from the MAG dataset with papers from the AMiner dataset based on the papers\n DOI\n :return:\n \"\"\"\n if os.path.isdir(AMINER_MAG_JOIN_SFRAME):\n return\n sf = tc.load_sframe(EXTENDED_PAPERS_SFRAME)\n g1 = sf.groupby('Paper Document Object Identifier (DOI)', {'Count': agg.COUNT()})\n s1 = set(g1[g1['Count'] > 1]['Paper Document Object Identifier (DOI)'])\n sf = sf[sf['Paper Document Object Identifier (DOI)'].apply(lambda doi: doi not in s1)]\n sf.materialize()\n\n sf2 = tc.load_sframe(AMINER_PAPERS_SFRAME)\n g2 = sf2.groupby('doi', {'Count': agg.COUNT()})\n s2 = set(g2[g2['Count'] > 1]['doi'])\n sf2 = sf2[sf2['doi'].apply(lambda doi: doi not in s2)]\n sf2.materialize()\n\n j = sf.join(sf2, {'Paper Document Object Identifier (DOI)': 'doi'})\n j['title_len'] = j['title'].apply(lambda t: len(t))\n j['title_len2'] = j['Original paper title'].apply(lambda t: len(t))\n j = j[j['title_len'] > 0]\n j = j[j['title_len2'] > 0]\n\n j = j.rename({\"Paper ID\": \"MAG Paper ID\", \"id\": \"Aminer Paper ID\"})\n j = j.remove_columns(['title_len', 'title_len2'])\n j.save(AMINER_MAG_JOIN_SFRAME)\n\n\ndef create_aminer_mag_sjr_sframe(year):\n \"\"\"\n Creates a unified SFrame of AMiner, MAG, and the SJR datasets\n :param year: year to use for SJR data\n :return: SFrame with AMiner, MAG, and SJR data\n :rtype: tc.SFrame\n \"\"\"\n sf = tc.load_sframe(AMINER_MAG_JOIN_SFRAME)\n sf = sf[sf['issn'] != None]\n sf = sf[sf['issn'] != 'null']\n sf.materialize()\n r = re.compile(r\"(\\d+)-(\\d+)\")\n sf['issn_str'] = sf['issn'].apply(lambda i: \"\".join(r.findall(i)[0]) if len(r.findall(i)) > 0 else None)\n sf = sf[sf['issn_str'] != None]\n sjr_sf = tc.load_sframe(SJR_SFRAME)\n sjr_sf = sjr_sf[sjr_sf['Year'] == year]\n return sf.join(sjr_sf, on={'issn_str': \"ISSN\"})\n", "id": "8913172", "language": "Python", "matching_score": 1.1564948558807373, "max_stars_count": 1, "path": "ScienceDynamics/sframe_creators/create_aminer_sframes.py" }, { "content": "from ScienceDynamics.config.configs import SFRAMES_BASE_DIR\n\n\nMAG_URL_DICT = {\"Affiliations\":\"https://zenodo.org/record/2628216/files/Affiliations.txt.gz?download=1\",\n \"Authors\":\"https://zenodo.org/record/2628216/files/Authors.txt.gz?download=1\",\n \"ConferenceInstances\":\"https://zenodo.org/record/2628216/files/ConferenceInstances.txt.gz?download=1\",\n \"ConferenceSeries\":\"https://zenodo.org/record/2628216/files/ConferenceSeries.txt.gz?download=1\",\n \"FieldsOfStudy\":\"https://zenodo.org/record/2628216/files/FieldsOfStudy.txt.gz?download=1\",\n \"PaperFieldsOfStudy\": \"http://data4good.io/datasets/PaperFieldsOfStudy.txt.gz\",\n \"FieldOfStudyChildren\": 
\"http://data4good.io/datasets/FieldOfStudyChildren.txt.gz\",\n \"Journals\":\"https://zenodo.org/record/2628216/files/Journals.txt.gz?download=1\",\n \"PaperAuthorAffiliations\": \"https://zenodo.org/record/2628216/files/PaperAuthorAffiliations.txt.gz?download=1\",\n \"PaperReferences\":\"https://zenodo.org/record/2628216/files/PaperReferences.txt.gz?download=1\",\n \"PaperResources\":\"https://zenodo.org/record/2628216/files/PaperResources.txt.gz?download=1\",\n \"Papers\":\"https://zenodo.org/record/2628216/files/Papers.txt.gz?download=1\",\n \"PaperUrls\":\"https://zenodo.org/record/2628216/files/PaperUrls.txt.gz?download=1\"}\n\nAMINER_URLS = (\"https://academicgraphv2.blob.core.windows.net/oag/aminer/paper/aminer_papers_0.zip\",\n \"https://academicgraphv2.blob.core.windows.net/oag/aminer/paper/aminer_papers_1.zip\",\n \"https://academicgraphv2.blob.core.windows.net/oag/aminer/paper/aminer_papers_2.zip\",\n \"https://academicgraphv2.blob.core.windows.net/oag/aminer/paper/aminer_papers_3.zip\")\nSJR_URLS = ((year, f\"https://www.scimagojr.com/journalrank.php?year={year}&out=xls\") for year in range(1999, 2019))\nSJR_OPEN_URLS = ((year, f\"https://www.scimagojr.com/journalrank.php?openaccess=true&year={year}&out=xls\") for year in range(1999, 2019))\nFIRST_NAMES_SFRAME = SFRAMES_BASE_DIR.joinpath('first_names_gender.sframe')\n", "id": "12577975", "language": "Python", "matching_score": 3.9231598377227783, "max_stars_count": 1, "path": "ScienceDynamics/datasets/configs.py" }, { "content": "import multiprocessing\nfrom enum import Enum\nimport turicreate as tc\nimport pathlib\nfrom dotenv import load_dotenv\nimport os\n\nSTORAGE_DIR_NAME = \".scidyn2\"\nSTORAGE_PATH = pathlib.Path.home().joinpath(STORAGE_DIR_NAME)\nSTORAGE_PATH.mkdir(exist_ok=True)\n\nTMP_DIR = STORAGE_PATH.joinpath('tmp')\nTMP_DIR.mkdir(exist_ok=True)\n\nSFRAMES_BASE_DIR = STORAGE_PATH.joinpath(\"sframes\")\nSFRAMES_BASE_DIR.mkdir(exist_ok=True)\nDATASETS_BASE_DIR = STORAGE_PATH.joinpath(\"MAG\")\nDATASETS_AMINER_DIR = STORAGE_PATH.joinpath(\"AMiner\")\nDATASETS_AMINER_DIR.mkdir(exist_ok=True)\nDATASETS_SJR_DIR = STORAGE_PATH.joinpath(\"sjr\")\nDATASETS_SJR_DIR.mkdir(exist_ok=True)\n\ncores = multiprocessing.cpu_count() // 2\n\ntc.config.set_runtime_config('TURI_CACHE_FILE_LOCATIONS', str(TMP_DIR))\ntc.config.set_runtime_config('TURI_DEFAULT_NUM_PYLAMBDA_WORKERS', cores)\ntc.config.set_runtime_config('TURI_DEFAULT_NUM_GRAPH_LAMBDA_WORKERS', cores)\n\nPAPERS_ALL_FEATURES = SFRAMES_BASE_DIR.joinpath(\"PapersAllFeatures.sframe\")\nFIELDS_OF_STUDY_TXT = DATASETS_BASE_DIR.joinpath(\"FieldsOfStudy.txt.gz\")\nFIELDS_OF_STUDY_SFRAME = SFRAMES_BASE_DIR.joinpath(\"FieldsOfStudy.sframe\")\n\nFIELDS_OF_STUDY_HIERARCHY_TXT = DATASETS_BASE_DIR.joinpath(\"FieldOfStudyHierarchy.txt.gz\")\nFIELDS_OF_STUDY_HIERARCHY_SFRAME = SFRAMES_BASE_DIR.joinpath(\"FieldOfStudyHierarchy.sframe\")\n\nPAPERS_TXT = DATASETS_BASE_DIR.joinpath(\"Papers.txt.gz\")\nPAPERS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"Papers.sframe\")\nEXTENDED_PAPERS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"ExtendedPapers.sframe\")\n\nCLEAN_EXTENDED_PAPERS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"CleanExtendedPapers.sframe\")\nFEATURES_EXTENDED_PAPERS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"FeaturesCleanExtendedPapers.sframe\")\n\nPAPER_AUTHOR_AFFILIATIONS_TXT = DATASETS_BASE_DIR.joinpath(\"PaperAuthorAffiliations.txt.gz\")\nPAPER_AUTHOR_AFFILIATIONS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PaperAuthorAffiliations.sframe\")\n\nAUTHOR_NAMES_SFRAME = 
SFRAMES_BASE_DIR.joinpath(\"authors_names.sframe\")\n\nCONFERENCES_TAT = DATASETS_BASE_DIR.joinpath(\"Conferences.txt.gz\")\nCONFERENCES_SAME = SFRAMES_BASE_DIR.joinpath(\"Conferences.sframe\")\n\nJOURNALS_TXT = DATASETS_BASE_DIR.joinpath(\"Journals.txt.gz\")\nJOURNALS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"Journals.sframe\")\n\nPAPER_KEYWORDS_TXT = DATASETS_BASE_DIR.joinpath(\"PaperKeywords.txt.gz\")\nPAPER_KEYWORDS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PaperKeywords.sframe\")\nPAPER_KEYWORDS_LIST_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PaperKeywordsList.sframe\")\n\nPAPER_REFERENCES_TXT = DATASETS_BASE_DIR.joinpath(\"PaperReferences.txt.gz\")\nPAPER_REFERENCES_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PaperReferences.sframe\")\nPAPER_REFERENCES_COUNT_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PaperReferencesCount.sframe\")\n\nEXTENDED_PAPER_REFERENCES_SFRAME = SFRAMES_BASE_DIR.joinpath(\"ExtendedPaperReferences.sframe\")\nFIELD_OF_STUDY_HIERARCHY = SFRAMES_BASE_DIR.joinpath(\"FieldOfStudyHierarchy.sframe\")\nKEYWORDS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PaperKeywords.sframe\")\nPAPERS_CITATIONS_BYYEAR_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PapersCitationByYear.sframe\")\n\nJOURNALS_DETAILS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"sjr.sframe\")\n\nJOURNALS_PAPERS_SFRAMES_DIR = SFRAMES_BASE_DIR.joinpath(\"journals\")\nCONFERENCES_PAPERS_SFRAMES_DIR = SFRAMES_BASE_DIR.joinpath(\"conferences\")\n\nCO_AUTHORSHIP_LINK_SFRAME = SFRAMES_BASE_DIR.joinpath(\"co_authors_links.sframe\")\n\nL3_FIELD_PAPERS_LIST_SFRAME = SFRAMES_BASE_DIR.joinpath(\"L3DomainPapersLists.sframe\")\n\nAUTHORS_ACADEMIC_BIRTH_YEAR = SFRAMES_BASE_DIR.joinpath(\"AuthorsAcademicBirthYear.sframe\")\nPAPERS_FIELDS_OF_STUDY_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PapersFieldsOfStudy.sframe\")\nPAPERS_ORDERED_AUTHORS_LIST_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PapersOrderedAuthorsList.sframe\")\n\nJOURNAL_AUTHORS_ACADEMIC_BIRTHYEAR_PKL = SFRAMES_BASE_DIR.joinpath(\"journal_authors_academic_birthyear.pkl\")\nCONFERENCE_AUTHORS_ACADEMIC_BIRTHYEAR_PKL = SFRAMES_BASE_DIR.joinpath(\"conference_authors_academic_birthyear.pkl\")\n\nFIELD_OF_STUDY_PAPERS_ID_SFRAME = SFRAMES_BASE_DIR.joinpath(\"FieldsOfStudyPapersIds.sframe\")\n\nAUTHORS_NAMES_TXT = DATASETS_BASE_DIR.joinpath(\"Authors.txt.gz\")\nAUTHORS_NAMES_SFRAME = SFRAMES_BASE_DIR.joinpath(\"Authors.sframe\")\nPAPER_URLS_TXT = DATASETS_BASE_DIR.joinpath(\"PaperUrls.txt.gz\")\nPAPER_URLS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PaperUrls.sframe\")\nAUTHROS_FEATURES_SFRAME = SFRAMES_BASE_DIR.joinpath('authors_features.sframe')\nAMINER_PAPERS_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PapersAMiner.sframe\")\nAMINER_TXT_FILES = DATASETS_AMINER_DIR.joinpath(\"AMiner/*.txt\")\n\nAMINER_MAG_JOIN_SFRAME = SFRAMES_BASE_DIR.joinpath(\"PapersAMinerMagJoin.sframe\")\n\nSJR_SFRAME = DATASETS_SJR_DIR.joinpath(\"sframes\").joinpath(\"sjr.sframe\")\n\nclass VenueType(Enum):\n journal = 1\n conference = 2\n\n# Mongo\nload_dotenv()\nMONGO_IP = os.getenv(\"MONGO_IP\")\n\n", "id": "7444836", "language": "Python", "matching_score": 5.67364501953125, "max_stars_count": 1, "path": "ScienceDynamics/config/configs.py" }, { "content": "import sys\n\nfrom ScienceDynamics.config.configs import PAPER_REFERENCES_SFRAME, PAPER_REFERENCES_TXT, PAPER_REFERENCES_COUNT_SFRAME, \\\n PAPERS_SFRAME, PAPERS_TXT, PAPER_URLS_SFRAME, PAPER_URLS_TXT, PAPER_KEYWORDS_SFRAME, PAPER_KEYWORDS_TXT, \\\n PAPER_KEYWORDS_LIST_SFRAME, FIELDS_OF_STUDY_SFRAME, FIELDS_OF_STUDY_TXT, PAPER_AUTHOR_AFFILIATIONS_SFRAME, \\\n PAPER_AUTHOR_AFFILIATIONS_TXT, 
PAPERS_ORDERED_AUTHORS_LIST_SFRAME, FIELDS_OF_STUDY_HIERARCHY_TXT, \\\n FIELDS_OF_STUDY_HIERARCHY_SFRAME, TMP_DIR, AUTHORS_NAMES_TXT, AUTHOR_NAMES_SFRAME, PAPERS_FIELDS_OF_STUDY_SFRAME, \\\n KEYWORDS_SFRAME, EXTENDED_PAPER_REFERENCES_SFRAME, PAPERS_CITATIONS_BYYEAR_SFRAME, EXTENDED_PAPERS_SFRAME, \\\n FIELD_OF_STUDY_PAPERS_ID_SFRAME\nfrom ScienceDynamics.config.log_config import logger\nfrom ScienceDynamics.sframe_creators.fields_of_study_hieararchy_analyzer import FieldsHierarchyAnalyzer\n\nimport turicreate as tc\nimport turicreate.aggregate as agg\nimport os\n\nsys.path.extend([\"..\"])\n\n\"\"\"\nThe code creates all the SFrame objects from the MAG KDD Cup 2016 Dataset\n\"\"\"\n\n\ndef create_references_sframe():\n \"\"\"Creating the references SFrame from txt files\"\"\"\n logger.info(\"Creating References SFrame\")\n if os.path.isdir(PAPER_REFERENCES_SFRAME):\n return\n sf = tc.SFrame.read_csv(PAPER_REFERENCES_TXT, header=False, delimiter=\"\\t\")\n sf = sf.rename({\"X1\": \"Paper ID\", \"X2\": \"Paper reference ID\"})\n sf.save(PAPER_REFERENCES_SFRAME)\n\n\ndef create_references_count_sframe():\n \"\"\"Creating SFrame with the number of references in each paper\"\"\"\n logger.info(\"Creating References Count SFrame\")\n if os.path.isdir(PAPER_REFERENCES_COUNT_SFRAME):\n return\n r_sf = tc.load_sframe(PAPER_REFERENCES_SFRAME)\n sf = r_sf.groupby(\"Paper ID\", {\"Ref Number\": agg.COUNT()})\n sf.save(PAPER_REFERENCES_COUNT_SFRAME)\n\n\ndef create_papers_sframe():\n \"\"\"\n Create the Papers SFrame object from txt files which contains information on each paper\n \"\"\"\n logger.info(\"Creating Papers SFrame\")\n if os.path.isdir(PAPERS_SFRAME):\n return\n sf = tc.SFrame.read_csv(PAPERS_TXT, header=False, delimiter=\"\\t\")\n\n sf = sf.rename({\"X1\": \"Paper ID\", \"X2\": \"Original paper title\", \"X3\": \"Normalized paper title\",\n \"X4\": \"Paper publish year\", \"X5\": \"Paper publish date\",\n \"X6\": \"Paper Document Object Identifier (DOI)\",\n \"X7\": \"\", \"X8\": \"Normalized venue name\", \"X9\": \"Journal ID mapped to venue name\",\n \"X10\": \"Conference ID mapped to venue name\", \"X11\": \"Paper rank\"})\n sf[\"Paper publish year\"] = sf[\"Paper publish year\"].astype(int)\n sf.save(PAPERS_SFRAME)\n\n\ndef create_urls_sframe():\n \"\"\"\n Creating URLs SFrame from txt files\n \"\"\"\n logger.info(\"Creating urls SFrame\")\n if os.path.isdir(PAPER_URLS_SFRAME):\n return\n sf = tc.SFrame.read_csv(PAPER_URLS_TXT, header=False, delimiter=\"\\t\")\n sf = sf.rename({\"X1\": \"Paper ID\", \"X2\": \"Url\"})\n g = sf.groupby(\"Paper ID\", {\"Urls\": agg.CONCAT(\"Url\")})\n g.save(PAPER_URLS_SFRAME)\n\n\ndef create_keywords_sframe():\n \"\"\"\n Creating Keywords SFrame from txt files\n \"\"\"\n logger.info(\"Creating Keywords SFrame\")\n if os.path.isdir(PAPER_KEYWORDS_SFRAME):\n return\n sf = tc.SFrame.read_csv(PAPER_KEYWORDS_TXT, header=False, delimiter=\"\\t\")\n sf = sf.rename({\"X1\": \"Paper ID\", \"X2\": \"Keyword name\", \"X3\": \"Field of study ID mapped to keyword\"})\n sf.save(PAPER_KEYWORDS_SFRAME)\n\n\ndef create_paper_keywords_list_sframe():\n \"\"\"\n Creating Paper Keywords List SFrame\n \"\"\"\n logger.info(\"Creating Papers' Keywords List SFrame\")\n if os.path.isdir(PAPER_KEYWORDS_LIST_SFRAME):\n return\n\n sf = tc.load_sframe(PAPER_KEYWORDS_SFRAME)\n g = sf.groupby(\"Paper ID\", {\"Keywords List\": agg.CONCAT(\"Keyword name\")})\n g.save(PAPER_KEYWORDS_LIST_SFRAME)\n\n\ndef create_fields_of_study_sframe():\n \"\"\"\n Creating Field of study SFrame from txt 
files\n \"\"\"\n logger.info(\"Creating Fields of Study SFrame\")\n if os.path.isdir(FIELDS_OF_STUDY_SFRAME):\n return\n sf = tc.SFrame.read_csv(FIELDS_OF_STUDY_TXT, header=False, delimiter=\"\\t\")\n sf = sf.rename({\"X1\": \"Field of study ID\", \"X2\": \"Field of study name\"})\n sf.save(FIELDS_OF_STUDY_SFRAME)\n\n\ndef create_paper_author_affiliations_sframe():\n \"\"\"\n Creating authors affilation SFrame from txt files\n :return:\n \"\"\"\n logger.info(\"Creating Author Affilliations SFrame\")\n if os.path.isdir(PAPER_AUTHOR_AFFILIATIONS_SFRAME):\n return\n sf = tc.SFrame.read_csv(PAPER_AUTHOR_AFFILIATIONS_TXT, header=False, delimiter=\"\\t\")\n sf = sf.rename({\"X1\": \"Paper ID\", \"X2\": \"Author ID\", \"X3\": \"Affiliation ID\", \"X4\": \"Original affiliation name\",\n \"X5\": \"Normalized affiliation name\", \"X6\": \"Author sequence number\"})\n sf.save(PAPER_AUTHOR_AFFILIATIONS_SFRAME)\n\n\ndef create_papers_authors_lists_sframe():\n \"\"\"\n Create SFrame in which each row contains paper id and a sorted list of the paper's authors\n \"\"\"\n logger.info(\"Creating Authors Lists SFrame\")\n if os.path.isdir(PAPERS_ORDERED_AUTHORS_LIST_SFRAME):\n return\n authors_sf = tc.load_sframe(PAPER_AUTHOR_AFFILIATIONS_SFRAME)\n authors_sf = authors_sf[\"Paper ID\", \"Author ID\", \"Author sequence number\"]\n authors_sf['Author_Seq'] = authors_sf.apply(lambda r: [r[\"Author ID\"], r[\"Author sequence number\"]])\n g = authors_sf.groupby(\"Paper ID\", {\"Authors List\": agg.CONCAT('Author_Seq')})\n g['Authors List Sorted'] = g[\"Authors List\"].apply(lambda l: sorted(l, key=lambda i: i[1]))\n g['Authors List Sorted'] = g['Authors List Sorted'].apply(lambda l: [i[0] for i in l])\n g = g.remove_column(\"Authors List\")\n g = g[\"Paper ID\", 'Authors List Sorted']\n g['Authors Number'] = g['Authors List Sorted'].apply(lambda l: len(l))\n g.save(PAPERS_ORDERED_AUTHORS_LIST_SFRAME)\n\n\ndef create_field_of_study_hierarchy_sframe():\n \"\"\"\n Creates field of study hierarchy sframe from txt files\n \"\"\"\n logger.info(\"Creating Field of Study Hierarchy SFrame\")\n if os.path.isdir(FIELDS_OF_STUDY_HIERARCHY_SFRAME):\n return\n h_sf = tc.SFrame.read_csv(FIELDS_OF_STUDY_HIERARCHY_TXT, header=False, delimiter=\"\\t\")\n h_sf = h_sf.rename({\"X1\": \"Child field of study ID\", \"X2\": \"Child field of study level\",\n \"X3\": \"Parent field of study ID\", \"X4\": \"Parent field of study level\",\n \"X5\": \"Confidence\"})\n h_sf.save(FIELDS_OF_STUDY_HIERARCHY_SFRAME)\n\n\ndef _get_tmp_papers_sframe_path(min_ref_num, start_year, end_year):\n \"\"\"\n Get Papers SFrame path according to years and references number filters\n :param min_ref_num: paper's minimal references number\n :param start_year: start year\n :param end_year: end year\n :return: a path to the Papers SFrame which contains papers with the above filter\n :rtype: str\n \"\"\"\n return f\"{TMP_DIR}/papers_sframe_minref_{min_ref_num}_{start_year}_{end_year}\"\n\n\ndef get_papers_sframe(min_ref_num=None, start_year=None, end_year=None):\n \"\"\"\n Return SFrame with Papers data accoring to the input filter variables\n :param min_ref_num: paper's minimal references number\n :param start_year: start year (only include paper that were published after start year)\n :param end_year: end year (only include paper that were published before end year)\n :return: SFrame with paper data\n :rtype: tc.SFrame\n :note: after the SFrame is created it is saved to the TMP_DIR to future use\n \"\"\"\n sf = tc.load_sframe(PAPER_REFERENCES_SFRAME)\n 
tmp_papers_sf_path = _get_tmp_papers_sframe_path(min_ref_num, start_year, end_year)\n if os.path.isdir(tmp_papers_sf_path):\n return tc.load_sframe(tmp_papers_sf_path)\n\n if min_ref_num is not None:\n logger.info(f\"Getting papers ids with at least refrences {min_ref_num}\")\n sf = sf.groupby('Paper ID', {'Ref Count': agg.COUNT()}) # There are 30058322 in the list\n sf = sf[sf['Ref Count'] >= min_ref_num] # left with 22,083,058\n sf.__materialize__()\n p_sf = tc.load_sframe(PAPERS_SFRAME)\n sf = p_sf.join(sf)\n if start_year is not None:\n logger.info(\"Getting papers with from %s \" % start_year)\n sf = sf[sf['Paper publish year'] >= start_year]\n if end_year is not None:\n logger.info(\"Getting papers with util %s \" % end_year)\n sf = sf[sf['Paper publish year'] <= end_year]\n sf.__materialize__()\n\n if not os.path.isdir(tmp_papers_sf_path):\n sf.save(tmp_papers_sf_path)\n\n return sf\n\n\ndef create_authors_names_sframe():\n \"\"\"\n Creates authors names SFrames from txt files\n \"\"\"\n logger.info(\"Creating Authors Names SFrame\")\n if os.path.isdir(AUTHORS_NAMES_TXT):\n return\n a_sf = tc.SFrame.read_csv(AUTHORS_NAMES_TXT, header=False, delimiter=\"\\t\")\n a_sf = a_sf.rename({'X1': 'Author ID', 'X2': 'Author name'})\n a_sf['First name'] = a_sf['Author name'].apply(lambda s: s.split()[0])\n a_sf['Last name'] = a_sf['Author name'].apply(lambda s: s.split()[-1])\n a_sf.save(AUTHOR_NAMES_SFRAME)\n\n\ndef create_papers_fields_of_study(flevels=(0, 1, 2, 3)):\n \"\"\"\n Create SFrame with each paper fields of study by hierarchical levels\n :param flevels: list of levels, for each level add the papers fields of study in this level\n \"\"\"\n logger.info(\"Creating Papers Fields of Study SFrame\")\n if os.path.isdir(PAPERS_FIELDS_OF_STUDY_SFRAME):\n return\n k_sf = tc.load_sframe(KEYWORDS_SFRAME)\n g = k_sf.groupby('Paper ID', {'Field of study list': agg.CONCAT(\"Field of study ID mapped to keyword\")})\n fh = FieldsHierarchyAnalyzer()\n\n # add fileds of study names from ID\n names = []\n for l in g['Field of study list']:\n names.append([fh.get_field_name(i) for i in l])\n g['Field of study list names'] = names\n\n for flevel in flevels:\n logger.info(\"Adding papers fields of study level %s\" % flevel)\n parent_list = []\n for paper_field_of_study_list in g['Field of study list']:\n parent_list.append(\n list(set.union(*[fh.get_parents_field_of_study(field, flevel) for field in paper_field_of_study_list])))\n g['Fields of study parent list (L%s)' % flevel] = parent_list\n\n names = []\n for paper_field_of_study_parents_list in g['Fields of study parent list (L%s)' % flevel]:\n names.append([fh.get_field_name(field_of_study) for field_of_study in paper_field_of_study_parents_list])\n g['Fields of study parent list names (L%s)' % flevel] = names\n g.save(PAPERS_FIELDS_OF_STUDY_SFRAME)\n\n\ndef create_extended_references_sframe():\n \"\"\"\n Create SFrame with references data with additional column that state if the reference is self-citation\n \"\"\"\n logger.info(\"Creating Extended References SFrame\")\n if os.path.isdir(EXTENDED_PAPER_REFERENCES_SFRAME):\n return\n ref_sf = tc.load_sframe(PAPER_REFERENCES_SFRAME)\n p_sf = tc.load_sframe(PAPERS_ORDERED_AUTHORS_LIST_SFRAME)\n ref_sf = ref_sf.join(p_sf, on='Paper ID', how=\"left\")\n ref_sf = ref_sf.join(p_sf, on={'Paper reference ID': 'Paper ID'}, how=\"left\")\n ref_sf = ref_sf.fillna('Authors List Sorted.1', [])\n ref_sf = ref_sf.fillna('Authors List Sorted', [])\n ref_sf.__materialize__()\n ref_sf['self citation'] = ref_sf.apply(\n 
lambda r: len(set(r['Authors List Sorted.1']) & set(r['Authors List Sorted'])))\n ref_sf.__materialize__()\n ref_sf = ref_sf.remove_columns(['Authors List Sorted.1', 'Authors List Sorted'])\n\n ref_sf.save(EXTENDED_PAPER_REFERENCES_SFRAME)\n\n\ndef _get_total_citation_by_year(l, max_year=2015):\n \"\"\"\n Calculate the total citation by year\n :param l: list of (year, citation) tuple\n :param max_year: the maximal year\n :return: dict with the totatl number of citation in each year\n \"\"\"\n min_year = int(min([y for y, v in l]))\n total_citations_dict = {}\n for i in range(min_year, int(max_year + 1)):\n total_citations_dict[str(i)] = sum([v for y, v in l if y <= i])\n return total_citations_dict\n\n\ndef _papers_citations_number_by_year_sframe(without_self_citation=True):\n \"\"\"\n Get papers total number of citation in each year\n :param without_self_citation: if True calculate only non-self citations, other calculate with self-citations\n :return: SFrame with a column that contains citations_dict by year\n \"\"\"\n logger.info(\"Creating Paper Citations by Year (without_self_citation=%s)\" % without_self_citation)\n ref_sf = tc.load_sframe(EXTENDED_PAPER_REFERENCES_SFRAME)\n if without_self_citation:\n ref_sf = ref_sf[ref_sf['self citation'] == 0]\n\n sf = tc.load_sframe(PAPERS_SFRAME)[\"Paper ID\", \"Paper publish year\"]\n sf = ref_sf.join(sf, on=\"Paper ID\")\n g = sf.groupby([\"Paper reference ID\", \"Paper publish year\"], {\"Citation Number\": agg.COUNT()})\n g = g.rename({\"Paper publish year\": \"Year\", \"Paper reference ID\": \"Paper ID\"})\n g['Citation by Year'] = g.apply(lambda r: (r[\"Year\"], r[\"Citation Number\"]))\n h = g.groupby('Paper ID', {'Citation by Years': tc.aggregate.CONCAT('Citation by Year')})\n if without_self_citation:\n h['Total Citations by Year without Self Citations'] = h['Citation by Years'].apply(\n lambda l: _get_total_citation_by_year(l))\n else:\n h['Total Citations by Year'] = h['Citation by Years'].apply(lambda l: _get_total_citation_by_year(l))\n h = h.remove_column(\"Citation by Years\")\n return h\n\n\ndef create_papers_citation_number_by_year_sframe():\n \"\"\"\n Create SFrame with each paper's citation numbers by year dict (one dict with self-citations and the other without)\n \"\"\"\n if os.path.isdir(PAPERS_CITATIONS_BYYEAR_SFRAME):\n return\n r_sf = _papers_citations_number_by_year_sframe(False)\n r_sf2 = _papers_citations_number_by_year_sframe(True)\n sf = r_sf.join(r_sf2, on=\"Paper ID\")\n sf.save(PAPERS_CITATIONS_BYYEAR_SFRAME)\n\n\ndef create_extended_papers_sframe():\n \"\"\"\n Created extended papers SFrame which contains various papers features, such as paper citation numbers, authors list, urls,.. 
etc\n :return:\n \"\"\"\n logger.info(\"Creating Extended Papers SFrame\")\n if os.path.isdir(EXTENDED_PAPERS_SFRAME):\n return\n sf = tc.load_sframe(PAPERS_SFRAME)\n\n sframes_list = [PAPER_REFERENCES_COUNT_SFRAME, PAPERS_CITATIONS_BYYEAR_SFRAME, PAPERS_ORDERED_AUTHORS_LIST_SFRAME,\n PAPER_KEYWORDS_LIST_SFRAME, PAPERS_FIELDS_OF_STUDY_SFRAME, PAPER_URLS_SFRAME]\n\n for s in sframes_list:\n t = tc.load_sframe(s)\n sf = sf.join(t, how=\"left\", on=\"Paper ID\")\n sf.save(EXTENDED_PAPERS_SFRAME)\n sf = sf.fillna(\"Ref Number\", 0)\n sf.save(EXTENDED_PAPERS_SFRAME)\n\n\ndef _create_field_of_study_paper_ids_sframe(level):\n \"\"\"\n Create SFrame in which each row contains a field of study and it's matching list of paper ids\n :param level: field of study level\n :return: SFrame with the fields of stuyd in the input level papers ids\n :rtype: tc.SFrame\n \"\"\"\n logger.info(\"Creating fields os study paper ids SFrame level - %s \" % level)\n\n col = 'Fields of study parent list (L%s)' % level\n sf = tc.load_sframe(EXTENDED_PAPERS_SFRAME)\n new_col_name = \"Field ID\"\n sf = sf.stack(col, new_column_name=new_col_name)\n sf = sf[sf[col] != None]\n g = sf.groupby(new_col_name, {'Paper IDs': agg.CONCAT(\"Paper ID\")})\n f_sf = tc.load_sframe(FIELDS_OF_STUDY_SFRAME)\n g = g.join(f_sf, on={new_col_name: \"Field of study ID\"})\n g['Number of Paper'] = g['Paper IDs'].apply(lambda l: len(l))\n g['Level'] = level\n g = g.rename({new_col_name: \"Field of study ID\"})\n return g\n\n\ndef create_fields_of_study_papers_ids_sframes(levels=(1, 2, 3)):\n \"\"\"\n Creates SFrames with each Fields of study paper ids\n :param levels: list of fields of study level\n\n \"\"\"\n if os.path.isdir(FIELD_OF_STUDY_PAPERS_ID_SFRAME):\n return\n\n sf = tc.SFrame()\n for level in levels:\n sf = sf.append(_create_field_of_study_paper_ids_sframe(level))\n sf.save(FIELD_OF_STUDY_PAPERS_ID_SFRAME)\n return sf\n\n\ndef create_all_sframes():\n \"\"\"\n Creates all SFrame from txt files\n \"\"\"\n create_papers_sframe()\n\n create_references_sframe()\n create_references_count_sframe()\n\n create_paper_author_affiliations_sframe()\n create_papers_authors_lists_sframe()\n\n create_keywords_sframe()\n create_paper_keywords_list_sframe()\n create_field_of_study_hierarchy_sframe()\n create_fields_of_study_sframe()\n create_papers_fields_of_study()\n\n create_references_sframe()\n create_extended_references_sframe()\n\n create_extended_papers_sframe()\n create_fields_of_study_papers_ids_sframes()\n", "id": "10135529", "language": "Python", "matching_score": 7.936328887939453, "max_stars_count": 1, "path": "ScienceDynamics/sframe_creators/create_mag_sframes.py" }, { "content": "import pathlib\nfrom turicreate import SFrame, load_sframe\nfrom ScienceDynamics.datasets.utils import download_file, save_sframe\nimport turicreate.aggregate as agg\nfrom tqdm import tqdm\nfrom ScienceDynamics.sframe_creators.fields_of_study_hieararchy_analyzer import FieldsHierarchyAnalyzer\nfrom ScienceDynamics.fetchers.wikipedia_fetcher import WikiLocationFetcher\nfrom ScienceDynamics.datasets.configs import MAG_URL_DICT\nfrom ScienceDynamics.config import DATASETS_BASE_DIR\n\n\nimport pandas as pd\nimport re\nfrom array import array\n\n\nclass MicrosoftAcademicGraph(object):\n def __init__(self, dataset_dir=None, download=False):\n if dataset_dir is None:\n dataset_dir = DATASETS_BASE_DIR\n self._dataset_dir = pathlib.Path(dataset_dir)\n self._dataset_dir.mkdir(exist_ok=True)\n self._sframe_dir = self._dataset_dir / \"sframes\"\n 
self._sframe_dir.mkdir(exist_ok=True)\n if download:\n for i, url in enumerate(MAG_URL_DICT.values()):\n mag_file = self._dataset_dir / re.search(\".*files\\/(.*?)\\?\", url).group(1)\n if not pathlib.Path(mag_file).exists():\n download_file(url, mag_file)\n # with zipfile.ZipFile(mag_file, 'r') as f:\n # f.extractall(self._dataset_dir)\n\n @property\n @save_sframe(sframe=\"Papers.sframe\")\n def papers(self):\n \"\"\"\n Create the Papers SFrame object from.txt.gz files which contains information on each paper\n \"\"\"\n cols = [\"PaperId\", \"Rank\", \"Doi\", \"DocType\", \"PaperTitle\", \"OriginalTitle\", \"BookTitle\", \"Year\", \"Date\",\n \"Publisher\", \"JournalId\", \"ConferenceSeriesId\", \"ConferenceInstanceId\", \"Volume\", \"Issue\", \"FirstPage\",\n \"LastPage\", \"ReferenceCount\", \"CitationCount\", \"EstimatedCitation\", \"OriginalVenue\", \n \"CreatedDate\"]\n papers = SFrame.read_csv(str(self._dataset_dir / \"Papers.txt.gz\"),header=False, sep=\"\\t\")\n papers = papers.rename(dict(zip([f\"X{i+1}\" for i in range(len(cols))], cols)))\n papers[\"Year\"] = papers[\"Year\"].astype(int)\n return papers\n \n\n @property\n @save_sframe(sframe=\"Journals.sframe\")\n def journals(self):\n \"\"\"\n Create the Papers SFrame object from.txt.gz files which contains information on each paper\n \"\"\"\n cols = [\"JournalId\", \"Rank\", \"NormalizedName\", \"DisplayName\", \"Issn\", \"Publisher\", \"Webpage\", \"PaperCount\", \"CitationCount\",\n \"CreatedDate\"]\n journals = SFrame(pd.read_csv(self._dataset_dir /\"Journals.txt.gz\", sep=\"\\t\",\n names=cols).replace({pd.NA: None}))\n return journals\n \n \n @property\n @save_sframe(sframe=\"Authors.sframe\")\n def authors(self):\n \"\"\"\n Creates authors names SFrames from.txt.gz files\n \"\"\"\n authors = SFrame(pd.read_csv(self._dataset_dir /\"Authors.txt.gz\", sep=\"\\t\",\n names=[\"AuthorId\", \"Rank\", \"NormalizedName\", \"DisplayName\",\n \"LastKnownAffiliationId\", \"PaperCount\",\n \"CitationCount\", \"CreatedDate\"]).replace({pd.NA: None}))\n authors['First name'] = authors['NormalizedName'].apply(lambda s: s.split()[0])\n authors['Last name'] = authors['NormalizedName'].apply(lambda s: s.split()[-1])\n return authors\n \n \n @property\n def author_names(self):\n \"\"\"\n Creates authors names SFrames from.txt.gz files\n \"\"\"\n return self.authors[[\"AuthorId\", \"NormalizedName\"]]\n\n @property\n @save_sframe(sframe=\"PaperReferences.sframe\")\n def references(self):\n \"\"\"Creating the references SFrame from.txt.gz files\"\"\"\n references = SFrame.read_csv(str(self._dataset_dir / \"PaperReferences.txt.gz\"), header=False, delimiter=\"\\t\")\n references = references.rename({\"X1\": \"PaperId\", \"X2\": \"PaperReferenceId\"})\n return references\n\n @property\n @save_sframe(sframe=\"PaperReferencesCount.sframe\")\n def reference_count(self):\n return self.references.groupby(\"PaperId\", {\"Ref Number\": agg.COUNT()})\n\n @property\n @save_sframe(sframe=\"PaperFieldsOfStudy.sframe\")\n def paper_fields_of_study(self):\n \"\"\"\n Creating Keywords SFrame from.txt.gz files\n \"\"\"\n cols = [\"PaperId\", \"FieldOfStudyId\", \"Score\"]\n papaers_field = SFrame.read_csv(\"~/mag/PaperFieldsOfStudy.txt.gz\",header=False, sep=\"\\t\")\n return papaers_field.rename(dict(zip([f\"X{i+1}\" for i in range(len(cols))], cols)))\n\n# return keywords.rename({\"X1\": \"PaperId\", \"X2\": \"Keyword name\", \"X3\": \"Field of study ID mapped to keyword\"})\n\n # @property\n # @save_sframe(sframe=\"PaperKeywordsList.sframe\")\n # def 
paper_keywords_list(self):\n # \"\"\"\n # Creating Paper Keywords List SFrame\n # \"\"\"\n # return self.paper_pields_of_study.groupby(\"PaperId\", {\"Field List\": agg.CONCAT(\"Keyword name\")})\n\n @property\n @save_sframe(sframe=\"FieldsOfStudy.sframe\")\n def fields_of_study(self):\n \"\"\"\n Creating Field of study SFrame from.txt.gz files\n \"\"\"\n cols = [\"FieldOfStudyId\", \"Rank\", \"NormalizedName\", \"DisplayName\", \"MainType\", \"Level\", \"PaperCount\", \"CitationCount\", \"CreatedDate\"]\n fields_of_study = SFrame(pd.read_csv(self._dataset_dir / \"FieldsOfStudy.txt.gz\", sep=\"\\t\",\n names=cols).replace({pd.NA: None}))\n return fields_of_study\n \n @property\n @save_sframe(sframe=\"PaperResources.sframe\")\n def paper_resources(self):\n \"\"\"\n Creating Field of study SFrame from.txt.gz files\n ResourceType. 1 = Project, 2 = Data, 4 = Code\n \"\"\"\n cols = [\"PaperId\", \"ResourceType\", \"ResourceUrl\", \"SourceUrl\", \"RelationshipType\"]\n return SFrame(pd.read_csv(self._dataset_dir / \"PaperResources.txt.gz\", sep=\"\\t\",\n names=cols).replace({pd.NA: None}))\n\n\n @property\n @save_sframe(sframe=\"PaperAuthorAffiliations.sframe\")\n def paper_author_affiliations(self):\n \"\"\"\n Creating authors affiliation SFrame from.txt.gz files\n :return:\n \"\"\"\n cols = [\"PaperId\", \"AuthorId\", \"AffiliationId\", \"AuthorSequenceNumber\", \"OriginalAuthor\", \"OriginalAffiliation\"]\n paper_author_affiliations = SFrame(pd.read_csv(self._dataset_dir / \"PaperAuthorAffiliations.txt.gz\", sep=\"\\t\",\n names=cols).replace({pd.NA: None}))\n\n return paper_author_affiliations\n \n @property\n @save_sframe(sframe=\"Affiliations.sframe\")\n def affiliations(self):\n \"\"\"\n Creating authors affiliation SFrame from.txt.gz files\n :return:\n \"\"\"\n cols = [\"AffiliationId\", \"Rank\", \"NormalizedName\", \"DisplayName\", \"GridId\", \"OfficialPage\", \"WikiPage\", \"PaperCount\", \"CitationCount\", \"CreatedDate\"]\n affiliations = SFrame(pd.read_csv(self._dataset_dir / \"Affiliations.txt.gz\", sep=\"\\t\",\n names=cols).replace({pd.NA: None}))\n\n return affiliations\n\n\n def add_geo_data_to_affiliations(self, max_workers=2):\n \"\"\"\n Creating authors affiliation SFrame from.txt.gz files\n :return:\n \"\"\"\n fields = [\"AffiliationId\", \"Rank\", \"NormalizedName\", \"DisplayName\", \"GridId\", \"OfficialPage\", \"WikiPage\", \"PaperCount\", \"CitationCount\", \"CreatedDate\"]\n wl = WikiLocationFetcher(self.affiliations[fields], max_workers)\n wl.add_location_data()\n wl.aff.save(f\"{self._sframe_dir}/Affiliations.sframe\")\n \n \n @property\n @save_sframe(sframe=\"PapersOrderedAuthorsList.sframe\")\n def papers_authors_lists(self):\n \"\"\"\n Create SFrame in which each row contains PaperId and a sorted list of the paper's authors\n \"\"\"\n\n authors_sf = self.paper_author_affiliations[\"PaperId\", \"AuthorId\", \"AuthorSequenceNumber\"]\n authors_sf['Author_Seq'] = authors_sf.apply(lambda r: [r[\"AuthorId\"], r[\"AuthorSequenceNumber\"]])\n g = authors_sf.groupby(\"PaperId\", {\"Authors List\": agg.CONCAT('Author_Seq')})\n g['Authors List Sorted'] = g[\"Authors List\"].apply(lambda l: sorted(l, key=lambda i: i[1]))\n g['Authors List Sorted'] = g['Authors List Sorted'].apply(lambda l: [i[0] for i in l])\n g = g.remove_column(\"Authors List\")\n g = g[\"PaperId\", 'Authors List Sorted']\n g['Authors Number'] = g['Authors List Sorted'].apply(lambda l: len(l))\n return g\n\n @property\n @save_sframe(sframe=\"FieldOfStudyChildren.sframe\")\n def 
field_of_study_children(self):\n \"\"\"\n Creates field of study hierarchy sframe from.txt.gz files\n \"\"\"\n h_sf = SFrame.read_csv(str(self._dataset_dir / \"FieldOfStudyChildren.txt.gz\"), header=False, delimiter=\"\\t\")\n return h_sf.rename({\"X1\": \"FieldOfStudyId\", \"X2\": \"ChildFieldOfStudyId\"})\n\n def _get_tmp_papers_sframe_path(self, min_ref_num, start_year, end_year):\n \"\"\"\n Get Papers SFrame path according to years and references number filters\n :param min_ref_num: paper's minimal references number\n :param start_year: start year\n :param end_year: end year\n :return: a path to the Papers SFrame which contains papers with the above filter\n :rtype: PosixPath\n \"\"\"\n return self._dataset_dir / f\"papers_sframe_minref_{min_ref_num}_{start_year}_{end_year}\"\n\n def get_papers_sframe(self, min_ref_num=None, start_year=None, end_year=None):\n \"\"\"\n Return SFrame with Papers data according to the input filter variables\n :param min_ref_num: paper's minimal references number\n :param start_year: start year (only include paper that were published after start year)\n :param end_year: end year (only include paper that were published before end year)\n :return: SFrame with paper data\n :rtype: SFrame\n :note: after the SFrame is created it is saved to the TMP_DIR to future use\n \"\"\"\n sf = self.references\n tmp_papers_sf_path = self._get_tmp_papers_sframe_path(min_ref_num, start_year, end_year)\n if tmp_papers_sf_path.is_dir():\n return load_sframe(str(tmp_papers_sf_path))\n\n if min_ref_num is not None:\n sf = sf.groupby('PaperId', {'Ref Count': agg.COUNT()}) # There are 30058322 in the list\n sf = sf[sf['Ref Count'] >= min_ref_num] # left with 22,083,058\n sf.__materialize__()\n p_sf = self.papers\n sf = p_sf.join(sf)\n if start_year is not None:\n sf = sf[sf['Year'] >= start_year]\n if end_year is not None:\n sf = sf[sf['Year'] <= end_year]\n sf.__materialize__()\n\n if not tmp_papers_sf_path.is_dir():\n sf.save(str(tmp_papers_sf_path))\n\n return sf\n \n @property\n @save_sframe(sframe=\"PaperFieldsOfStudy.sframe\")\n def papers_fields_of_study(self):\n \"\"\"Creating the references SFrame from.txt.gz files\"\"\"\n fos = SFrame.read_csv(str(self._dataset_dir / \"PapersFieldsOfStudy.txt.gz\"), header=False, delimiter=\"\\t\")\n return references.rename({\"X1\": \"PaperId\", \"X2\": \"FieldOfStudyId\", \"X3\": \"Score\"})\n\n \n @save_sframe(sframe=\"PapersFieldsOfStudyLevel.sframe\")\n def papers_fields_of_study_level(self, flevels=(0, 1, 2, 3)):\n \"\"\"\n Create SFrame with each paper fields of study by hierarchical levels\n :param flevels: list of levels, for each level add the papers fields of study in this level\n \"\"\"\n k_sf = self.paper_fields_of_study\n# FieldOfStudyId\n g = k_sf.groupby('PaperId', {'Field of study list': agg.CONCAT(\"FieldOfStudyId\")})\n fh = FieldsHierarchyAnalyzer(self)\n \n # add fields of study names from ID\n names = []\n for l in tqdm(g['Field of study list']):\n names.append([fh.get_field_name(i) for i in l])\n g['Field of study list names'] = names\n \n for flevel in flevels:\n parent_list = []\n for paper_field_of_study_list in tqdm(g['Field of study list']):\n parent_list.append(\n list(set.union(\n *[fh.get_parents_field_of_study(field, flevel) for field in paper_field_of_study_list])))\n g[f'Fields of study parent list (L{flevel})'] = parent_list\n \n names = []\n for paper_field_of_study_parents_list in g[f'Fields of study parent list (L{flevel})']:\n names.append(\n [fh.get_field_name(field_of_study) for field_of_study in 
paper_field_of_study_parents_list])\n g[f'Fields of study parent list names (L{flevel})'] = names\n return g\n\n @property\n @save_sframe(sframe=\"ExtendedPaperReferences.sframe\")\n def extended_references(self):\n \"\"\"\n Create SFrame with references data with additional column that state if the reference is self-citation\n \"\"\"\n\n ref_sf = self.references\n p_sf = self.papers_authors_lists\n ref_sf = ref_sf.join(p_sf, on='PaperId', how=\"left\")\n ref_sf = ref_sf.join(p_sf, on={'PaperReferenceId': 'PaperId'}, how=\"left\")\n ref_sf = ref_sf.fillna('Authors List Sorted.1', array('d'))\n ref_sf = ref_sf.fillna('Authors List Sorted', array('d'))\n ref_sf.__materialize__()\n ref_sf['self citation'] = ref_sf.apply(\n lambda r: len(set(r['Authors List Sorted.1']) & set(r['Authors List Sorted'])))\n ref_sf.__materialize__()\n return ref_sf.remove_columns(['Authors List Sorted.1', 'Authors List Sorted'])\n\n def _get_total_citation_by_year(self, year_citation, max_year=2015):\n \"\"\"\n Calculate the total citation by year\n :param year_citation: list of (year, citation) tuple\n :param max_year: the maximal year\n :return: dict with the total number of citation in each year\n \"\"\"\n min_year = int(min([y for y, c in year_citation]))\n total_citations_dict = {}\n for i in range(min_year, int(max_year + 1)):\n total_citations_dict[str(i)] = sum([v for y, v in year_citation if y <= i])\n return total_citations_dict\n\n def _papers_citations_number_by_year(self, without_self_citation=True):\n \"\"\"\n Get papers total number of citation in each year\n :param without_self_citation: if True calculate only non-self citations, other calculate with self-citations\n :return: SFrame with a column that contains citations_dict by year\n \"\"\"\n ref_sf = self.extended_references\n if without_self_citation:\n ref_sf = ref_sf[ref_sf['self citation'] == 0]\n\n sf = self.papers[\"PaperId\", \"Year\"]\n sf = ref_sf.join(sf, on=\"PaperId\")\n g = sf.groupby([\"PaperReferenceId\", \"Year\"], {\"Citation Number\": agg.COUNT()})\n g = g.rename({\"Year\": \"Year\", \"PaperReferenceId\": \"PaperId\"})\n g['Citation by Year'] = g.apply(lambda r: (r[\"Year\"], r[\"Citation Number\"]))\n h = g.groupby('PaperId', {'Citation by Years': agg.CONCAT('Citation by Year')})\n if without_self_citation:\n h['Total Citations by Year without Self Citations'] = h['Citation by Years'].apply(\n lambda l: self._get_total_citation_by_year(l))\n else:\n h['Total Citations by Year'] = h['Citation by Years'].apply(lambda l: self._get_total_citation_by_year(l))\n return h.remove_column(\"Citation by Years\")\n\n @property\n @save_sframe(sframe=\"PapersCitationByYear.sframe\")\n def papers_citation_number_by_year(self):\n \"\"\"\n Create SFrame with each paper's citation numbers by year dict\n (one dict with self-citations and the other without)\n \"\"\"\n\n r_sf = self._papers_citations_number_by_year(False)\n r_sf2 = self._papers_citations_number_by_year(True)\n return r_sf.join(r_sf2, on=\"PaperId\")\n\n @property\n @save_sframe(sframe=\"PaperUrls.sframe\")\n def urls(self):\n\n \"\"\"\n Creating URLs SFrame from.txt.gz files\n \"\"\"\n cols = [\"PaperId\", \"SourceType\", \"SourceUrl\", \"LanguageCode\"]\n urls = SFrame(pd.read_csv(self._dataset_dir / \"PaperUrls.txt.gz\", sep=\"\\t\",\n names=cols).replace({pd.NA: None}))\n return urls.groupby(\"PaperId\", {\"Urls\": agg.CONCAT(\"SourceUrl\")})\n\n @property\n @save_sframe(sframe=\"ExtendedPapers.sframe\")\n def extended_papers(self):\n \"\"\"\n Created extended papers SFrame 
which contains various papers features, such as\n paper citation numbers, authors list, urls, etc.\n :return:\n \"\"\"\n sf = self.papers\n sframe_list = (self.reference_count, self.papers_citation_number_by_year, self.papers_authors_lists,\n self.urls, self.papers_fields_of_study_level())\n # self.paper_keywords_list, self.papers_fields_of_study()\n for t in tqdm(sframe_list):\n sf = sf.join(t, how=\"left\", on=\"PaperId\")\n return sf.fillna(\"Ref Number\", 0)\n\n def _create_field_of_study_paper_ids(self, level):\n \"\"\"\n Create SFrame in which each row contains a field of study and it's matching list of PaperIds\n :param level: field of study level\n :return: SFrame with the fields of study in the input level papers ids\n :rtype: SFrame\n \"\"\"\n\n col = 'Fields of study parent list (L%s)' % level\n sf = self.extended_papers\n new_col_name = \"Field ID\"\n sf = sf[sf[col] != None]\n sf = sf.stack(col, new_column_name=new_col_name)\n g = sf.groupby(new_col_name, {'PaperIds': agg.CONCAT(\"PaperId\")})\n g[new_col_name] = g[new_col_name].astype(int)\n f_sf = self.fields_of_study\n g = g.join(f_sf, on={new_col_name: \"FieldOfStudyId\"})\n g['Number of Paper'] = g['PaperIds'].apply(lambda l: len(l))\n g['Level'] = level\n return g.rename({new_col_name: \"Field of study ID\"})\n\n @save_sframe(sframe=\"FieldsOfStudyPapersIds.sframe\")\n def fields_of_study_papers_ids(self, levels=(1, 2, 3)):\n \"\"\"\n Creates SFrames with each Fields of study PaperIds\n :param levels: list of fields of study level\n\n \"\"\"\n\n sf = SFrame()\n for level in tqdm(levels):\n sf = sf.append(self._create_field_of_study_paper_ids(level))\n return sf\n", "id": "4793065", "language": "Python", "matching_score": 4.475365161895752, "max_stars_count": 1, "path": "ScienceDynamics/datasets/microsoft_academic_graph.py" }, { "content": "import os\n\nfrom ScienceDynamics.config.configs import VenueType, JOURNALS_PAPERS_SFRAMES_DIR, CONFERENCES_PAPERS_SFRAMES_DIR, \\\n JOURNAL_AUTHORS_ACADEMIC_BIRTHYEAR_PKL, CONFERENCE_AUTHORS_ACADEMIC_BIRTHYEAR_PKL, FIELDS_OF_STUDY_SFRAME, \\\n FIELDS_OF_STUDY_HIERARCHY_SFRAME, PAPERS_SFRAME, EXTENDED_PAPERS_SFRAME, PAPER_AUTHOR_AFFILIATIONS_SFRAME, \\\n PAPER_KEYWORDS_SFRAME, PAPER_REFERENCES_SFRAME, CO_AUTHORSHIP_LINK_SFRAME, L3_FIELD_PAPERS_LIST_SFRAME, \\\n JOURNALS_SFRAME, CONFERENCES_SAME\nfrom ScienceDynamics.config.log_config import logger\nfrom ScienceDynamics.utils import filter_sframe_by_func\nimport itertools\nfrom collections import Counter\nimport pickle\nimport numpy as np\nimport json\nfrom datetime import datetime\nimport turicreate as tc\nimport turicreate.aggregate as agg\n\n\nclass VenueAnalyzer(object):\n def __init__(self, venue_id, venue_type=VenueType.journal, academic_birthday_dict=None):\n \"\"\"\n Construct a VenueAnalyzer object\n :param venue_id: the venue ID as appear in Microsoft Academic Graph dataset\n :param venue_type: the venue type of VenueType\n @type venue_type: VenueType\n :param academic_birthday_dict: dict with all the authors academic birthdays (the year in which they publish their\n first paper)\n \"\"\"\n\n self._venue_id = venue_id\n self._venue_type = venue_type\n self._name = self._get_venue_name()\n self._all_papers_sf = self._get_all_papers_sframe()\n self._academic_birthyear_dict = academic_birthday_dict\n self._l3_papers_dict = None\n self._co_authors_links = None\n\n def _get_venue_name(self):\n \"\"\"\n Return the venue full name\n :return: string with the venue full name\n :rtype: str\n \"\"\"\n if self.venue_type == 
VenueType.journal:\n sf = self.journals_sframe\n sf = sf[sf[\"Journal ID\"] == self.venue_id]\n if len(sf) != 0:\n return sf[0][\"Journal name\"]\n elif self.venue_type == VenueType.conference:\n sf = self.confrences_sframe\n sf = sf[sf[\"Conference ID\"] == self.venue_id]\n if len(sf) != 0:\n return sf[0][\"Full Name\"]\n return None\n\n def _get_all_papers_sframe(self):\n \"\"\"\n Return SFrame with all the papers published in the venue\n :return: Papers SFrame with all the papers details that were published in the venue\n :rtype tc.SFrame\n @note: The SFrame object was created by academic_parser.create_venue_papers_sframe\n \"\"\"\n if self.venue_type == VenueType.journal:\n return tc.load_sframe(\"%s/%s.sframe\" % (JOURNALS_PAPERS_SFRAMES_DIR, self._venue_id))\n elif self.venue_type == VenueType.conference:\n return tc.load_sframe(\"%s/%s.sframe\" % (CONFERENCES_PAPERS_SFRAMES_DIR, self._venue_id))\n\n def _get_papers_sframe(self, filter_func=None):\n \"\"\"\n Filter all the papers according to the filter function and return filtered Papers SFrame object\n :param filter_func: filter function\n :return: Filtered Papers SFrame object\n :rtype: tc.SFrame\n \"\"\"\n if filter_func is None:\n return self._all_papers_sf\n return filter_sframe_by_func(self._all_papers_sf, filter_func)\n\n def get_papers_sframe_by_year(self, start_year, end_year):\n \"\"\"\n Get venue papers between input years\n :param start_year: start year\n :param end_year: end year\n :return: SFrame with all the papers between the input years\n :rtype: tc.SFrame\n @note: the start_year/end_year can be equal None\n \"\"\"\n f = None\n if start_year is None and end_year is None:\n return self._all_papers_sf\n elif start_year is not None and end_year is not None:\n f = lambda r: end_year >= r['Paper publish year'] >= start_year\n elif end_year is None:\n f = lambda r: r['Paper publish year'] >= start_year\n elif start_year is None:\n f = lambda r: end_year >= r['Paper publish year']\n\n return self._get_papers_sframe(f)\n\n def get_papers_ids_set(self, start_year=None, end_year=None):\n \"\"\" Returns all the venue papers ids between years\n :param start_year: start year\n :param end_year: end year\n :return: set with all papers ids between the input years\n :rtype: Set()\n \"\"\"\n sf = self.get_papers_sframe_by_year(start_year, end_year)\n return set(sf[\"Paper ID\"])\n\n def get_venue_authors_sframe(self, start_year=None, end_year=None):\n \"\"\"\n Returns a SFrame object with all the venue's authors details who publish papers in the venue between the\n input years\n :param start_year: start year\n :param end_year: end year\n :return: SFrame with the authors details\n :rtype: tc.SFrame\n \"\"\"\n sf = self.paper_author_affiliations_sframe\n p_ids = self.get_papers_ids_set(start_year, end_year)\n return sf[sf['Paper ID'].apply(lambda pid: pid in p_ids)]\n\n def get_venue_authors_ids_list(self, start_year=None, end_year=None, authors_seq_list=None):\n \"\"\"\n Returns a list with all the author ids that publish in the venue between the input years.\n :param start_year: start year\n :param end_year: end year\n :param authors_seq_list: author sequence in the paper's authors list. 
For example, authors_seq_list=[0] will return\n only the authors ids who where the first authors, while authors_seq_list=[-1] will return only the authors ids\n who were the last authors.\n :return: list of authors ids\n :rtype: list\n @note: an author id can appear multiple times, each time for each paper the author publish in the venue\n \"\"\"\n sf = self.get_papers_sframe_by_year(start_year, end_year)\n if authors_seq_list is None:\n selected_authors_list = sf[\"Authors List Sorted\"]\n else:\n selected_authors_list = sf[\"Authors List Sorted\"].apply(lambda l: [l[i] for i in authors_seq_list])\n return list(itertools.chain.from_iterable(selected_authors_list))\n\n def get_venue_authors_affiliations_list(self, start_year=None, end_year=None):\n \"\"\"\n Returns a list of venue authors' affiliations\n :param start_year: start year\n :param end_year: end year\n :return: a list of venue authors' affiliations with repeats\n :rtype: list\n \"\"\"\n sf = self.get_venue_authors_sframe(start_year, end_year)\n return list(sf[\"Normalized affiliation name\"])\n\n def get_authors_papers_count(self):\n \"\"\"\n Calculate the number of papers each author published in the venue\n :return: Counter object in which keys are the venue's author ids and the values are the number of times\n each author published a paper in the venue\n :rtype: Counter\n \"\"\"\n l = self.get_venue_authors_ids_list()\n return Counter(l)\n\n def get_percentage_of_new_authors(self, year):\n \"\"\"\n Get the perecentage of new authors that publish paper's in the venue in specific year\n :param year: year\n :return: returns percentage of new authors, and the total number of authors in a specific year as a tuple\n :rtyoe: dict with the new authrors percentage and total authors value\n \"\"\"\n s1 = set(self.get_venue_authors_ids_list(start_year=self.min_year, end_year=year - 1))\n s2 = set(self.get_venue_authors_ids_list(start_year=year, end_year=year))\n if len(s2) == 0:\n return None\n return {'new_authors_percentage': len(s2 - s1) / float(len(s2)), 'total_authors': len(s2)}\n\n def get_percentage_of_new_authors_over_time(self):\n \"\"\"\n Get the percentage of new authors by year\n :return: a dict with the percentage of new authors in each year, and the total number of authors in each year\n :rtyoe: dict\n \"\"\"\n return {i: self.get_percentage_of_new_authors(i) for i in range(self.min_year + 1, self.max_year + 1)}\n\n def get_percentage_papers_by_new_authors(self, year, authors_seq_list):\n \"\"\"\n Get the percentage of new authors, in specific sequence, in specific year\n :param year: year\n :param authors_seq_list: authors sequence list\n :return: the percentage of new authors in specific sequence, and the number of new papers in the year\n :rtyoe: dict with the percentage of new authors and the total number of new papers\n \"\"\"\n all_previous_authors_set = set(self.get_venue_authors_ids_list(start_year=self.min_year, end_year=year - 1))\n year_papers_sf = self.get_papers_sframe_by_year(year, year)\n if len(year_papers_sf) == 0:\n return None\n if authors_seq_list is None:\n year_papers_sf[\"Selected Authors\"] = year_papers_sf[\"Authors List Sorted\"]\n else:\n year_papers_sf[\"Selected Authors\"] = year_papers_sf[\"Authors List Sorted\"].apply(\n lambda l: [l[i] for i in authors_seq_list])\n\n year_papers_sf['New Paper'] = year_papers_sf[\"Selected Authors\"].apply(\n lambda l: 1 if len(set(l) & all_previous_authors_set) == 0 else 0)\n\n return {\"new_authors_papers_percentage\": year_papers_sf['New Paper'].sum() / 
float(len(year_papers_sf)),\n \"total_papers\": len(year_papers_sf)}\n\n def get_yearly_percentage_of_papers_with_new_authors_dict(self, authors_seq_list):\n return {i: self.get_percentage_papers_by_new_authors(i, authors_seq_list) for i in\n range(self.min_year + 1, self.max_year + 1)}\n\n def get_venue_median_number_of_authors_by_year(self):\n sf = self._all_papers_sf.groupby(\"Paper publish year\", {'Authors Number List': agg.CONCAT(\"Authors Number\")})\n return {r[\"Paper publish year\"]: np.median(r['Authors Number List']) for r in sf}\n\n def get_number_of_papers_by_year(self):\n sf = self._all_papers_sf.groupby(\"Paper publish year\", {\"Count\": agg.COUNT()})\n return {r[\"Paper publish year\"]: r[\"Count\"] for r in sf}\n\n def average_and_median_academic_age(self, year, authors_seq_list):\n \"\"\"\n Calculate venue authors average/median academic ages, i.e. number of years since the year the published their first paper,\n in specific year\n :param year: year\n :param authors_seq_list: authors sequence\n :return: dict with the average and median academic age values\n \"\"\"\n authors_list = self.get_venue_authors_ids_list(year, year, authors_seq_list)\n # remove papers with too many authors\n academic_birthyears_list = [year - self.authors_academic_birthyear_dict[a] for a in authors_list if\n a in self.authors_academic_birthyear_dict]\n if len(academic_birthyears_list) == 0:\n return {'average': 0, 'median': 0}\n return {'average': np.average(academic_birthyears_list), 'median': np.median(academic_birthyears_list)}\n\n def get_average_and_median_academic_age_dict(self, authors_seq_list=None):\n return {i: self.average_and_median_academic_age(i, authors_seq_list) for i in\n range(self.min_year + 1, self.max_year + 1)}\n\n def get_authors_publications_number_in_year_range(self, authors_set, start_year, end_year, authors_seq=None):\n d = {}\n\n for y in range(start_year, end_year + 1):\n p_sf = self.get_papers_sframe_by_year(y, y)\n if authors_seq is not None:\n p_sf['Selected Authors'] = p_sf[\"Authors List Sorted\"].apply(lambda l: [l[i] for i in authors_seq])\n else:\n p_sf['Selected Authors'] = p_sf[\"Authors List Sorted\"]\n p_sf['published'] = p_sf['Selected Authors'].apply(lambda l: 1 if len(set(l) & authors_set) > 0 else 0)\n d[y] = p_sf['published'].sum()\n return d\n\n def get_venue_stats(self):\n features_dict = {\"name\": self.name, \"id\": self.venue_id,\n \"min_year\": self._all_papers_sf['Paper publish year'].min(),\n \"max_year\": self._all_papers_sf['Paper publish year'].max(),\n \"total_authors_number\": len(set(self.get_venue_authors_ids_list())),\n \"total_papers_number\": len(self.get_papers_ids_set()),\n \"percentage_of_papers_with_all_new_authors\": self.get_yearly_percentage_of_papers_with_new_authors_dict(\n None),\n \"percentage_of_papers_new_first_authors\": self.get_yearly_percentage_of_papers_with_new_authors_dict(\n [0]),\n \"percentage_of_papers_new_last_authors\": self.get_yearly_percentage_of_papers_with_new_authors_dict(\n [-1]),\n \"percentage_of_papers_new_first_and_last_authors\": self.get_yearly_percentage_of_papers_with_new_authors_dict(\n [0, -1]),\n \"avg_median_academic_age_first_authors\": self.get_average_and_median_academic_age_dict([0]),\n \"avg_median_academic_age_last_authors\": self.get_average_and_median_academic_age_dict([-1]),\n \"avg_median_academic_age_all_authors\": self.get_average_and_median_academic_age_dict(None),\n \"median_number_of_authors_by_year\": self.get_venue_median_number_of_authors_by_year()}\n return 
features_dict\n\n def update_venue_stats(self):\n j = json.load(open(f\"/data/json/journals/{self.name} ({self.venue_id}).json\", \"r\"))\n logger.info(f\"update features of {self.name}\")\n j[\"median_number_of_authors_by_year\"] = self.get_venue_median_number_of_authors_by_year()\n j[\"number_of_papers_in_a_year\"] = self.get_number_of_papers_by_year()\n json.dump(j, open(f\"/data/json/journals/{self.name} ({self.venue_id}).json\", \"w\"))\n\n @property\n def name(self):\n return self._name\n\n @property\n def venue_id(self):\n return self._venue_id\n\n @property\n def venue_type(self):\n return self._venue_type\n\n @property\n def min_year(self):\n return self._all_papers_sf['Paper publish year'].min()\n\n @property\n def max_year(self):\n return self._all_papers_sf['Paper publish year'].max()\n\n @property\n def papers_with_new_first_authors_dict(self):\n return self.get_yearly_percentage_of_papers_with_new_authors_dict([0])\n\n @property\n def papers_with_new_last_authors_dict(self):\n return self.get_yearly_percentage_of_papers_with_new_authors_dict([-1])\n\n @property\n def authors_academic_birthyear_dict(self):\n if self._academic_birthyear_dict is not None:\n return self._academic_birthyear_dict\n\n if self.venue_type == VenueType.journal:\n p = JOURNAL_AUTHORS_ACADEMIC_BIRTHYEAR_PKL\n else:\n p = CONFERENCE_AUTHORS_ACADEMIC_BIRTHYEAR_PKL\n d = pickle.load(open(p, \"rb\"))\n self._academic_birthyear_dict = d\n\n return self._academic_birthyear_dict\n\n def get_venue_authors_timeseries(self):\n\n p = self._all_papers_sf[\"Paper ID\", \"Paper publish year\"]\n a = self.authors_affilations_sframe[\"Paper ID\", \"Author ID\"]\n sf = p.join(a, on=\"Paper ID\")[\"Author ID\", \"Paper publish year\"]\n sf = sf.groupby(\"Author ID\", {\"mindate\": agg.MIN(\"Paper publish year\"),\n \"maxdate\": agg.MAX(\"Paper publish year\")})\n sf.rename({\"Author ID\": \"v_id\"})\n sf[\"mindate\"] = sf[\"mindate\"].apply(lambda y: datetime(year=y, month=1, day=1))\n sf[\"maxdate\"] = sf[\"maxdate\"].apply(lambda y: datetime(year=y, month=1, day=1))\n\n if sf.num_rows() == 0:\n return None\n\n return tc.TimeSeries(sf, index=\"mindate\")\n\n def get_venue_authors_links_timeseries(self):\n a = self.authors_affilations_sframe[\"Paper ID\", \"Author ID\"]\n\n a = self._all_papers_sf.join(a, on=\"Paper ID\")\n a = a['datetime', 'Author ID', 'Paper publish year', 'Paper ID']\n links_sf = a.join(a, on=\"Paper ID\")\n p = self.papers_sframe[\"Paper ID\", \"Paper publish year\"]\n\n links_sf.rename({'Author ID': 'src_id', 'Author ID.1': 'dst_id'})\n links_sf = links_sf[\"src_id\", \"dst_id\", \"datetime\"]\n links_sf = links_sf[links_sf[\"src_id\"] != links_sf[\n \"dst_id\"]] # because this is a direct network we keep for each link both (u,v) and (v,u)\n return tc.TimeSeries(links_sf, index=\"datetime\")\n\n def create_timeseries(self, outpath):\n v_ts = self.get_venue_authors_timeseries()\n v_ts.save(f\"{outpath}/{self._venue_id}.vertices.timeseries\")\n i_ts = self.get_venue_authors_links_timeseries()\n i_ts.save(f\"{outpath}/{self._venue_id}.interactions.timeseries\")\n\n # -------------------------#\n # SFrame Properties #\n # -------------------------#\n @property\n def fields_of_study_sframe(self):\n if os.path.isdir(FIELDS_OF_STUDY_SFRAME):\n return tc.load_sframe(FIELDS_OF_STUDY_SFRAME)\n return tc.load_sframe(FIELDS_OF_STUDY_S3_SFRAME)\n\n @property\n def fields_of_study_hierarchy_sframe(self):\n if os.path.isdir(FIELDS_OF_STUDY_HIERARCHY_SFRAME):\n return 
tc.load_sframe(FIELDS_OF_STUDY_HIERARCHY_SFRAME)\n return tc.load_sframe(FIELDS_OF_STUDY_HIERARCHY_S3_SFRAME)\n\n @property\n def papers_sframe(self):\n if os.path.isdir(PAPERS_SFRAME):\n return tc.load_sframe(PAPERS_SFRAME)\n return tc.load_sframe(PAPERS_S3_SFRAME)\n\n @property\n def extended_papers_sframe(self):\n if os.path.isdir(EXTENDED_PAPERS_SFRAME):\n return tc.load_sframe(EXTENDED_PAPERS_SFRAME)\n return tc.load_sframe(EXTENDED_PAPERS_S3_SFRAME)\n\n @property\n def paper_author_affiliations_sframe(self):\n if os.path.isdir(PAPER_AUTHOR_AFFILIATIONS_SFRAME):\n return tc.load_sframe(PAPER_AUTHOR_AFFILIATIONS_SFRAME)\n return tc.load_sframe(PAPER_AUTHOR_AFFILIATIONS_S3_SFRAME)\n\n @property\n def paper_keywords_sframe(self):\n if os.path.isdir(PAPER_KEYWORDS_SFRAME):\n return tc.load_sframe(PAPER_KEYWORDS_SFRAME)\n return tc.load_sframe(PAPER_KEYWORDS_S3_SFRAME)\n\n @property\n def paper_references_sframe(self):\n if os.path.isdir(PAPER_REFERENCES_SFRAME):\n return tc.load_sframe(PAPER_REFERENCES_SFRAME)\n return tc.load_sframe(PAPER_REFERENCES_S3_SFRAME)\n\n @property\n def authors_affilations_sframe(self):\n if os.path.isdir(PAPER_AUTHOR_AFFILIATIONS_SFRAME):\n return tc.load_sframe(PAPER_AUTHOR_AFFILIATIONS_SFRAME)\n return tc.load_sframe(PAPER_AUTHOR_AFFILIATIONS_S3_SFRAME)\n\n @property\n def coauthors_links_sframe(self):\n if self._co_authors_links is not None:\n return self._co_authors_links\n if os.path.isdir(CO_AUTHORSHIP_LINK_SFRAME):\n self._co_authors_links = tc.load_sframe(CO_AUTHORSHIP_LINK_SFRAME)\n else:\n self._co_authors_links = tc.load_sframe(CO_AUTHORSHIP_LINK_S3_SFRAME)\n return self._co_authors_links\n\n @property\n def l3_field_papers_sframe(self):\n if os.path.isdir(L3_FIELD_PAPERS_LIST_SFRAME):\n return tc.load_sframe(L3_FIELD_PAPERS_LIST_SFRAME)\n return tc.load_sframe(L3_FIELD_PAPERS_LIST_S3_SFRAME)\n\n @property\n def journals_sframe(self):\n if os.path.isdir(JOURNALS_SFRAME):\n return tc.load_sframe(JOURNALS_SFRAME)\n return tc.load_sframe(JOURNALS_S3_SFRAME)\n\n @property\n def confrences_sframe(self):\n if os.path.isdir(CONFERENCES_SAME):\n return tc.load_sframe(CONFERENCES_SAME)\n return tc.load_sframe(CONFRENCES_S3_SFRAME)\n\n @property\n def l3_papers_dict(self):\n if self._l3_papers_dict is None:\n sf = self.l3_field_papers_sframe\n self._l3_papers_dict = {r[\"Field of study ID\"]: r[\"Papers List\"] for r in sf}\n return self._l3_papers_dict\n\n\ndef update_journals_features():\n for i in os.listdir(\"/data/sframes/journals\"):\n try:\n logger.info(f\"Updating {i}\")\n jid = i.split(\".sframe\")[0]\n va = VenueAnalyzer(jid, VenueType.journal, None)\n va.update_venue_stats()\n except Exception as e:\n print(e.message)\n logger.error(e.message)\n\n\ndef get_all_journals_features():\n academic_birthyear_dict = pickle.load(open(JOURNAL_AUTHORS_ACADEMIC_BIRTHYEAR_PKL, \"rb\"))\n for i in os.listdir(\"/data/sframes/journals\"):\n logger.info(f\"Analyzing {i}\")\n try:\n jid = i.split(\".sframe\")[0]\n va = VenueAnalyzer(jid, VenueType.journal, academic_birthyear_dict)\n j = va.get_venue_stats()\n json.dump(j, open(f\"/data/json/journals/{va.name} ({va.venue_id}).json\", \"w\"))\n except Exception as e:\n print(e.message)\n logger.error(e.message)\n\n\ndef create_all_timeseries():\n for i in os.listdir(\"/data/sframes/journals\"):\n jid = i.split(\".sframe\")[0]\n print(f\"Createing {jid} timeseries\")\n va = VenueAnalyzer(jid, VenueType.journal, None)\n p = f\"/data/timeseries/journals/{jid}\"\n if os.path.isdir(p):\n continue\n os.mkdir(p)\n 
va.create_timeseries(p)\n\n\nif __name__ == \"__main__\":\n va = VenueAnalyzer(\"08364228\") # Nature 08364228 Science 003B355D PNAS 077EDC2F\n", "id": "9507770", "language": "Python", "matching_score": 3.6361966133117676, "max_stars_count": 1, "path": "ScienceDynamics/prediction/venue_analyzer.py" }, { "content": "from ScienceDynamics.config.configs import VenueType\nimport turicreate.aggregate as agg\nfrom collections import Counter\n\nfrom ScienceDynamics.sframe_creators.create_mag_sframes import get_papers_sframe\n\n\nclass VenuesAnalyzer(object):\n def __init__(self, min_ref_num=5, venue_type=VenueType.journal):\n self._min_ref_num = min_ref_num\n self._venue_col_name = VenuesAnalyzer._get_venue_col_name(venue_type)\n\n @staticmethod\n def _get_venue_col_name(venue_type):\n col_name = \"Journal ID mapped to venue name\"\n if venue_type == VenueType.conference:\n col_name = \"Conference ID mapped to venue name\"\n return col_name\n\n def get_venues_papers_ids(self, end_year):\n p_sf = get_papers_sframe(min_ref_num=self._min_ref_num, end_year=end_year)\n return p_sf.groupby(self._venue_col_name, {'papers_list': agg.CONCAT('Paper ID')})\n\n def get_venues_authors_ids(self, end_year):\n p_sf = get_papers_sframe(min_ref_num=self._min_ref_num, end_year=end_year)\n a_sf = get_authors_sframe(min_ref_num=self._min_ref_num, end_year=end_year)\n sf = a_sf.join(p_sf, on=\"Paper ID\")\n\n return sf.groupby(self._venue_col_name, {'authors_list': agg.CONCAT('Author ID')})\n\n def get_venue_features(self, end_year):\n p_sf = self.get_venue_features(end_year=end_year)\n a_sf = self.get_venue_features(end_year=end_year)\n sf = p_sf.join(a_sf, on=self._venue_col_name)\n sf['authors_dict'] = sf['authors_list'].apply(lambda l: dict(Counter(l)))\n sf['unique_authors'] = sf['authors_dict'].apply(lambda d: len(d.keys()))\n sf['total_authors'] = sf['authors_list'].apply(lambda l: len(l))\n sf['total_papers'] = sf['papers_list'].apply(lambda l: len(l))\n\n return sf\n", "id": "5863070", "language": "Python", "matching_score": 1.777004361152649, "max_stars_count": 1, "path": "ScienceDynamics/prediction/venues_analyzer.py" }, { "content": "from ScienceDynamics.config.configs import VenueType\nfrom ScienceDynamics.config.fetch_config import AUTHORS_FETCHER, PAPERS_FETCHER\nfrom ScienceDynamics.author import Author\nfrom functools import lru_cache\nimport json\n\n\nclass Paper(object):\n def __init__(self, paper_id, papers_fetcher=PAPERS_FETCHER, authors_fetcher=AUTHORS_FETCHER, json_data=None):\n \"\"\"\n Construct a paper object\n :param paper_id: paper id\n :param papers_fetcher: papers fetcher object\n :param authors_fetcher: authors fetcher object\n :param json_data: paper's json_data\n \"\"\"\n self._id = paper_id\n self._authors_fetcher = authors_fetcher\n self._json_data = json_data\n if json_data is None:\n self._json_data = papers_fetcher.get_paper_data(paper_id)\n\n def _get_data_value(self, k):\n \"\"\"\n Returns a paper feature value by name, if the key exists otherwise returns None\n :param k: feature name\n :return: the feature value if exists or None otherwise\n \"\"\"\n if k in self._json_data:\n return self._json_data[k]\n return None\n\n # ------------------------------------#\n # Times Authors Published in Venue #\n # ------------------------------------#\n def times_list_authors_published_in_venue(self):\n \"\"\"\n A list of time each paper's author published in venue until the year before the paper publication\n :return: list of time each paper's publist in venue\n :rtype: list of int\n 
\"\"\"\n return [\n a.times_published_in_venue(self.venue_id, self.venue_type, start_year=None, end_year=self.publish_year - 1)\n for a in self.authors_list]\n\n def did_authors_publish_before_in_venue(self):\n \"\"\"\n Return True if one of the paper's author published in venue until a year before this paper publication, or\n False otherwise\n :return: True if one of the authors published in venue false otherwise\n :rtype: bool\n \"\"\"\n if self.total_number_of_times_authors_published_in_venue > 0:\n return True\n return False\n\n def _did_author_published_in_venue(self, a):\n \"\"\"\n Check if an input author published in the paper's venue in the past\n :param a: author object\n :type a: Author\n :return: True if the author published in the venue before, or False otherwise\n :rtype: bool\n \"\"\"\n return a.times_published_in_venue(self.venue_id, self.venue_type, start_year=None,\n end_year=self.publish_year - 1) > 0\n\n def did_first_author_publish_in_venue(self):\n \"\"\"\n Return True if the paper's first author published in venue until a year before this paper publication, or\n false otherwise\n :return: True if the first published in venue false otherwise\n :rtype: bool\n \"\"\"\n return self._did_author_published_in_venue(self.first_author)\n\n def did_last_author_publish_in_venue(self):\n \"\"\"\n Return True if the paper's last author published in venue until a year before this paper publication, or\n false otherwise\n :return: True if the last published in venue false otherwise\n :rtype: bool\n \"\"\"\n return self._did_author_published_in_venue(self.last_author)\n\n # </editor-fold>\n\n # <editor-fold desc=\"Citations Related Functions\">\n # -----------------------------------#\n # Citations Related Functions #\n # -----------------------------------#\n @lru_cache(maxsize=2)\n def get_total_citation_number_by_year_dict(self, include_self_citation):\n \"\"\"\n Returns dict with the number of citation the paper received by a specific year\n :param include_self_citation: if True include also self citations\n :return: dict in which each key is the year and each value is the total number of citations the paper received by the\n input year\n :rtype: dict<int,int>\n \"\"\"\n n = \"Total Citations by Year\"\n if not include_self_citation:\n n = \"Total Citations by Year without Self Citations\"\n d = self._get_data_value(n) # type: dict\n if d==\"nan\":\n d = None\n if d is not None:\n if type(d) is str:\n d = json.loads(d)\n d = {int(y): v for y, v in d.items()}\n return d\n\n @lru_cache(maxsize=400)\n def get_total_citations_number_in_year(self, year, include_self_citation=True):\n \"\"\"\n Get the number of citations in a specific year\n :param year: input year\n :param include_self_citation: to include self-citations or not\n :return: the number of citations in a specific year\n :note: in case the year is out of range the function will return 0\n \"\"\"\n d = self.get_total_citation_number_by_year_dict(include_self_citation)\n if year not in d:\n return 0\n # calculating the previous year with citations\n years = [y for y in d.keys() if y < year]\n if len(years) == 0:\n return d[year]\n year_before = max(years)\n return d[year] - d[year_before]\n\n def total_self_citations_in_year(self, year):\n \"\"\"\n Return the total number of self citation in a year\n :param year: input year\n :return: the total number of self citations in a year\n :rtype: int\n \"\"\"\n return (\n self.get_total_citations_number_in_year(year, True) - self.get_total_citations_number_in_year(year,\n 
False))\n\n def get_max_citations_number_in_year(self, include_self_citation=True):\n \"\"\"\n Returns citation maximal number\n :param include_self_citation: to include or not include self-citations\n :return: the maximal number of citations in a year since the paper's publication\n :rtype: int\n \"\"\"\n d = self.get_total_citation_number_by_year_dict(include_self_citation)\n start_year = min(d.keys())\n end_year = max(d.keys())\n l = [self.get_total_citations_number_in_year(y, include_self_citation) for y in range(start_year, end_year + 1)]\n return max(l)\n\n def total_citations_number_by_year(self, end_year, include_self_citation):\n \"\"\"\n Returns the papers total citations number by input year\n :param end_year: end year\n :param include_self_citation: if True include also self citations\n :return: the total number of paper citation by year\n \"\"\"\n d = self.get_total_citation_number_by_year_dict(include_self_citation)\n if d is None or d == {}:\n return 0\n if end_year in d:\n return d[end_year]\n years_keys = [y for y in d.keys() if y <= end_year]\n if len(years_keys) == 0:\n return 0\n return d[max(years_keys)]\n\n def total_citation_number_after_years_from_publication(self, years_num, include_self_citation):\n \"\"\"\n Return the number of total citations after input years since publication\n :param years_num: the number of years after publication\n :param include_self_citation: if True include also self citations\n :return: return the total number of citations after input number of years\n :rtype: int\n \"\"\"\n return self.total_citations_number_by_year(self.publish_year + years_num, include_self_citation)\n\n # </editor-fold>\n\n # <editor-fold desc=\"Paper's Properties\">\n @property\n def paper_id(self):\n \"\"\"\n Return paper id\n :return: the paper id\n :rtype: str\n \"\"\"\n return self._id\n\n @property\n def venue_type(self):\n \"\"\"\n Returns the paper's venue type if one exists or None otherwise\n :return: Venue type\n :rtype: VenueType\n \"\"\"\n n = self._get_data_value('Journal ID mapped to venue name')\n if n is not None and n != '':\n return VenueType.journal\n\n self._get_data_value('Conference ID mapped to venue name')\n if n is not None and n != '':\n return VenueType.conference\n\n return None\n\n @property\n def venue_id(self):\n \"\"\"\n Returns the venue id\n :return: returns the venue id\n :rtype: str\n \"\"\"\n if self.venue_type == VenueType.journal:\n return self._get_data_value(u'Journal ID mapped to venue name')\n\n if self.venue_type == VenueType.conference:\n return self._get_data_value(u'Conference ID mapped to venue name')\n\n return None\n\n @property\n def venue_name(self):\n \"\"\"\n Returns the venue's name\n :return: the venue name\n :rtype: str\n \"\"\"\n return self._get_data_value('Original venue name')\n\n @property\n def references_count(self):\n \"\"\"\n Returns the paper references number\n :return: the paper's references number\n :rtype: int\n \"\"\"\n return self._get_data_value('Ref Number')\n\n @property\n def publish_year(self):\n \"\"\"\n Returns the paper's publish year\n :return: the paper's publish year\n :rtype: int\n \"\"\"\n return self._get_data_value('Paper publish year')\n\n @property\n def rank(self):\n \"\"\"\n Returns the paper's rank\n :return: paper\n \"\"\"\n return self._get_data_value(\"Paper rank\")\n\n @property\n def total_number_of_times_authors_published_in_venue(self):\n \"\"\"\n The total number of times the authros published in venue\n :return: the total times all authors publish in venue\n 
:rtype: int\n :note: there can be double count of each paper\n \"\"\"\n return sum(self.times_list_authors_published_in_venue())\n\n # <editor-fold desc=\"Paper's Authors Properties\">\n\n @property\n def author_ids_list(self):\n \"\"\"\n The paper's author ids list\n :return: the papers authors ids list\n :rtype: list of str\n \"\"\"\n author_ids = self._get_data_value('Authors List Sorted')\n if type(author_ids) == str:\n author_ids = json.loads(author_ids)\n return author_ids\n\n @property\n def authors_number(self):\n \"\"\"\n Paper authors number\n :return: return the number of authros\n :rtype: int\n \"\"\"\n return self._get_data_value('Authors Number')\n\n @property\n def first_author(self):\n \"\"\"\n Return first author Author object\n :return: First author object\n :rtype: Author\n \"\"\"\n if self.first_author_id is None:\n return None\n return Author(author_id=self.first_author_id, authors_fetcher=self._authors_fetcher)\n\n @property\n def last_author(self):\n \"\"\"\n Return last author Author object\n :return: Last author object\n :rtype: Author\n \"\"\"\n if self.last_author_id is None:\n return None\n return Author(self.last_author_id, self._authors_fetcher)\n\n @property\n def authors_list(self):\n \"\"\"\n Return the papers author object list\n :return: Return the papers author object list\n :rtype: list of Author\n \"\"\"\n if self.author_ids_list is None:\n return []\n return [Author(i, self._authors_fetcher) for i in self.author_ids_list]\n\n @property\n def first_author_id(self):\n \"\"\"\n Return first author id\n :return: first author id\n :rtype: str\n \"\"\"\n if self.author_ids_list is None:\n return None\n return self.author_ids_list[0]\n\n @property\n def last_author_id(self):\n \"\"\"\n Return last author id\n :return: last author id\n :rtype: str\n \"\"\"\n if self.author_ids_list is None:\n return None\n return self.author_ids_list[-1]\n\n @property\n def authors_fullnames_list(self):\n \"\"\"\n Returns authors full names\n :return: return a list with paper's authors full names\n :rtype: list<str>\n \"\"\"\n return [a.fullname for a in self.authors_list]\n\n\n @property\n def title(self):\n return self._get_data_value(\"Original paper title\")\n\n @property\n def paper_norm_title(self):\n return self._get_data_value('Normalized paper title')\n\n @property\n def keywords_list(self):\n \"\"\"\n Return the papers keyworkds\n :return: paper's keywords list\n :rtype: list of str\n \"\"\"\n return self._get_data_value('Keywords List')\n\n @property\n def title_bag_of_words(self):\n \"\"\"\n Returns the title bag of words dict\n :return: the title bag-of-words dict\n :rtype: dict\n \"\"\"\n return self._get_data_value('Title Bag of Words')\n\n\n @property\n def paper_length(self):\n \"\"\"\n Returns the paper length in pages if possible\n :return: the paper's number of pages\n :rtype: int\n :note: this is estimation to the paper's length\n \"\"\"\n start = self._get_data_value('page_start')\n end = self._get_data_value('page_end')\n if start is None or end is None:\n return None\n try:\n length = int(end) - int(start) + 1\n except:\n return None\n return length\n\n @property\n def abstract(self):\n \"\"\"\n Returns the paper's abstract if possible\n :return: paper's abstract\n :rtype: str\n \"\"\"\n return self._get_data_value('abstract')\n\n @property\n def issn(self):\n \"\"\"\n Returns the paper's publication ISSN if possible\n :return: the paper's ISSN\n :rtype: str\n \"\"\"\n return self._get_data_value('issn')\n", "id": "10022385", "language": "Python", 
"matching_score": 4.696593761444092, "max_stars_count": 1, "path": "ScienceDynamics/paper.py" }, { "content": "from ScienceDynamics.config.configs import VenueType\nfrom ScienceDynamics.config.fetch_config import AUTHORS_FETCHER\nfrom ScienceDynamics.utils import join_all_lists\nfrom collections import Counter\n\n\nclass Author(object):\n def __init__(self, author_id, fullname=None, authors_fetcher=AUTHORS_FETCHER):\n \"\"\"\n Construct an Author object that contains the authors features\n :param author_id: an author id\n :param authors_fetcher: an authors fetcher object\n :type author_id: str\n :param fullname: author name\n :type authors_fetcher: AuthorsFetcher\n :note: in case several authors as the same name than only the first matching author will return\n \"\"\"\n\n self._start_year = None\n self._last_year = None\n self._authors_fetcher = authors_fetcher\n\n if author_id is not None:\n self._json_data = authors_fetcher.get_author_data(author_id=author_id) # type: dict\n elif fullname is not None:\n self._json_data = authors_fetcher.get_author_data(author_name=fullname) # type: dict\n\n self._id = self.author_id\n self._paper_collection_analyzer = None\n\n def _filter_dict_by_years(self, d, start_year, end_year):\n if start_year is not None:\n d = {k: v for k, v in d.items() if k >= start_year}\n if end_year is not None:\n d = {k: v for k, v in d.items() if k <= end_year}\n return d\n\n def _get_items_list_between_years(self, key_name, start_year, end_year):\n d = self._json_data[key_name]\n d = self._filter_dict_by_years(d, start_year, end_year)\n return join_all_lists(d.values())\n\n def get_academic_age(self, at_year):\n \"\"\"\n Get the author's academic age at specific year\n :param at_year: year\n :return: the author's academic age\n :rtype: int\n \"\"\"\n return at_year - self.first_publication_year\n\n def get_papers_list(self, start_year, end_year):\n \"\"\"\n Return the author's paper list between the start_year and end_year\n :param start_year: start year\n :param end_year: end year\n :return: a list of the author's paper ids that were published between the start and end years\n :rtype: list of str\n \"\"\"\n return self._get_items_list_between_years('Papers by Years Dict', start_year, end_year)\n\n def get_papers_list_at_year(self, year):\n \"\"\"\n Return the author's papers list in a specific year\n :param year: year\n :return: the author's paper ids that were published at the input year\n :rtype: list of str\n \"\"\"\n return self.get_papers_list(year, year)\n\n def number_of_papers(self, start_year, end_year):\n \"\"\"\n Returns the author's number of papers between years\n :param start_year: start year\n :param end_year: end year\n :return: the number of papers the author wrote between the start and end years\n :rtype: int\n \"\"\"\n return len(self.get_papers_list(start_year, end_year))\n\n def get_coauthors_list(self, start_year, end_year):\n \"\"\"\n Return the author's coauthors list between the start_year and end_year\n :param start_year: start year\n :param end_year: end year\n :return: a list of the author's coauthors ids that were published between the start and end years\n :rtype: list of str\n \"\"\"\n return self._get_items_list_between_years('Coauthors by Years Dict', start_year, end_year)\n\n def get_coauthors_list_at_year(self, year):\n \"\"\"\n Return the author's coauthors in a specific year\n :param year: year\n :return: the author's paper ids that were published at the input year\n :rtype: list of str\n \"\"\"\n return self.get_coauthors_list(year, 
year)\n\n def number_of_coauthors(self, start_year, end_year):\n \"\"\"\n Returns the author's number of papers between years\n :param start_year: start year\n :param end_year: end year\n :return: the number of papers the author wrote between the start and end years\n :rtype: int\n \"\"\"\n return len(self.get_coauthors_list(start_year, end_year))\n\n def get_venues_list(self, venue_type, start_year, end_year):\n \"\"\"\n Returns the venues in which the author published between years\n :param venue_type: venue type of (can be VenueType.journal or VenueType.conference)\n :param start_year: the start year\n :param end_year: the end year\n :return: return a list of all the venues the author published in between the input years\n :rtype: list of str\n \"\"\"\n k = 'Journal ID by Year Dict'\n if venue_type == VenueType.conference:\n k = 'Conference ID by Year Dict'\n return self._get_items_list_between_years(k, start_year, end_year)\n\n def times_published_in_venue(self, venue_id, venue_type, start_year, end_year):\n \"\"\"\n Return the time the author published in specific venue between start_year and end_year\n :param venue_id: the venue id\n :param venue_type: the venue type\n :param start_year: start year\n :param end_year: end year\n :return: the number of time the author published in venue\n :rtype: int\n \"\"\"\n l = self.get_venues_list(venue_type, start_year, end_year)\n c = Counter(l)\n if venue_id not in c:\n return 0\n return c[venue_id]\n\n def _get_data_value(self, k):\n \"\"\"\n Get a data value from the features json data\n :param k: feature name\n :return: the value of the feature name if it exists or None otherwise\n \"\"\"\n if k in self._json_data:\n return self._json_data[k]\n return None\n\n @property\n def author_id(self):\n \"\"\"\n Return the author's id\n :return: author id\n :rtype: str\n \"\"\"\n return self._get_data_value('Author ID')\n\n @property\n def gender(self):\n \"\"\"\n Return the author's gender if it was identified by his/her first name\n :return: The authors gender or None\n :rtype: str\n \"\"\"\n if ('Gender Dict' not in self._json_data) or (self._json_data['Gender Dict'] is None):\n return None\n return self._json_data['Gender Dict']['Gender']\n\n @property\n def male_probability(self):\n \"\"\"\n Returns the probabilty of the author's first name to be male\n :return: The author's probability of being male according to it's first name\n :rtyoe: float\n \"\"\"\n if ('Gender Dict' not in self._json_data) or (self._json_data['Gender Dict'] is None):\n return None\n return self._json_data['Gender Dict']['Percentage Males']\n\n @property\n def female_probability(self):\n p = self.male_probability\n if p is None:\n return None\n return 1 - p\n\n @property\n def fullname(self):\n \"\"\"\n Returns the authors full name (in lower case)\n :return: the authors full name\n :rtype: str\n \"\"\"\n return self._get_data_value('Author name')\n\n @property\n def firstname(self):\n \"\"\"\n Returns the authors first name (in lower case)\n :return: the authors first name\n :rtype: str\n :note: if the name as less than two words the property will return None\n \"\"\"\n l = self.fullname.split()\n if len(l) < 2:\n return None\n return l[0]\n\n @property\n def lastname(self):\n \"\"\"\n Returns the authors last name (in lower case)\n :return: the authors last name\n :rtype: str\n :note: if the name as less than two words the property will return None\n \"\"\"\n l = self.fullname.split()\n if len(l) < 2:\n return None\n return l[-1]\n\n @property\n def 
coauthors_dict(self):\n if 'Coauthors by Years Dict' in self._json_data:\n return dict(self._json_data['Coauthors by Years Dict'])\n\n @property\n def papers_dict(self):\n \"\"\"\n Dict of the authors full paper ids list by year\n :return: dict of the author's paper ids by year\n :rtype: dict\n \"\"\"\n return self._json_data['Papers by Years Dict']\n\n @property\n def papers_list(self):\n \"\"\"\n List of the authors full paper ids list\n :return: list of the author's paper ids\n :rtype: list of str\n \"\"\"\n return self.get_papers_list(None, None)\n\n @property\n def journals_dict(self):\n \"\"\"\n Dict of journals in which the author published in\n :return: dict with list of journals by year\n :rtype: dict<int,list<str>>\n \"\"\"\n return self._get_data_value('Journal ID by Year Dict')\n\n @property\n def conference_list(self):\n \"\"\"\n Dict of conferences in which the author published in\n :return: dict with list of conferences by year\n :rtype: dict<int,list<str>>\n \"\"\"\n return self._get_data_value('Conference ID by Year Dict')\n\n @property\n def papers_number(self):\n \"\"\"\n Returns the number of papers the author publish\n :return: the number of papers the author published\n :rtype: int\n \"\"\"\n return len(self.papers_list)\n\n @property\n def first_publication_year(self):\n \"\"\"\n Returns the year in which the author publish his/her first paper\n :return: the year the author's first paper was published\n :rtype: int\n \"\"\"\n if self._start_year is None and self.papers_dict is not None:\n self._start_year = min(self.papers_dict.keys())\n\n return self._start_year\n\n @property\n def last_publication_year(self):\n \"\"\"\n The year in which the author's last paper was published according to the dataset\n :return: the year in which the authors first paper was publish\n \"\"\"\n if self._last_year is None and self.papers_dict is not None:\n self._last_year = max(self.papers_dict.keys())\n\n return self._last_year\n\n @property\n def papers_collection_analyzer(self):\n \"\"\"\n Return PaperCollection object for analyzing the authors papers\n :return: paper collection analyzer object\n :rtype: PapersCollection\n \"\"\"\n if self._paper_collection_analyzer is None:\n from ScienceDynamics.papers_collection_analyer import PapersCollection\n self._paper_collection_analyzer = PapersCollection(papers_ids=self.papers_list)\n\n return self._paper_collection_analyzer\n\n @staticmethod\n def find_authors_id_by_name(author_name, authors_fetcher=AUTHORS_FETCHER):\n \"\"\"\n Returns a list of ids for authors with the input author name\n :param authors_fetcher:\n :param author_name: author's full name or regex object\n :param: authors_fetcher: Author Fetcher object\n :return: list of author_ids\n :rtype: list<str>\n :note the author_name can be regex object\n \"\"\"\n return authors_fetcher.get_author_ids_by_name(author_name)\n", "id": "4624892", "language": "Python", "matching_score": 3.389525890350342, "max_stars_count": 1, "path": "ScienceDynamics/author.py" }, { "content": "import itertools\nimport math\nimport random\nfrom collections import Counter\nfrom functools import lru_cache\n\nimport numpy as np\nimport turicreate as tc\n\nfrom ScienceDynamics.authors_list_analyzer import AuthorsListAnalyzer\nfrom ScienceDynamics.config.configs import VenueType\nfrom ScienceDynamics.config.fetch_config import AUTHORS_FETCHER, PAPERS_FETCHER\nfrom ScienceDynamics.config.log_config import logger\nfrom tqdm import tqdm\n\nclass PapersCollection(object):\n MAX_PAPERS = 500000\n\n def 
__init__(self, papers_ids=None, papers_list=None, papers_filter_func=None, papers_fetcher=PAPERS_FETCHER,\n authors_fetcher=AUTHORS_FETCHER):\n \"\"\"\n Consturct a paper collection object using a list of paper ids or a list of Paper object\n :param papers_ids: papers ids list\n :param papers_list: Paper object list (optional)\n :param papers_filter_func: a filter function to filter the papers. For example, the filter fucntion\n \"lambda p: p.reference_count < 5\" will filter out papeprs with less than 5 references (optional)\n :param papers_fetcher: a PapersFetcher object (optional)\n :param authors_fetcher: an AuthorsFetcher object (optional)\n \"\"\"\n self._authors_fetcher = authors_fetcher\n self._papers_fetcher = papers_fetcher\n\n if papers_list is None:\n papers_list = []\n self.__papers_list = papers_list # type: list[Paper]\n self.__papers_ids = papers_ids # type: list[str]\n self._min_year = None\n self._max_year = None\n\n if papers_ids is None and len(papers_list) > 0:\n self.__papers_ids = [p.paper_id for p in papers_list]\n if papers_filter_func is not None:\n self.__filter_papers(papers_filter_func)\n if len(self.__papers_list) > self.MAX_PAPERS:\n logger.warn(\n \"PapersCollection contains over maximal number of papers %s.\\n Randomly selecting %s papers\\b\" % (\n len(self.__papers_list), self.MAX_PAPERS))\n self.__randomly_select_papers(self.MAX_PAPERS)\n\n def __filter_papers(self, filter_func=lambda p: p):\n \"\"\"\n Filtering the papers according to the filter func\n :param filter_func: input filter function, all papers in the collection in which filter_func(p) == True will be\n filtered\n :note the function update both the self.__papers_list and the self.__papers_ids vars\n :warning: due to caching issues calling this function outside the the class constructor can have\n unexcpected reults\n \"\"\"\n self.__papers_list = [p for p in self.papers_list if not filter_func(p)]\n self.__papers_ids = [p.paper_id for p in self.__papers_list]\n\n def __randomly_select_papers(self, n):\n \"\"\"\n Randomly select n papers\n :param n: number of paper to select\n :warning: due to caching issues calling this function outside the the class constructor can have\n unexcpected reults\n \"\"\"\n self.__papers_list = random.shuffle(self.__papers_list)[:n]\n self.__papers_ids = [p.paper_id for p in self.__papers_list]\n\n def first_authors_list(self, year):\n \"\"\"\n Get papers' first authors list in a specific year\n :param year: year\n :return: a list of first authors\n :rtyoe: list<Author>\n \"\"\"\n return [p.first_author for p in self.papers_published_in_year_list(year)]\n\n def last_authors_list(self, year):\n \"\"\"\n Get papers' last authors list in a specific year\n :param year: year\n :return: a list of last authors\n :rtyoe: list<Author>\n \"\"\"\n return [p.last_author for p in self.papers_published_in_year_list(year)]\n\n @lru_cache(maxsize=100)\n def all_authors_in_year_list(self, year):\n \"\"\"\n Get authors list of papers that were published in a specific year\n :param year: year\n :return: a list of all authors\n :rtyoe: list<Author>\n \"\"\"\n l = [p.authors_list for p in self.papers_published_in_year_list(year)]\n return list(itertools.chain.from_iterable(l))\n\n @lru_cache(maxsize=100)\n def papers_published_in_year_list(self, year):\n \"\"\"\n Return papers list of papers in collection that were published in specific year\n :param year: input year\n :return: list of Papers that were published in the input year\n :rtype: list<Paper>\n \"\"\"\n return [p for p in 
self.papers_list if p.publish_year == year]\n\n @lru_cache(maxsize=100)\n def papers_between_years_list(self, start_year=None, end_year=None):\n \"\"\"\n Return papers list of papers in collection that were published in year range\n :param start_year: start year\n :param end_year: end year\n :return: list of Papers that were published between start_year and end_year\n :rtype: list<Paper>\n \"\"\"\n if start_year is None:\n start_year = 0\n if end_year is None:\n end_year = float(\"inf\")\n\n return [p for p in self.papers_list if end_year >= p.publish_year >= start_year]\n\n def papers_ranks_list(self, year):\n \"\"\"\n Return list of collection's papers ranks in input year\n :param year: input year\n :return: list of collection ranks\n :rtype: list<int>\n \"\"\"\n return [p.rank() for p in self.papers_published_in_year_list(year)]\n\n def papers_length_list(self, year):\n \"\"\"\n Return list of collection's papers length in input year\n :param year: input year\n :return: list of collection ranks\n :rtype: list<int>\n \"\"\"\n return [p.paper_length for p in self.papers_published_in_year_list(year) if p.paper_length is not None]\n\n def papers_total_citations_after_years(self, publication_year, after_years, include_self_citations):\n \"\"\"\n Return the total citations of collection's papers that were published in the input publication after after_years\n :param publication_year: a publication year\n :param after_years: year after publication\n :param include_self_citations: if True include self citation otherwise don't include self citations\n :return: the total number of citation of papers that were published in published_years after\n after_years\n :rtype: float\n \"\"\"\n return sum([i[1] for i in\n self.papers_citations_after_years_list(publication_year, after_years, include_self_citations)])\n\n def paper_with_max_citation_after_years(self, publication_year, after_years, include_self_citations):\n l = self.papers_citations_after_years_list(publication_year, after_years, include_self_citations)\n l = sorted(l, key=lambda x: x[1], reverse=True)\n return l[0]\n\n @lru_cache(maxsize=100)\n def papers_citations_after_years_list(self, publication_year, after_years, include_self_citations):\n \"\"\"\n Return the list with the total citations of collection's papers that were published in the input publication\n after after_years\n :param publication_year: a publication year\n :param after_years: year after publication\n :param include_self_citations: if True include self citation otherwise don't include self citations\n :return: List of the total number of citation of papers that were published in published_years after\n after_years\n :rtype: list<Paper,int>\n \"\"\"\n l = self.papers_published_in_year_list(publication_year)\n return [\n (p, p.total_citation_number_after_years_from_publication(after_years, include_self_citations)) for p in l]\n\n def papers_total_citations_in_year(self, year, include_self_citations):\n \"\"\"\n Returns the papers' total number of citations in a specfici year\n :param year: year\n :param include_self_citations: if True include self citation otherwise don't include self citations\n :return: the total number of papers' citations in a specific year\n :rtype: int\n \"\"\"\n l = self.papers_between_years_list(None, year)\n return sum([p.get_total_citations_number_in_year(year, include_self_citations) for p in l])\n\n def get_yearly_most_cited_papers_sframe(self, citation_after_year, max_publish_year):\n \"\"\"\n Returns SFrame, with the most cited in each year 
papers details\n :param max_publish_year:\n :param citation_after_year: number of years to check the number of citations after paper publication year\n :param: max_publish_year: the maximal publish year\n :return: SFrame with the most cited paper details in each year\n :rtype: tc.SFrame\n \"\"\"\n m = {\"ids\": [], \"year\": [], \"title\": [], \"citation_number\": [], \"venue_type\": [], \"venue_name\": []}\n for y, p in self.get_yearly_most_cited_paper_dict(citation_after_year, True,\n self.max_publication_year).items():\n if p.publish_year > max_publish_year:\n continue\n m[\"ids\"].append(p.paper_id)\n\n m[\"year\"].append(p.publish_year)\n m[\"title\"].append(p.title)\n m[\"citation_number\"].append(p.total_citations_number_by_year(p.publish_year + citation_after_year,\n include_self_citation=True))\n m[\"venue_name\"].append(p.venue_name)\n t = \"\"\n if p.venue_type == VenueType.journal:\n t = \"journal\"\n elif p.venue_type == VenueType.conference:\n t = \"conference\"\n\n m[\"venue_type\"].append(t)\n sf = tc.SFrame(m)\n\n return sf.sort(\"year\", ascending=False)\n\n def papers_in_which_authors_published_in_venue_list(self, year=None):\n \"\"\"\n Return the papers list of only papers in which one of their authors published in the venue in the past\n :param year: input year or None for all papers\n :return: list of papers in which one of the authors published before in the same venue the paper was published\n :rtype: list<Paper>\n \"\"\"\n if year is not None:\n papers_list = self.papers_published_in_year_list(year)\n else:\n papers_list = self.papers_list\n return [p for p in papers_list if p.did_authors_publish_before_in_venue()]\n\n def papers_in_which_authors_not_published_in_venue(self, year=None):\n \"\"\"\n Return the papers list of only papers in which their authors didn't publish in the venue in the past\n :param year: input year or None for all papers\n :return: list of papers in which one the papers' authors didn't publish before in the same venue the paper was\n published\n :rtype: list<Paper>\n \"\"\"\n if year is not None:\n papers_list = self.papers_published_in_year_list(year)\n else:\n papers_list = self.papers_list\n return [p for p in papers_list if not p.did_authors_publish_before_in_venue()]\n\n # <editor-fold desc=\"Papers Collection Properties\">\n\n def papers_number(self, year):\n \"\"\"\n Return the papers number\n :param year: input year\n :return: the number of papers in the collection that where published in the input list\n :rtype: int\n \"\"\"\n return len(self.papers_published_in_year_list(year))\n\n def authors_number(self, year, unique=True):\n \"\"\"\n The total number of authors who wrote the collections papers in a specific year\n :param year: input year\n :param unique: if True count each author only once, otherwise each author can be count multiple times\n :return: the number of authors who wrote the collection paper\n \"\"\"\n l = self.all_authors_in_year_list(year)\n if unique:\n l = list(set(l))\n return len(l)\n\n @property\n def papers_ids(self):\n \"\"\"\n Returns a list with the papers ids\n :return: returns a list with the papers ids\n :rtype: list<str>\n \"\"\"\n return self.__papers_ids\n\n @property\n def papers_list(self):\n \"\"\"\n Returns a list of the collection's paper objects\n :return: list of papers objects\n :rtype: list<Paper>\n \"\"\"\n if len(self.__papers_list) == 0:\n from ScienceDynamics.paper import Paper\n self.__papers_list = [Paper(i, self._papers_fetcher, self._authors_fetcher) for i in 
tqdm(self.papers_ids)]\n\n return self.__papers_list\n\n @property\n def max_publication_year(self):\n \"\"\"\n Returns the maximal publication year among all the collections papers\n :return: maximal publication year\n :rtype: int\n \"\"\"\n if self._max_year is None:\n self._max_year = max([p.publish_year for p in self.papers_list if p.publish_year is not None])\n return self._max_year\n\n @property\n def min_publication_year(self):\n \"\"\"\n Returns the minimal publication year among all the collections papers\n :return: minimal publication year\n :rtype: int\n \"\"\"\n if self._min_year is None:\n self._min_year = min([p.publish_year for p in self.papers_list if p.publish_year is not None])\n return self._min_year\n\n def max_citations_paper(self, by_year, include_self_citations):\n \"\"\"\n Returns the paper with the maximal number of citations by the input year\n :param by_year: input year\n :param include_self_citations: to include or to not include self-citation ion the calculations\n :return: Paper with maximal number of citation\n :rtype: Paper\n \"\"\"\n l = [(p, p.total_citations_number_by_year(by_year, include_self_citations)) for p in self.papers_list]\n l = sorted(l, key=lambda k: k[1], reverse=True)\n return l[0][0]\n\n def authors_full_names(self, year, unique=True):\n \"\"\"\n Returns the full authors names for authors who publish paper in specific years\n :param year: input\n :param unique: if True return list with unique names otherwise return the full list\n :return: a list with the authors full names for authors\n :rtype: list<str>\n \"\"\"\n authors_names = list(\n itertools.chain.from_iterable([p.authors_fullnames_list for p in self.papers_published_in_year_list(year)]))\n if unique:\n authors_names = list(set(authors_names))\n return authors_names\n\n # </editor-fold>\n\n # <editor-fold desc=\"Authors Age\">\n\n def authors_average_age(self, at_year):\n \"\"\"\n Returns the papers' authors average age in a specific year\n :param at_year: year\n :return: the average authors age at the input year\n :rtype: float\n \"\"\"\n a = AuthorsListAnalyzer(self.all_authors_in_year_list(at_year))\n return a.get_average_age(at_year)\n\n def authors_median_age(self, at_year):\n \"\"\"\n Returns the papers' authors average age in a specific year\n :param at_year: year\n :return: the average authors age at the input year\n :rtype: float\n \"\"\"\n a = AuthorsListAnalyzer(self.all_authors_in_year_list(at_year))\n return a.get_median_age(at_year)\n\n def first_authors_average_age(self, at_year):\n \"\"\"\n Returns the papers' first authors average age in a specific year\n :param at_year: year\n :return: the average first authors age at the input year\n :rtype: float\n \"\"\"\n a = AuthorsListAnalyzer(self.first_authors_list(at_year))\n return a.get_average_age(at_year)\n\n def first_authors_median_age(self, at_year):\n \"\"\"\n Returns the papers' first authors median age in a specific year\n :param at_year: year\n :return: the median first authors age at the input year\n :rtype: float\n \"\"\"\n a = AuthorsListAnalyzer(self.first_authors_list(at_year))\n return a.get_median_age(at_year)\n\n def last_authors_average_age(self, at_year):\n \"\"\"\n Returns the papers' last authors average age in a specific year\n :param at_year: year\n :return: the average last authors age at the input year\n :rtype: float\n \"\"\"\n a = AuthorsListAnalyzer(self.last_authors_list(at_year))\n return a.get_average_age(at_year)\n\n def last_authors_median_age(self, at_year):\n \"\"\"\n Returns the 
papers' last authors median age in a specific year\n :param at_year: year\n :return: the median last authors age at the input year\n :rtype: float\n \"\"\"\n a = AuthorsListAnalyzer(self.last_authors_list(at_year))\n return a.get_average_age(at_year)\n\n @staticmethod\n def _percentage_of_papers_authors_publish_before(papers_list, author_type):\n \"\"\"\n Return the percentage of papers in which authors published in the venue before\n :param papers_list: a paper list\n :param author_type: author type all/first/last\n :return: the percentage of papers which their authors published in the venue before\n :rtype: float\n \"\"\"\n if len(papers_list) == 0:\n return None\n if author_type is \"all\":\n l = [p for p in papers_list if p.did_authors_publish_before_in_venue()]\n elif author_type == \"first\":\n l = [p for p in papers_list if p.did_first_author_publish_in_venue()]\n elif author_type == \"last\":\n l = [p for p in papers_list if p.did_last_author_publish_in_venue()]\n else:\n raise Exception(\"Invalid author type - %s\" % author_type)\n return len(l) / float(len(papers_list))\n\n def percentage_of_papers_with_authors_that_publish_before_in_the_same_venue(self, year):\n \"\"\"\n Returns the percentage of papers in which authors publish a paper before in the venue\n :param year: year\n :return: the percentage of papers in which authors already published in the venue before\n :rtype: float\n \"\"\"\n return PapersCollection._percentage_of_papers_authors_publish_before(self.papers_published_in_year_list(year),\n \"all\")\n\n def percentage_of_papers_with_first_authors_that_publish_before_in_the_same_venue(self, year):\n \"\"\"\n Returns the percentage of papers in which first authors publish a paper before in the venue\n :param year: year\n :return: the percentage of papers in which first authors already published in the venue before\n :rtype: float\n \"\"\"\n return PapersCollection._percentage_of_papers_authors_publish_before(self.papers_published_in_year_list(year),\n \"first\")\n\n def percentage_of_papers_with_last_authors_that_publish_before_in_the_same_venue(self, year):\n \"\"\"\n Returns the percentage of papers in which last authors publish a paper before in the venue\n :param year: year\n :return: the percentage of papers in which last authors already published in the venue before\n :rtype: float\n \"\"\"\n return PapersCollection._percentage_of_papers_authors_publish_before(self.papers_published_in_year_list(year),\n \"last\")\n\n # </editor-fold>\n\n # <editor-fold desc=\"Gender Stats \">\n def authors_avg_female_probability(self, at_year):\n \"\"\"\n Return the avg female probabilites of the authors' first names of all the authors who published paper at\n the input year\n :param at_year: year\n :return: gender statistics of the all the papers' authors\n :rtype:dict\n \"\"\"\n a = AuthorsListAnalyzer(self.all_authors_in_year_list(at_year))\n return a.get_avg_female_probabilities()\n\n def first_avg_female_probability(self, at_year):\n \"\"\"\n Return the gender statistics of all the first authors who published paper at the input year\n :param at_year: year\n :return: gender statistics of the all the papers' first authors\n :rtype:dict\n \"\"\"\n a = AuthorsListAnalyzer(self.first_authors_list(at_year))\n return a.get_avg_female_probabilities()\n\n def last_avg_female_probability(self, at_year):\n \"\"\"\n Return the gender statistics of all the last authors who published paper at the input year\n :param at_year: year\n :return: gender statistics of the all the papers' last 
authors\n :rtype:dict\n \"\"\"\n a = AuthorsListAnalyzer(self.last_authors_list(at_year))\n return a.get_avg_female_probabilities()\n\n # </editor-fold>\n\n def papers_median_rank(self, year):\n \"\"\"\n Returns the papers median rank for papers publish in the input year\n :param year: input year\n :return: the median value of papers rank for papers publish in input year\n \"\"\"\n return np.median(self.papers_ranks_list(year))\n\n def papers_average_rank(self, year):\n \"\"\"\n Returns the papers average rank for papers publish in the input year\n :param year: input year\n :return: the median value of papers rank for papers publish in input year\n \"\"\"\n return np.average(self.papers_ranks_list(year))\n\n def papers_average_length(self, year):\n \"\"\"\n Returns the average paper length and the number of paper with lentgh\n :return: tupe in which the first element is the average paper length and the second element is the number of papers\n with length\n \"\"\"\n l = self.papers_length_list(year)\n if len(l) > 0:\n return np.average(l), len(l)\n return None\n\n def papers_median_citations_after_years(self, publish_year, after_years, include_self_citations):\n \"\"\"\n Returns the median number of citations after input years for papers that were published in a specific year\n :param publish_year: papers publish year\n :param after_years: after years from publication\n :param include_self_citations: if True count each author only once, otherwise each author can be count multiple\n times\n :return: the median number of citations after the input year for papers that were published in a specific year\n \"\"\"\n return np.median([c for p, c in self.papers_citations_after_years_list(publish_year, after_years,\n include_self_citations=include_self_citations)])\n\n def papers_average_citations_after_years(self, publish_year, after_years, include_self_citations):\n \"\"\"\n Returns the average number of citations after input years for papers that were published in a specific year\n :param publish_year: papers publish year\n :param after_years: after years from publication\n :param include_self_citations: if True count each author only once, otherwise each author can be count multiple\n times\n :return: the average number of citations after the input year for papers that were published in a specific year\n \"\"\"\n return np.average([c for p, c in self.papers_citations_after_years_list(publish_year, after_years,\n include_self_citations=include_self_citations)])\n\n def top_keywords(self, year, top_keywords_number=20):\n \"\"\"\n Returns a dict with the most common keywords in the input papers\n :param year: input year\n :param top_keywords_number: the number of top keywords to return\n :return: a dict with the most common keywords among the papers in the collection which where published in a\n specific year\n :rtype: dict<str,int>\n \"\"\"\n papers_list = self.papers_published_in_year_list(year)\n if papers_list is None or len(papers_list) == 0:\n return {}\n l = list(itertools.chain.from_iterable(\n [p.keywords_list for p in papers_list if p.keywords_list is not None and len(p.keywords_list) > 0 and type(p.keywords_list) == list]))\n if len(l) == 0:\n return {}\n c = Counter(l)\n return dict(c.most_common(top_keywords_number))\n\n def get_citations_number_after_years_dict(self, after_years, include_self_citations):\n \"\"\"\n Create dict of the papers citation number of papers that were published in specific year after the input years\n :param after_years: a number of year after publication\n 
:param include_self_citations: if True include self citation otherwise the citation number would be calculate\n without self-citations\n :return: dict in which each key is a year and each value is a list of papers and their corresponding citation\n number after \"after_years\" years\n :rtype: dict<int,list<(Paper, int)>>\n \"\"\"\n\n min_year = self.min_publication_year\n max_year = self.max_publication_year\n d = {}\n for y in range(min_year, max_year + 1):\n d[y] = self.papers_citations_after_years_list(y, after_years, include_self_citations)\n return d\n\n def get_yearly_most_cited_paper_dict(self, after_years, include_self_citations, max_year):\n \"\"\"\n Return the most cited paper after X years in each year\n :param after_years: a number of years\n :param include_self_citations: if True include self-citations otherwise the citation number will be without self\n citations\n :param max_year: max year to do the calculation for\n :return: dict in which each key is a year and each value is a the most cited paper that was published in the key\n year after after_years\n :rtype: dict<int,Paper>\n \"\"\"\n d = self.get_citations_number_after_years_dict(after_years, include_self_citations)\n d = {k: v for k, v in d.items() if v != []}\n\n for y, papers_list in d.items():\n if y > max_year or len(papers_list) == 0:\n continue\n\n papers_list = sorted(papers_list, key=lambda k: k[1], reverse=True)\n\n p = papers_list[0][0]\n d[y] = p\n\n return d\n\n def calculate_feature_over_time(self, feature_name, start_year, end_year):\n \"\"\"\n Returns the feature values over the years from start_year to end_year\n :param feature_name: input feature name that is valud PapersCollection function\n :param start_year: start year\n :param end_year: end year\n :return: dict with the input feature values from start year to the end year\n :rtype: dict\n \"\"\"\n d = {}\n if feature_name not in dir(PapersCollection):\n raise Exception(\"Invalid PaperCollection function - %s\" % feature_name)\n for y in range(start_year, end_year + 1):\n f_value = eval(f\"self.{feature_name}({y})\")\n if f_value is None or str(f_value) == 'nan' or (type(f_value) is float and math.isnan(f_value)):\n continue\n if feature_name not in d:\n d[feature_name] = {}\n d[feature_name][y] = f_value\n\n return d\n", "id": "4736394", "language": "Python", "matching_score": 5.826512813568115, "max_stars_count": 1, "path": "ScienceDynamics/papers_collection_analyer.py" }, { "content": "import os\nimport json\nimport traceback\nfrom ScienceDynamics.config.configs import VenueType\nfrom ScienceDynamics.config.fetch_config import PAPERS_FETCHER, VENUE_FETCHER\nfrom ScienceDynamics.config.log_config import logger\nfrom ScienceDynamics.paper import Paper\nfrom ScienceDynamics.papers_collection_analyer import PapersCollection\n\n\nclass Venue(PapersCollection):\n VENUE_FEATURES_LIST = ('papers_number', 'authors_number', 'authors_average_age', 'authors_median_age',\n 'first_authors_average_age', 'first_authors_median_age',\n 'last_authors_average_age', 'last_authors_median_age',\n 'percentage_of_papers_with_authors_that_publish_before_in_the_same_venue',\n \"percentage_of_papers_with_first_authors_that_publish_before_in_the_same_venue\",\n \"percentage_of_papers_with_last_authors_that_publish_before_in_the_same_venue\",\n \"first_avg_female_probability\", \"last_avg_female_probability\", \"top_keywords\",\n \"papers_average_length\"\n )\n\n def __init__(self, venue_id=None, venue_name=None, issn_list=(), venue_type=VenueType.journal, papers_ids=None,\n 
papers_list=None, papers_filter_func=None, papers_type='MAG', venue_fetcher=VENUE_FETCHER):\n \"\"\"\n Create a venue object with the venue features\n :param venue_id: the venue id (optional)\n :param venue_name: the venue name (optional)\n :param issn_list: the venue issn list ((optional)\n :param venue_type: the venue type\n :param papers_ids: contains papers ids (optional)\n :param papers_ids: contains papers list\n :param papers_filter_func: filter function to filter the papers accordingly\n :param papers_type: MAG for MAG dataset papers or 'Join' for AMinerMag Datasets papers\n :param venue_fetcher: a venue fetcher object\n :note if papers_ids is None then the class will fetch the papers ids based on the input parameters\n \"\"\"\n if venue_id is None and venue_name is None and len(issn_list) == 0:\n raise Exception(\"Cannot consturct venue venue_id, venue_name, and issn list are empty\")\n logger.info(\n f\"Consturcting a Venue object with the following params venue_id={venue_id}, venue_name={venue_name}, issn_list={issn_list}\")\n self._venue_id = venue_id\n self._name = venue_name\n self._venue_type = venue_type\n self._papers_type = papers_type\n if papers_ids is None and papers_list is None:\n _paper_ids_dict = venue_fetcher.get_papers_ids_dict(venue_id, venue_name, venue_type, issn_list)\n papers_ids = _paper_ids_dict['papers_ids']\n if papers_type == 'Join':\n papers_ids = _paper_ids_dict['join_papers_ids']\n\n if papers_list is not None:\n super(Venue, self).__init__(papers_list=papers_list, papers_filter_func=papers_filter_func)\n logger.info(f\"Consturcted a Venue object with {len(papers_list)} papers\")\n else:\n super(Venue, self).__init__(papers_ids=papers_ids, papers_filter_func=papers_filter_func)\n logger.info(f\"Consturcted a Venue object with {len(papers_ids)} papers\")\n\n if venue_name is not None or len(issn_list) > 0:\n self._sjr_features = VENUE_FETCHER.get_sjr_dict(venue_name, issn_list)\n self._issn_list = issn_list\n self._features = {}\n\n def _calculate_venue_features_over_time(self, features_list, start_year, end_year):\n \"\"\"\n The function calcualte the venue features given in the features list\n :param features_list: list of valid venue features (see for example VENUE_FEATURES_LIST)\n :param start_year: the start year\n :param end_year: the end year\n :return: a dict with the venue features over given as input in the features_list\n \"\"\"\n t = \"\"\n if self.venue_type is VenueType.journal:\n t = 'journal'\n if self.venue_type is VenueType.conference:\n t = 'conference'\n\n d = {\"name\": self.name, \"id\": self.venue_id, \"issn\": self._issn_list, 'papers_type': self._papers_type,\n \"type\": t, 'start_year': self.min_publication_year, \"end_year\": self.max_publication_year, 'features': {}}\n\n for f in features_list:\n logger.info(f\"Calculating venue={self.name} feature={f}\")\n d['features'][f] = self.calculate_feature_over_time(f, start_year, end_year)\n return d\n\n @property\n def venue_id(self):\n return self._venue_id\n\n @property\n def venue_type(self):\n return self._venue_type\n\n @property\n def name(self):\n return self._name\n\n @property\n def features_dict(self):\n \"\"\"\n Calcualtes the venue features dict over time using the features in the VENUE_FEATURES_LIST\n :return: dict with the venue features over time\n \"\"\"\n start_year = self.min_publication_year\n end_year = self.max_publication_year\n\n if not self._features: # dict is empty\n self._features = self._calculate_venue_features_over_time(self.VENUE_FEATURES_LIST,\n 
start_year=start_year, end_year=end_year)\n\n return self._features\n\n\nif __name__ == \"__main__\":\n\n min_ref_number = 5\n min_journal_papers_num = 100\n sf = VENUE_FETCHER.get_valid_venues_papers_ids_sframe_from_mag(min_ref_number=min_ref_number,\n min_journal_papers_num=min_journal_papers_num)\n sf = sf.sort(\"Count\")\n for d in sf:\n j_id = d['Journal ID mapped to venue name']\n path = \"/data/journals/%s.json\" % j_id\n n = d['Journal name'].replace('ieee', 'IEEE').replace('acm', 'ACM').title()\n logger.info(f'Getting {j_id} ({n}) venue features')\n if os.path.isfile(path):\n continue\n try:\n papers_data_list = PAPERS_FETCHER.get_journal_papers_data(j_id)\n papers_list = [Paper(j['Paper ID'], json_data=j) for j in papers_data_list]\n papers_list = [p for p in papers_list if p.references_count >= min_ref_number]\n\n logger.info(f'Created {len(papers_list)} paper objects for {j_id} venue')\n v = Venue(venue_id=j_id, venue_name=n, papers_list=papers_list,\n papers_filter_func=lambda p: p.publish_year >= 2015)\n f = v.features_dict\n json.dump(f, open(path, \"w\"))\n except Exception as e:\n logger.error(f\"Failed to get features of {j_id}\\n - {e.message}\\n\\n{traceback.format_exc()}\")\n", "id": "2978153", "language": "Python", "matching_score": 4.240727424621582, "max_stars_count": 1, "path": "ScienceDynamics/venue.py" }, { "content": "from ScienceDynamics.config.fetch_config import FIELDS_OF_STUDY_FETCHER\nfrom ScienceDynamics.config.log_config import logger\nfrom ScienceDynamics.papers_collection_analyer import PapersCollection\nimport turicreate as tc\n\n\nclass FieldOfStudy(PapersCollection):\n \"\"\"Calls for analyzing field of study papers \"\"\"\n\n FIELD_FEATURES_LIST = ('papers_number', 'authors_number', 'authors_average_age', 'authors_median_age',\n 'first_authors_average_age', 'first_authors_median_age',\n 'last_authors_average_age', 'last_authors_median_age',\n \"first_avg_female_probability\", \"last_avg_female_probability\", \"top_keywords\",\n \"papers_average_length\"\n )\n\n def __init__(self, field_id, papers_filter_func=None, field_of_study_fetcher=FIELDS_OF_STUDY_FETCHER):\n \"\"\"\n Construct a FieldOfStudyAnalyzer object\n :param field_id: field of study id\n :param papers_filter_func: paper filter function\n :param field_of_study_fetcher: FieldOfStudyFetcher object (optional)\n \"\"\"\n self._id = field_id\n self._field_of_study_fetcher = field_of_study_fetcher\n self._name = self._field_of_study_fetcher.get_field_name(field_id)\n self._level = self._field_of_study_fetcher.get_field_level(field_id)\n\n paper_ids = self._field_of_study_fetcher.get_field_paper_ids(field_id)\n\n super(FieldOfStudy, self).__init__(papers_ids=paper_ids, papers_filter_func=papers_filter_func)\n if len(self.papers_ids) > self.MAX_PAPERS:\n logger.warn(\n \"There are high number of papers -- %s -- in field %s (%s) , please consider to use better filter func for optimal results\" % (\n self.papers_number, self._name, field_id))\n\n def features_dict(self, cited_max_year=2015, add_field_features_over_time=False):\n \"\"\"\n Get's field features as dict object which includes the field meta information including the most cited papers\n in each year\n :param add_field_features_over_time:\n :param cited_max_year:\n :return: dict with the field of study information including the most cited papers in each year\n :rtype: dict\n \"\"\"\n start_year = self.min_publication_year\n end_year = self.max_publication_year\n d = {\"field_id\": self._id, \"name\": self.name, \"level\": self.level, 
\"papers_number\": len(self.papers_ids),\n \"start_year\": start_year, \"end_year\": end_year, \"features\": {}}\n p = self.max_citations_paper(self.max_publication_year, include_self_citations=True)\n d[\"max_cited_paper\"] = {\"year\": p.publish_year, \"title\": p.title,\n \"citation_number\": p.total_citations_number_by_year(cited_max_year,\n include_self_citation=True),\n \"venue_name\": p.venue_name,\n \"venue_tyoe\": str(p.venue_type)}\n\n if add_field_features_over_time:\n for f in self.FIELD_FEATURES_LIST:\n d[\"features\"][f] = self.calculate_feature_over_time(f, start_year, end_year)\n\n return d\n\n def print_fields_features(self):\n d = self.features_dict()\n print(f\"Field ID {d['field_id']}: \")\n print(f\"Field Name {d['name']}: \")\n print(f\"Field Level {d['level']}: \")\n print(f\"Field Papers Number {d['papers_number']}\")\n sf = tc.SFrame(d[\"yearly_most_cited_papers\"])\n sf = sf.sort(\"year\", ascending=False)\n sf.print_rows(len(sf))\n\n @property\n def name(self):\n return self._name\n\n @property\n def level(self):\n return self._level\n", "id": "3479980", "language": "Python", "matching_score": 2.2230260372161865, "max_stars_count": 1, "path": "ScienceDynamics/field_of_study.py" }, { "content": "import networkx as nx\nimport turicreate as tc\nfrom functools import lru_cache\n\n\nclass FieldsHierarchyAnalyzer(object):\n def __init__(self, mag, min_confidence=0.8):\n self._mag = mag\n self._g = self.create_fields_of_study_graph(min_confidence)\n\n def is_field_in_level(self, field_id, level):\n return level in self._g.nodes[field_id]['levels']\n\n def get_field_levels(self, field_id):\n return self._g.nodes[field_id]['levels']\n\n def get_field_name(self, field_id):\n if not self._g.has_node(field_id):\n return None\n return self._g.nodes[field_id]['Name']\n\n @lru_cache(maxsize=1000000)\n def get_parents_field_of_study(self, field_id, parent_level):\n if not self._g.has_node(field_id):\n return set()\n\n levels = self.get_field_levels(field_id)\n if parent_level in levels:\n return {field_id}\n\n if not FieldsHierarchyAnalyzer.is_higher_level(levels, parent_level):\n return set()\n\n ids = set()\n for n in self._g.predecessors(field_id):\n ids |= self.get_parents_field_of_study(n, parent_level)\n\n return ids\n\n def create_fields_of_study_graph(self, min_confidence=0.8):\n g = nx.DiGraph()\n h_sf = self._mag.field_of_study_children\n f_sf = self._mag.fields_of_study[[\"FieldOfStudyId\",\"DisplayName\",\"Level\"]]\n h_sf = h_sf.join(f_sf, on={'FieldOfStudyId': 'FieldOfStudyId'}, how='left')\n h_sf = h_sf.rename({'DisplayName': 'Parent field of study name', \"Level\":\"PLevel\"})\n h_sf = h_sf.join(f_sf, on={'ChildFieldOfStudyId': 'FieldOfStudyId'}, how='left')\n h_sf = h_sf.rename({'DisplayName': 'Child field of study name',\"Level\":\"CLevel\"})\n\n for r in h_sf:\n v = r['FieldOfStudyId']\n u = r['ChildFieldOfStudyId']\n g.add_edge(v, u)\n if 'levels' not in g.nodes[v]:\n g.nodes[v]['levels'] = set()\n l = int(r['PLevel'])\n g.nodes[v]['levels'].add(l)\n g.nodes[v]['Name'] = r['Parent field of study name']\n\n if 'levels' not in g.nodes[u]:\n g.nodes[u]['levels'] = set()\n l = int(r['CLevel'])\n g.nodes[u]['levels'].add(l)\n g.nodes[u]['Name'] = r['Child field of study name']\n\n return g\n\n @staticmethod\n def is_higher_level(levels_list, search_level):\n l = [i for i in list(levels_list) if i > search_level]\n return len(l) > 0\n", "id": "5655405", "language": "Python", "matching_score": 2.89391827583313, "max_stars_count": 1, "path": 
"ScienceDynamics/sframe_creators/fields_of_study_hieararchy_analyzer.py" }, { "content": "from ScienceDynamics.sframe_creators.fields_of_study_hieararchy_analyzer import FieldsHierarchyAnalyzer\n\n\nclass FieldsOfStudyAnalyzer(object):\n def __init__(self):\n self._fh = None\n\n def get_fields_of_study_dict(self, papers_features_sframe):\n d = {}\n\n for flevel in range(3):\n col_name = f\"Fields of study parent list (L{flevel}s)\"\n for r in papers_features_sframe:\n p_id = r['Paper ID']\n f_list = r[col_name]\n if f_list is None or len(f_list) == 0:\n continue\n for f in f_list:\n if f not in d:\n d[f] = {'name': self.fields_hierarchy_analyzer.get_field_name(f),\n 'levels': {flevel},\n 'papers_ids': []\n }\n d[f]['papers_ids'].append(p_id)\n d[f]['levels'].add(flevel)\n return d\n\n @property\n def fields_hierarchy_analyzer(self):\n if self._fh is None:\n self._fh = FieldsHierarchyAnalyzer()\n return self._fh\n", "id": "6357729", "language": "Python", "matching_score": 0.328688383102417, "max_stars_count": 1, "path": "ScienceDynamics/sframe_creators/fields_of_study_analyzer.py" }, { "content": "import networkx as nx\nfrom subs2network.consts import ROLES_GRAPH, ACTORS_GRAPH, IMDB_RATING, VIDEO_NAME, MOVIE_YEAR\nimport numpy as np\nfrom subs2network.utils import add_prefix_to_dict_keys\n\nfrom subs2network.subtitle_fetcher import SubtitleFetcher\nfrom subs2network.subtitle_analyzer import SubtitleAnalyzer\nfrom subs2network.utils import get_movie_obj\n\n\nclass VideoSnAnalyzer(object):\n def __init__(self, video_name, entities_links_dict, video_rating=0):\n self._entities_dict = entities_links_dict\n self._video_name = video_name\n self._video_rating = video_rating\n\n def construct_social_network_graph(self, graph_type=ROLES_GRAPH, min_weight=2):\n if graph_type == ROLES_GRAPH:\n g = self._entities_dict[1]\n elif graph_type == ACTORS_GRAPH:\n g = self._entities_dict[0]\n else:\n raise Exception(\"Unsupported graph type %s\" % graph_type)\n\n g = g.edge_subgraph([(u, v) for (u, v, d) in g.edges(data=True) if d['weight'] >= min_weight])\n\n g.graph[IMDB_RATING] = self.video_rating\n g.graph[VIDEO_NAME] = self._video_name\n return g\n\n @staticmethod\n def get_features_dict(g, calculate_edges_features=False):\n if len(g.edges()) == 0:\n return None\n d = {\"edges_number\": len(g.edges()), \"nodes_number\": len(g.nodes())}\n\n d.update(add_prefix_to_dict_keys(nx.degree(g, g.nodes()), \"degree\"))\n d.update(add_prefix_to_dict_keys(nx.closeness_centrality(g), \"closeness\"))\n d.update(add_prefix_to_dict_keys(nx.pagerank(g), \"pagerank\"))\n d.update(add_prefix_to_dict_keys(nx.betweenness_centrality(g), \"betweenness\"))\n d.update(add_prefix_to_dict_keys(VideoSnAnalyzer.get_nodes_average_weights(g), \"avg-weight\"))\n\n if calculate_edges_features:\n for v, u in g.edges():\n d[f\"{v}_{u}_weight\"] = g.adj[v][u][\"weight\"]\n edge_weights = nx.get_edge_attributes(g, \"weight\")\n d[\"average_edge_weight\"] = np.average(list(edge_weights.values()))\n d[\"max_edge_weight\"] = max(edge_weights.values())\n if VIDEO_NAME in g.graph:\n d[VIDEO_NAME] = g.graph[VIDEO_NAME]\n if IMDB_RATING in g.graph:\n d[IMDB_RATING] = g.graph[IMDB_RATING]\n if MOVIE_YEAR in g.graph:\n d[MOVIE_YEAR] = g.graph[MOVIE_YEAR]\n\n return d\n\n @staticmethod\n def get_nodes_average_weights(g):\n d = {}\n for u in g.nodes():\n w = []\n for v in g.neighbors(u):\n w.append(g.adj[u][v][\"weight\"])\n d[u] = np.average(w)\n return d\n\n @property\n def video_rating(self):\n return self._video_rating\n\n\nif __name__ == 
\"__main__\":\n video_name = \"<NAME>\"\n movie = get_movie_obj(video_name, \"<NAME>\", 1999, \"0133093\")\n sf = SubtitleFetcher(movie)\n d = sf.fetch_subtitle(\"../temp\")\n sa = SubtitleAnalyzer(d, use_top_k_roles=20)\n e = sa.get_subtitles_entities_links(60)\n va = VideoSnAnalyzer(video_name, e)\n g = va.construct_social_network_graph(ROLES_GRAPH)\n print(nx.info(g))\n g = va.construct_social_network_graph(ACTORS_GRAPH)\n print(nx.info(g))\n print(va.get_features_dict(g))\n", "id": "12575079", "language": "Python", "matching_score": 3.0933570861816406, "max_stars_count": 1, "path": "subs2network/video_sn_analyzer.py" }, { "content": "import glob\nimport os\nfrom networkx.readwrite import json_graph\nimport json\nimport networkx as nx\nimport pandas as pd\n\nfrom subs2network.utils import add_prefix_to_dict_keys\nfrom subs2network.imdb_dataset import imdb_data\nfrom subs2network.consts import MOVIE_YEAR\n\n\ndef get_node_features(g):\n closeness = nx.closeness_centrality(g)\n betweenness = nx.betweenness_centrality(g)\n betweenness_weight = nx.betweenness_centrality(g, weight=\"weight\")\n degree_centrality = nx.degree_centrality(g)\n pr = nx.pagerank(g, weight=None)\n pr_weight = nx.pagerank(g, weight=\"weight\")\n clustering = nx.clustering(g)\n for v in g.nodes():\n res = {\"total_weight\": g.degree(v, weight=\"weight\"), \"degree\": g.degree(v), \"movie_name\": g.graph[\"movie_name\"],\n \"year\": g.graph[\"movie_year\"], \"imdb_rating\": g.graph[\"imdb_rating\"], \"closeness\": closeness[v],\n \"betweenness_weight\": betweenness_weight[v], \"betweenness\": betweenness[v],\n \"degree_centrality\": degree_centrality[v], \"clustering\": clustering[v], \"pagerank\": pr[v],\n \"pr_weight\": pr_weight[v], \"gender\": imdb_data.get_actor_gender(v), \"name\": v}\n yield res\n\n\ndef get_actor_features(g, actor):\n res = {}\n closeness = nx.closeness_centrality(g)\n betweenness = nx.betweenness_centrality(g)\n betweenness_weight = nx.betweenness_centrality(g, weight=\"weight\")\n degree_centrality = nx.degree_centrality(g)\n clustering = nx.clustering(g)\n pr = nx.pagerank(g, weight=None)\n pr_weight = nx.pagerank(g)\n\n v = actor\n res[\"total_weight\"] = g.degree(v, weight=\"weight\")\n res[\"degree\"] = g.degree(v)\n res[\"closeness\"] = closeness[v]\n res[\"betweenness\"] = betweenness[v]\n res[\"betweenness_weight\"] = betweenness_weight[v]\n res[\"degree_centrality\"] = degree_centrality[v]\n res[\"clustering\"] = clustering[v]\n res[\"movie_rating\"] = g.graph[\"imdb_rating\"]\n res[\"pagerank\"] = pr[v]\n res[\"pagerank_weight\"] = pr_weight[v]\n\n # res[\"gender\"] = imdb_data.get_actor_gender(v)\n return res\n\n\ndef average_graph_weight(g):\n stats = pd.Series(list(nx.get_edge_attributes(g, \"weight\").values())).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"weight\")\n\n\ndef average_graph_degree(g):\n stats = pd.Series([d for n, d in nx.degree(g, g.nodes())]).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"degree\")\n\n\ndef average_actor_appearance(g):\n stats = pd.Series([g.degree(v, weight=\"weight\") for v in g.nodes()]).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"appearance\")\n\n\ndef average_closeness_centrality(g):\n stats = pd.Series(list(nx.closeness_centrality(g).values())).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"closeness\")\n\n\ndef average_eigenvector_centrality(g):\n stats = 
pd.Series(list(nx.eigenvector_centrality(g).values())).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"eigenvector\")\n\n\ndef average_betweenness_centrality(g):\n stats = pd.Series(list(nx.betweenness_centrality(g).values())).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"betweenness\")\n\n\ndef average_pagerank(g):\n stats = pd.Series(list(nx.nx.pagerank(g, weight=None).values())).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"pagerank\")\n\n\ndef average_weighted_pagerank(g):\n stats = pd.Series(list(nx.nx.pagerank(g, weight=\"weight\").values())).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"weighted_pagerank\")\n\n\ndef average_weighted_betweenness_centrality(g):\n stats = pd.Series(list(nx.betweenness_centrality(g).values())).describe()\n del stats[\"count\"]\n return add_prefix_to_dict_keys(stats.to_dict(), \"weighted_betweenness\")\n\n\ndef average_clustering(g):\n try:\n return {\"average_clustering\": nx.average_clustering(g)}\n except:\n return {\"average_clustering\": 0}\n\n\ndef average_weighted_clustering(g):\n try:\n return {\"average_weighted_clustering\": nx.average_clustering(g, weight=\"weight\")}\n except:\n return {\"average_clustering\": 0}\n\n\ndef graph_clique_number(g):\n return {\"clique_number\": nx.graph_clique_number(g)}\n\n\ndef average_degree_connectivity(g):\n return {\"average_degree_connectivity\": nx.average_degree_connectivity(g)}\n\n\ndef get_edge_number(g):\n return {\"edge_number\": len(g.edges)}\n\n\ndef get_node_number(g):\n return {\"node_number\": len(g.node)}\n\n\ndef analyze_movies():\n p = \"../temp/movies/\"\n res = []\n for movie in tqdm(os.listdir(p)):\n path = os.path.join(p, movie)\n g_pth = os.path.join(path, f\"json/{movie}.json\")\n if not os.path.exists(g_pth):\n g_pth = glob.glob(os.path.join(path, f\"json/*.json\"))\n if g_pth:\n g_pth = g_pth[0]\n if g_pth:\n # try:\n with open(g_pth) as f:\n g = json_graph.node_link_graph(json.load(f))\n if g.number_of_nodes() == 0:\n continue\n d = extract_graph_features(g)\n #\n # with open(os.path.join(path, f\"{movie}.json\")) as f:\n # movie_info = json.load(f)\n # d.update(json.loads(movie_info))\n res.append(d)\n # except:\n # pass\n # else:\n # print(movie)\n pd.DataFrame(res).to_csv(f\"../temp//graph_features.csv\", index=False)\n\n\ndef analyze_directors():\n p = \"../temp/directors/\"\n for director in os.listdir(p):\n res = []\n json_path = os.path.join(p, director, \"json\")\n graphs = []\n for g_pth in glob.glob(os.path.join(json_path, f\"*roles*\")):\n\n if g_pth:\n try:\n with open(g_pth) as f:\n g = json_graph.node_link_graph(json.load(f))\n d = extract_graph_features(g)\n d.update({\"rating\": g.graph[\"imdb_rating\"], \"year\": g.graph[\"movie_year\"],\n \"name\": g.graph[\"movie_name\"]})\n graphs.append(g)\n res.append(d)\n except:\n pass\n if graphs:\n joined_grpah = nx.compose_all(graphs)\n d = extract_graph_features(joined_grpah)\n d[\"name\"] = \"combined\"\n res.append(d)\n pd.DataFrame(res).to_csv(f\"../temp/output/{director}.csv\", index=False)\n\n\ndef get_triangles(g):\n all_cliques = nx.enumerate_all_cliques(g)\n return [x for x in all_cliques if len(x) == 3]\n\n\ndef analyze_triangles():\n p = \"../temp/movies/\"\n res = []\n json_path = os.path.join(p, \"*\", \"json\")\n for g_pth in tqdm(glob.glob(os.path.join(json_path, f\"*roles.json\"))):\n if g_pth:\n with open(g_pth) as f:\n g = 
json_graph.node_link_graph(json.load(f))\n tr = get_triangles(g)\n for t in tr:\n t.append(g.graph[\"movie_name\"].replace(\" - roles\", \"\"))\n t.append(g.graph[\"movie_year\"])\n res += tr\n\n pd.DataFrame(res).to_csv(f\"../temp/triangles.csv\")\n\n\ndef analyze_genders():\n p = \"../temp/movies/\"\n res = []\n json_path = os.path.join(p, \"*\", \"json\")\n for g_pth in tqdm(glob.glob(os.path.join(json_path, f\"*roles.json\"))):\n\n if g_pth:\n with open(g_pth) as f:\n g = json_graph.node_link_graph(json.load(f))\n if g.number_of_nodes() > 5:\n d = get_node_features(g)\n res += list(d)\n\n pd.DataFrame(res).to_csv(f\"../temp/gender.csv\")\n\n\ndef extract_graph_features(g):\n d = {}\n d.update(get_edge_number(g))\n d.update(get_node_number(g))\n d.update(average_actor_appearance(g))\n d.update(average_closeness_centrality(g))\n d.update(average_clustering(g))\n d.update(average_weighted_clustering(g))\n d.update(average_betweenness_centrality(g))\n d.update(average_weighted_betweenness_centrality(g))\n # d.update(average_eigenvector_centrality(g))\n d.update(average_pagerank(g))\n d.update(average_weighted_pagerank(g))\n d.update(average_graph_degree(g))\n d.update(average_graph_weight(g))\n d.update(average_clustering(g))\n d.update(graph_clique_number(g))\n genders = get_genders_in_graph(g)\n d[\"m_count\"] = genders.count(\"M\")\n d[\"f_count\"] = genders.count(\"F\")\n d[\"movie_name\"] = g.graph[\"movie_name\"].replace(\" - roles\", \"\")\n d[\"year\"] = g.graph[\"movie_year\"]\n d[\"imdb_rating\"] = g.graph[\"imdb_rating\"]\n return d\n\n\nfrom tqdm import tqdm\n\n\ndef create_pdf():\n from PIL import Image\n from PIL import ImageFont\n from PIL import ImageDraw\n # imagelist is the list with all image filenames\n p = \"../temp/movies/\"\n res = []\n\n for i, movie in enumerate(tqdm(os.listdir(p), total=len(os.listdir(p)))):\n\n path = os.path.join(p, movie)\n image = glob.glob(os.path.join(path, f\"graphs/*({'[0-9]' * 4}).png\"))\n if image:\n img = Image.open(image[0]).convert(\"RGB\")\n draw = ImageDraw.Draw(img)\n # font = ImageFont.truetype(<font-file>, <font-size>)\n font = ImageFont.truetype(\"arial.ttf\", 50)\n # draw.text((x, y),\"Sample Text\",(r,g,b))\n draw.text((10, 10), movie, (0, 0, 0), font=font)\n res.append(img)\n\n res[0].save(\"test4.pdf\", \"PDF\", resolution=100.0, save_all=True, append_images=res[1:], quality=60, optimize=True)\n\n\ndef add_gender_to_graph(movie, is_roles=True):\n p = \"../temp/movies/\"\n json_path = os.path.join(p, movie, \"json\")\n if is_roles:\n graph_paths = glob.glob(os.path.join(json_path, f\"*roles*\"))[0]\n else:\n graph_paths = glob.glob(os.path.join(json_path, f\"*{movie}.json\"))\n imdb_data.actors_gender\n for graph_path in graph_paths:\n with open(graph_path) as f:\n g = json_graph.node_link_graph(json.load(f))\n for v in g.nodes():\n if is_roles:\n g.node[v][\"gender\"] = imdb_data.get_actor_gender(v)\n else:\n g.node[v][\"gender\"] = imdb_data.get_actor_gender(g.node[v]['role'])\n data = json_graph.node_link_data(g)\n json_path = f\"../temp/({g.graph[MOVIE_YEAR]}) - {movie}.json\"\n with open(json_path, 'w') as fp:\n json.dump(data, fp)\n\n\ndef gender_in_top_movies():\n p = \"../temp/movies/\"\n movies = imdb_data.get_movies_data()\n for m in tqdm(movies):\n movie_name = m['primaryTitle'].replace('.', '').replace('/', '')\n json_path = os.path.join(p, movie_name, \"json\")\n try:\n graph_path = glob.glob(os.path.join(json_path, f\"*roles.json\"))[0]\n with open(graph_path) as f:\n g = 
json_graph.node_link_graph(json.load(f))\n yield get_genders_in_graph(g)\n except IndexError:\n pass\n\n\ndef get_genders_in_graph(g):\n return [imdb_data.get_actor_gender(v) for v in g.nodes()]\n\n\nif __name__ == \"__main__\":\n # gender_in_top_movies()\n add_gender_to_graph(\"Jobs\", False)\n # analyze_triangles()\n # analyze_genders()\n # analyze_directors()\n # create_pdf()\n # analyze_movies()\n # analyze_movies()\n", "id": "1941428", "language": "Python", "matching_score": 2.4935574531555176, "max_stars_count": 1, "path": "subs2network/graph_analyzer.py" }, { "content": "import json\nimport logging\nimport os\nimport shutil\nimport sys\n\nimport networkx as nx\nimport requests\nfrom networkx.readwrite import json_graph\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail, Email, Content\nfrom subliminal import video\nfrom tqdm import tqdm\nimport glob\nfrom subs2network.consts import OUTPUT_PATH\nfrom turicreate import SFrame\n\ndef send_email(send_to, subject, mail_content):\n try:\n sg = SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))\n # make a message object\n from_email = Email(\"<EMAIL>\")\n to_email = Email(send_to)\n content = Content(\"text/plain\", mail_content)\n mail = Mail(from_email, subject, to_email, content)\n sg.client.mail.send.post(request_body=mail.get())\n except:\n return False\n return True\n\n\ndef add_prefix_to_dict_keys(d, prefix, sep=\"-\"):\n h = {}\n if type(d) is dict:\n d = d.items()\n for k, v in d:\n h[prefix + sep + k] = v\n return h\n\n\ndef get_movie_obj(name, title, year, imdb_id):\n \"\"\"\n Returns a subliminal movie object according to the movie's details\n :param name: movie's name\n :param title: the movie's title\n :param year: the year the movie was created\n :param imdb_id: the movie's IMDB id\n :return: video.Movie object\n :rtype: video.Movie\n \"\"\"\n logging.info(\"Fetching Subtitle For Movie:%s | Year: %s | IMDB ID: %s \" % (title, year, imdb_id))\n return video.Movie(name=name, title=title, year=year, imdb_id=imdb_id)\n\n\ndef get_episode_obj(video_name, series, season_num, episode_num, episode_name, imdb_id):\n \"\"\"\n Returns a subliminal TV episode object according to the episode's details\n :param imdb_id:\n :param video_name: the episode name, which usually consists of the series name and episode details\n :param series: the episode's series name\n :param season_num: the episode's season number\n :param episode_num: the episode number\n :param episode_name: the episode title\n :return: video.Episode object\n :rtype: video.Episode\n \"\"\"\n logging.info(\"Fetching Subtitle Series:%s | Season: %s | Episode Number: %s | Name: %s\" % (\n series, season_num, episode_num, episode_name))\n return video.Episode(video_name, series, season_num, episode_num, title=episode_name, series_imdb_id=imdb_id)\n\n\ndef get_lazy_episode_obj(video_name, series, season_num, episode_num, episode_name, imdb_id):\n \"\"\"\n Returns a subliminal TV episode object according to the episode's details\n :param imdb_id:\n :param video_name: the episode name, which usually consists of the series name and episode details\n :param series: the episode's series name\n :param season_num: the episode's season number\n :param episode_num: the episode number\n :param episode_name: the episode title\n :return: video.Episode object\n :rtype: video.Episode\n \"\"\"\n yield get_episode_obj(video_name, series, season_num, episode_num, episode_name, imdb_id)\n\n\ndef download_file(url, output_path, exist_overwrite, min_size=0, 
verbose=True):\n # Todo handle requests.exceptions.ConnectionError\n if exist_overwrite or not os.path.exists(output_path):\n r = requests.get(url, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n size_read = 0\n if total_size - min_size > 0:\n with tqdm(\n total=total_size,\n unit='B',\n unit_scale=True,\n unit_divisor=1024,\n disable=not verbose\n ) as pbar:\n with open(output_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n size_read = min(total_size, size_read + 1024)\n pbar.update(len(chunk))\n\n\ndef to_iterable(item):\n if item is None: # include all nodes via iterator\n item = []\n elif not hasattr(item, \"__iter__\") or isinstance(item, str): # if vertices is a single node\n item = [item] # ?iter()\n return item\n\n\ndef delete_movies_results(p):\n for movie in os.listdir(p):\n path = os.path.join(p, movie)\n if glob.glob(os.path.join(path, f\"subtitles/*.srt\")):\n try:\n os.remove(glob.glob(os.path.join(path, f\"subtitles/{movie}*roles.pkl\"))[0])\n os.remove(os.path.join(path, f\"{movie}.json\"))\n except FileNotFoundError:\n pass\n shutil.rmtree(os.path.join(path, \"json\"), ignore_errors=True)\n shutil.rmtree(os.path.join(path, \"graphs\"), ignore_errors=True)\n shutil.rmtree(os.path.join(path, \"csv\"), ignore_errors=True)\n\n\n#f\"{DOWNLOAD_PATH}/movies/*/subtitles/*.srt\"\ndef copy_files(src_regex, dst):\n for movie in glob.glob(src_regex):\n shutil.copyfile(movie, os.path.join(dst, os.path.basename(movie)))\n\n\ndef convert_json_to_gfx(json_path_iter):\n for json_path in json_path_iter:\n with open(json_path) as f:\n g = json_graph.node_link_graph(json.load(f))\n nx.write_graphml(g, f\"{json_path.split('.')[0]}.graphml\")\n\n\ndef combine_json_graphs(json_path_iter):\n graphs = []\n for json_path in json_path_iter:\n with open(json_path) as f:\n graphs.append(json_graph.node_link_graph(json.load(f)))\n joined_graph = nx.compose_all(graphs)\n nx.write_graphml(joined_graph, f\"joined_graph.graphml\")\n\n\ndef combine_distinct_json_graphs(json_path_iter):\n graphs = []\n c = 0\n sf = SFrame.read_json('starwars.json')\n for json_path in json_path_iter:\n with open(json_path) as f:\n g = json_graph.node_link_graph(json.load(f))\n for v in g.nodes:\n g.node[v][\"year\"] = str(g.graph[\"movie_year\"])\n try:\n g.node[v][\"image_url\"] = sf[sf[\"title\"] == v][\"base_images\"][0]['desktop']['ratio_1x1']\n except IndexError:\n g.node[v][\"image_url\"] = \"\"\n g = nx.convert_node_labels_to_integers(g, first_label=c, label_attribute=\"name\")\n c += len(g.nodes)\n graphs.append(g)\n joined_graph = nx.compose_all(graphs)\n nx.write_graphml(joined_graph, f\"joined_graph.graphml\")\n\n# starw = glob.glob(\"/Users/dimakagan/Projects/subs2network/output/star/d3/*.json\")\n# combine_distinct_json_graphs(starw)\n", "id": "3469354", "language": "Python", "matching_score": 4.1858906745910645, "max_stars_count": 1, "path": "subs2network/utils.py" }, { "content": "import logging\nimport os\nimport pickle\nimport types\nimport glob\nimport babelfish\nfrom guessit.api import GuessitException\nfrom subliminal import video, download_best_subtitles, save_subtitles, region\n\nfrom subs2network.consts import IMDB_ID, VIDEO_NAME, SUBTITLE_PATH, ROLES_PATH, OUTPUT_PATH\nfrom subs2network.exceptions import SubtitleNotFound\nfrom subs2network.utils import get_movie_obj\n\n\nclass SubtitleFetcher(object):\n region.configure('dogpile.cache.dbm', arguments={'filename': 
f'{OUTPUT_PATH}/cachefile.dbm'})\n\n \"\"\"\n Responsible for fetching the subtitle information including metadata from the open subtitles websites or from\n analyzed local files\n \"\"\"\n\n def __init__(self, video_obj, lang=babelfish.Language(\"eng\")):\n \"\"\"\n Class constructor which receives as input a video object of a movie or TV series episode and the language of the\n video\n :param video_obj: video object that contains a movie's or TV series episode's details\n :param lang: the language of the video as babelfish object\n :return: None\n \"\"\"\n self._video_obj = video_obj\n self._lang = lang\n\n def load_video_obj(self):\n if isinstance(self._video_obj, types.GeneratorType):\n self._video_obj = next(self._video_obj)\n\n def fetch_subtitle(self, path):\n \"\"\"\n Fetch the subtitle using subliminal or from local file\n :param path: the file path to save the subtitle or to load the subtitle details from\n :return:\n :rtype: dict\n \"\"\"\n\n p = path + os.path.sep + self.get_video_string() + \".pkl\"\n if not os.path.isfile(p) or not glob.glob(f\"{path}/{self._video_obj.name}*.srt\"):\n self.load_video_obj()\n logging.debug(\"Fetching %s's best matched subtitle\" % self.get_video_string())\n # This download the best subtitle as SRT file to the current directory\n try:\n subtitle = download_best_subtitles({self._video_obj}, {self._lang}, hearing_impaired=True)\n subtitle = subtitle[self._video_obj]\n except GuessitException:\n subtitle = []\n if not subtitle:\n raise SubtitleNotFound\n save_subtitles(self._video_obj, subtitle, encoding='utf-8', directory=path)\n self._save_subtitle_info_dict(path)\n logging.debug(\"Loading %s metadata from %s\" % (self.get_video_string(), p))\n with open(p, \"rb\") as f:\n # os.chdir(owd)\n return pickle.load(f) # test if the subtitle object is loadable\n\n def _get_subtitle_srt_path(self, search_path):\n \"\"\"\n Trys to find video's subtitle in the search path\n :param search_path: path for searching the video's subtitle\n :return: path to the video's subtitles or None\n :rtype: str\n \"\"\"\n if self.is_episode:\n for p in os.listdir(search_path):\n for e in self.episode_details_strings():\n if e.lower() in p.lower() and \".srt\" in p.lower():\n return search_path + os.path.sep + p\n elif self.is_movie:\n movie_name = self._video_obj.name.lower()\n for p in os.listdir(search_path):\n if movie_name in p.lower() and \".srt\" in p.lower():\n return search_path + os.path.sep + p\n return None\n\n def _save_subtitle_info_dict(self, path):\n \"\"\"\n save subtitle's metadata as a dict object to a file using pickle\n :param path: the path to save the subtitle's metadata dict using cPcikle\n \"\"\"\n\n p = path + os.path.sep + self.get_video_string() + \".pkl\"\n roles_path = path + os.path.sep + self.get_video_string() + \"roles.pkl\"\n try:\n d = {VIDEO_NAME: self._video_obj.name, IMDB_ID: self._video_obj.imdb_id,\n SUBTITLE_PATH: self._get_subtitle_srt_path(path), ROLES_PATH: roles_path}\n except AttributeError:\n d = {VIDEO_NAME: self._video_obj.name, IMDB_ID: self._video_obj.series_imdb_id,\n SUBTITLE_PATH: self._get_subtitle_srt_path(path), ROLES_PATH: roles_path}\n\n logging.debug(f\"Saving {self.get_video_string()}'s metadata to {p}\")\n with open(p, \"wb\") as f:\n pickle.dump(d, f)\n\n def get_video_string(self):\n \"\"\"\n Return the video's representing name name\n :return: string with the video's representing name\n :rtype: str\n \"\"\"\n if self.is_episode:\n return f\"{self._video_obj.series} {self.episode_details_strings()[1]}\"\n if 
self.is_movie:\n return self._video_obj.name\n raise Exception(\"Unsupported video type\")\n\n def episode_details_strings(self):\n \"\"\"\n In many case the downloaded subtitle file may contain various versions of the episodes season's & episode's names.\n This function return a list with most common episode's & season's name\n :return: list of strings with the most common season & episode names\n :rtype: list of [str]\n \"\"\"\n episode_name_list = []\n\n if self.is_episode:\n episode_name_list.append(f\"S0{self._video_obj.season}E0{self._video_obj.episode}\")\n episode_name_list.append(f\"S{self._video_obj.season}E{self._video_obj.episode}\")\n e = \"\"\n if self._video_obj.season < 10:\n e += f\"S0{self._video_obj.season}\"\n else:\n e += f\"S{self._video_obj.season}\"\n if self._video_obj.episode < 10:\n e += f\"E0{self._video_obj.episode}\"\n else:\n e += f\"E{self._video_obj.episode}\"\n episode_name_list.append(e)\n e = f\"S0{self._video_obj.season}\"\n if self._video_obj.episode < 10:\n e += f\"E0{self._video_obj.episode}\"\n else:\n e += f\"E{self._video_obj.episode}\"\n episode_name_list.append(e)\n return episode_name_list\n\n @property\n def is_episode(self):\n \"\"\"\n Is video TV series episode?\n :return: True if the video is TV series episode or False otherwise\n :rtype: bool\n \"\"\"\n return type(self._video_obj) is video.Episode\n\n @property\n def is_movie(self):\n \"\"\"\n Is movie object?\n :return: True if the video is a movie or false otherwise\n :rtype: bool\n \"\"\"\n return type(self._video_obj) is video.Movie\n\n @staticmethod\n def get_episode_obj(video_name, series, season_num, episode_num, episode_name, imdb_id):\n \"\"\"\n Returns a subliminal TV episode object according to the episode's details\n :param imdb_id:\n :param video_name: the episode name, which usually consists of the series name and episode details\n :param series: the episode's series name\n :param season_num: the episode's season number\n :param episode_num: the episode number\n :param episode_name: the episode title\n :return: video.Episode object\n :rtype: video.Episode\n \"\"\"\n logging.info(\"Fetching Subtitle Series:%s | Season: %s | Episode Number: %s | Name: %s\" % (\n series, season_num, episode_num, episode_name))\n return video.Episode(video_name, series, season_num, episode_num, title=episode_name, series_imdb_id=imdb_id)\n\n\nif __name__ == \"__main__\":\n movie = get_movie_obj(\"Kill Bill: Vol 2\", \"Kill Bill: Vol. 
2\", 2004, \"0378194\")\n sf = SubtitleFetcher(movie)\n sf.fetch_subtitle(\"../temp\")\n", "id": "9639360", "language": "Python", "matching_score": 2.680440902709961, "max_stars_count": 1, "path": "subs2network/subtitle_fetcher.py" }, { "content": "import logging\nimport os\nimport re\nfrom collections import Counter, defaultdict\nfrom itertools import chain\n\nimport networkx as nx\nimport pysrt\nimport spacy\nfrom nltk.tag import StanfordNERTagger\nfrom nltk.tokenize import word_tokenize\n\nfrom subs2network.consts import IMDB_ID, SUBTITLE_PATH, ROLES_PATH, IMDB_NAME, STANFORD_NLP_JAR, STANFORD_NLP_MODEL, \\\n OUTPUT_PATH, STANFORD_NLP_JAR_URL, STANFORD_NLP_MODEL_URL\nfrom subs2network.exceptions import SubtitleNotFound\nfrom subs2network.subtitle_fetcher import SubtitleFetcher\nfrom subs2network.utils import get_movie_obj, download_file\nfrom subs2network.video_roles_analyzer import VideoRolesAnalyzer\n\n\nclass RemoveControlChars(object):\n\n def __init__(self):\n # or equivalently and much more efficiently\n control_chars = ''.join(map(chr, chain(range(0, 32), range(127, 160))))\n self.control_char_re = re.compile('[%s]' % re.escape(control_chars))\n\n def remove_control_chars(self, s):\n return self.control_char_re.sub('', s)\n\n\nclass SubtitleAnalyzer(object):\n \"\"\"\n Fetch and analyze subtitle of a movie and use it to construct the connection between the movie various roles\n \"\"\"\n\n def __init__(self, subtitle_info_dict, use_top_k_roles=None, ignore_roles_names=None):\n \"\"\"\n Construct the SubtitleAnalyzer and create the video's role time line based\n :param subtitle_info_dict: dict with the video metadata created by the SubtitleFetcher class\n :param use_top_k_roles: use only the top K roles when constructing the movie? (None - to use all roles)\n :param ignore_roles_names: list of roles name to ignore\n\n \"\"\"\n self._roles = defaultdict(lambda: {\"role\": None, \"first\": 0, \"last\": 0})\n self._interactions = {}\n if ignore_roles_names is None:\n ignore_roles_names = set()\n download_file(STANFORD_NLP_JAR_URL, STANFORD_NLP_JAR, False)\n download_file(STANFORD_NLP_MODEL_URL, STANFORD_NLP_MODEL, False)\n\n if not os.path.exists(subtitle_info_dict[SUBTITLE_PATH]):\n subtitle_info_dict[ROLES_PATH] = OUTPUT_PATH + subtitle_info_dict[ROLES_PATH].split(\"temp\")[1]\n subtitle_info_dict[SUBTITLE_PATH] = OUTPUT_PATH + subtitle_info_dict[SUBTITLE_PATH].split(\"temp\")[1]\n\n imdb_id = subtitle_info_dict[IMDB_ID].strip('t')\n self._video_role_analyzer = VideoRolesAnalyzer(imdb_id, use_top_k_roles, ignore_roles_names,\n subtitle_info_dict[ROLES_PATH])\n\n subtitle_srt_path = subtitle_info_dict[SUBTITLE_PATH]\n\n self._subs_entities_timeline_dict = self.create_video_roles_timeline(subtitle_srt_path)\n\n def create_video_roles_timeline(self, subtitle_path):\n if subtitle_path is None:\n raise SubtitleNotFound(f\"Could not find video's subtitle in path: {subtitle_path}\")\n subs = pysrt.open(subtitle_path)\n subs_entities_timeline_dict = {}\n\n re_brackets_split = re.compile(r\"(\\[.*?\\]|.*?:|^\\(.*?\\)$)\")\n # (\\[(.* ?)\\] | (.* ?)\\: | ^ \\((.* ?)\\)$)\n cc = RemoveControlChars()\n subs_clean = [cc.remove_control_chars(s.text.strip('-\\\\\\/').replace(\"\\n\", \" \")) for s in subs]\n subs_clean = [re.sub(r'<[^<]+?>', '', s) for s in subs_clean]\n brackets = [re_brackets_split.findall(s) for s in subs_clean]\n subs_text = [word_tokenize(s) for s in subs_clean]\n st = StanfordNERTagger(STANFORD_NLP_MODEL,\n encoding='utf-8', path_to_jar=STANFORD_NLP_JAR)\n\n nlp = 
spacy.load('en_core_web_sm', disable=['parser', 'tagger', 'textcat'])\n entities_spacy = [[(ent.text, ent.label_) for ent in nlp(s).ents] for s in subs_clean]\n\n entities_nltk = st.tag_sents(subs_text)\n\n for s, e_n, e_s, b in zip(subs, entities_nltk, entities_spacy, brackets):\n roles = self._video_role_analyzer.find_roles_names_in_text_ner(e_n, e_s)\n for item in b:\n roles.update(self._video_role_analyzer.find_roles_names_in_text(item))\n # role_counter.update(roles)\n if len(roles) > 0:\n t = s.start.seconds + s.start.minutes * 60\n subs_entities_timeline_dict[t] = roles\n logging.debug(str(subs_entities_timeline_dict))\n return subs_entities_timeline_dict\n\n def get_subtitles_entities_links(self, timelaps_seconds):\n\n timeline = sorted(self._subs_entities_timeline_dict.items(), key=lambda x: x[0])\n graphs = [nx.Graph(), nx.Graph()]\n for i, item in enumerate(timeline):\n t1, entities1 = item\n self.update_appearances(graphs, entities1, t1)\n if len(entities1) > 1:\n edges = self._get_edges(entities1, entities1)\n self.update_interaction(graphs, edges, t1)\n for t2, entities2 in timeline[i + 1:]:\n if t2 - t1 < timelaps_seconds:\n edges = self._get_edges(entities1, entities2)\n self.update_appearances(graphs, entities1, t1)\n self.update_appearances(graphs, entities2, t2)\n self.update_interaction(graphs, edges, t2)\n\n else:\n break\n return graphs\n\n @staticmethod\n def update_appearances(graphs, roles, t):\n for i, g in enumerate(graphs):\n for role in roles:\n r = role[i][IMDB_NAME]\n if r in g.node:\n g.node[r][\"last\"] = t\n else:\n g.add_node(r, **{\"first\": t, \"last\": t, \"role\": role[1 - i][IMDB_NAME]})\n\n @staticmethod\n def update_interaction(graphs, roles, t):\n for i, g in enumerate(graphs):\n for role in roles:\n v, u = role[0][i][IMDB_NAME], role[1][i][IMDB_NAME]\n if (v, u) in g.edges:\n g.adj[v][u][\"last\"] = t\n g.adj[v][u][\"weight\"] += 1\n else:\n g.add_edge(v, u, **{\"first\": t, \"last\": t, \"weight\": 1})\n\n def _get_edges(self, l1, l2):\n edges = []\n for v1 in l1:\n for v2 in l2:\n if str(v1[1]) == str(v2[1]):\n continue\n if v1 > v2:\n v1, v2 = v2, v1\n edges.append((v1, v2))\n return edges\n\n @property\n def imdb_rating(self):\n return self._video_role_analyzer.rating()\n\n\nif __name__ == \"__main__\":\n movie = get_movie_obj(\"The Godfather\", \"The Godfather\", 1972, \"0068646\")\n sf = SubtitleFetcher(movie)\n d = sf.fetch_subtitle(\"../temp\")\n sa = SubtitleAnalyzer(d)\n G = sa.get_subtitles_entities_links(60)\n", "id": "5232523", "language": "Python", "matching_score": 3.7115859985351562, "max_stars_count": 1, "path": "subs2network/subtitle_analyzer.py" }, { "content": "import logging\nimport os\nimport pickle\nimport re\nfrom collections import defaultdict\n\nimport spacy\nimport stop_words\nimport tmdbsimple as tmdb\nfrom fuzzywuzzy import process\nfrom imdb import IMDb\nfrom nltk.corpus import names\n\nfrom subs2network.consts import IMDB_NAME, IMDB_CAST, MIN_NAME_SIZE\nfrom subs2network.exceptions import CastNotFound\nfrom subs2network.utils import to_iterable\n\n# import spacy\n\ntmdb.API_KEY = os.getenv('TMD_API_KEY')\n\n\nclass VideoRolesAnalyzer(object):\n \"\"\"\n Identifies roles in text using roles' information from IMDB\n \"\"\"\n\n def __init__(self, imdb_id, use_top_k_roles=None, ignore_roles_names=None, roles_path=None):\n \"\"\"\n Construct VideoRolesAnalyzer object which can get text and identify the characters names in the text\n :param imdb_id: imdb\n :param remove_roles_names: list of roles names to ignore when 
analyzing the roles dict.\n \"\"\"\n\n self._roles_dict = defaultdict(set)\n self._roles_path = None\n if roles_path is not None:\n self._roles_path = roles_path\n self.imdb_id = imdb_id\n if self._roles_path is None or not os.path.exists(self._roles_path):\n self._imdb_movie = IMDb().get_movie(imdb_id)\n with open(self._roles_path, \"wb\") as f:\n pickle.dump(self._imdb_movie, f)\n else:\n with open(self._roles_path, \"rb\") as f:\n self._imdb_movie = pickle.load(f)\n self._stop_words_english = set(stop_words.get_stop_words(\"english\")) - set([n.lower() for n in names.words()])\n self._use_top_k_roles = {}\n self._ignore_roles_names = set(ignore_roles_names)\n self._init_roles_dict(use_top_k_roles)\n\n def get_tmdb_cast(self):\n cast = {}\n external_source = 'imdb_id'\n find = tmdb.Find(f\"tt{self.imdb_id}\")\n resp = find.info(external_source=external_source)\n m_id = resp['movie_results'][0]['id']\n m = tmdb.Movies(m_id)\n\n for person in m.credits()['cast']:\n if \"uncredited\" not in person['character']:\n cast[person['name']] = person['character'].replace(\" (voice)\", \"\")\n return cast\n\n def _init_roles_dict(self, use_top_k_roles, remove_possessives=True):\n \"\"\"\n Initialize roles dict where each of the dict's key is represent a part of a unique role name and each value is\n a tuple of matching (Person, Role)\n :param use_top_k_roles: only use the top K IMDB roles\n :param remove_possessives: remove roles name which contains possessives, such as Andy's Wife\n :return:\n \"\"\"\n\n re_possessive = re.compile(r\"(\\w+\\'s\\s+\\w+|\\w+s\\'\\s+\\w+)\")\n try:\n cast_list = self._imdb_movie[IMDB_CAST]\n except KeyError:\n raise CastNotFound\n if use_top_k_roles is not None:\n cast_list = cast_list[:use_top_k_roles]\n try:\n tmdb_cast = self.get_tmdb_cast()\n except:\n tmdb_cast = {}\n for i, p in enumerate(cast_list):\n for role in to_iterable(p.currentRole):\n\n if role.notes == '(uncredited)':\n return\n if role is None or IMDB_NAME not in role.keys():\n logging.warning(\"Could not find current role for %s\" % str(p))\n else:\n if remove_possessives and len(re_possessive.findall(role[IMDB_NAME])) > 0:\n logging.info(\"Skipping role with possessive name - %s\" % role[IMDB_NAME])\n continue\n nlp = spacy.load('en_core_web_sm')\n\n doc = nlp(role[IMDB_NAME])\n adj = False\n for token in doc:\n if token.pos_ == \"ADJ\":\n adj = True\n if not adj or len(doc) == 1 or i < 4:\n if p[IMDB_NAME] in tmdb_cast:\n tmdb_role = tmdb_cast[p[IMDB_NAME]]\n if len(tmdb_role) > len(role[IMDB_NAME]):\n role[IMDB_NAME] = tmdb_role\n self._add_role_to_roles_dict(p, role)\n\n def _add_role_to_roles_dict(self, person, role):\n role_name = role[IMDB_NAME]\n if role_name in self._ignore_roles_names:\n return\n n = str(role_name).strip().lower().replace('\"', '')\n re_white_space = re.compile(r\"\\b([^\\d\\W].*?)\\b\")\n re_apost_name = re.compile(r\"^'(.*?)'$\")\n\n if re_apost_name.match(n):\n n = re_apost_name.findall(n)[0]\n\n parts = re_white_space.findall(n)\n\n for name_part in parts:\n\n if name_part == \"himself\" or name_part == \"herself\":\n self._add_role_to_roles_dict(person, person)\n continue\n\n if name_part in self._stop_words_english or len(name_part) < MIN_NAME_SIZE:\n continue\n\n if name_part.title() in self._ignore_roles_names:\n continue\n\n for part in name_part.split(\"-\"):\n if part not in self._stop_words_english or len(part) > MIN_NAME_SIZE:\n self._roles_dict[part].add((person, role))\n\n self._roles_dict[name_part].add((person, role))\n\n def find_roles_names_in_text(self, 
txt):\n \"\"\"\n Find matched roles in the input text\n :param txt: input text\n :return: set of matched roles in the text\n \"\"\"\n matched_roles = set()\n if not txt:\n return matched_roles\n\n txt = txt.strip().lower()\n s = \"(%s)\" % \"|\".join([fr\"\\b{r}\\b\" for r in self._roles_dict.keys()])\n\n roles_in_text = set(re.findall(s, txt))\n\n for r in roles_in_text:\n role = self.match_roles(r)\n if role:\n matched_roles.add(role)\n return matched_roles\n\n def find_roles_names_in_text_ner(self, stanford_ner, spacy_ner):\n \"\"\"\n Find matched roles in the input text\n :param spacy_ner:\n :param stanford_ner:\n :return: set of matched roles in the text\n \"\"\"\n stanford_res = self.find_roles_names_in_text_stanford_ner(stanford_ner)\n spacy_res = self.find_roles_names_in_text_spacy_ner(spacy_ner)\n return stanford_res.union(spacy_res)\n\n def find_roles_names_in_text_spacy_ner(self, classified_text):\n \"\"\"\n Find matched roles in the input text\n :param classified_text:\n :return: set of matched roles in the text\n \"\"\"\n matched_roles = set()\n\n for txt, ent_type in classified_text:\n if ent_type in {\"PERSON\", \"ORG\"}:\n role = self.match_roles(txt)\n if role:\n matched_roles.add(role)\n\n return matched_roles\n\n def match_roles(self, raw_txt):\n txt = raw_txt.lower().split()\n for n in txt:\n if n in self._roles_dict:\n if len(self._roles_dict[n]) == 1:\n return list(self._roles_dict[n])[0]\n\n choices = {role[IMDB_NAME]: (actor, role) for actor, role in self._roles_dict[n]}\n try:\n m = process.extractOne(raw_txt, choices.keys(), score_cutoff=90)[0]\n return choices[m]\n except TypeError:\n pass\n\n def find_roles_names_in_text_stanford_ner(self, classified_text):\n \"\"\"\n Find matched roles in the input text\n :param classified_text:\n :return: set of matched roles in the text\n \"\"\"\n matched_roles = set()\n people = []\n temp = []\n\n for r, ent_type in classified_text:\n if ent_type in {\"PERSON\", \"ORG\"}:\n temp.append(r)\n elif temp:\n people.append(\" \".join(temp))\n temp = []\n for p in people:\n role = self.match_roles(p)\n if role:\n matched_roles.add(role)\n\n return matched_roles\n\n def rating(self):\n \"\"\"\n Return the video IMDB rating\n :return: Video's IMDB rating\n \"\"\"\n return self._imdb_movie.data[\"rating\"]\n\n\nif __name__ == \"__main__\":\n vde = VideoRolesAnalyzer(7959026)\n print(vde.find_roles_names_in_text(\"Sayid. 
l'm on it, <NAME>.\"))\n", "id": "1287315", "language": "Python", "matching_score": 2.2884602546691895, "max_stars_count": 1, "path": "subs2network/video_roles_analyzer.py" }, { "content": "import os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nIMDB_CAST = \"cast\"\nIMDB_NAME = \"name\"\nMIN_NAME_SIZE = 0\nSRC_ID = \"src_id\"\nDST_ID = \"dst_id\"\nWEIGHT = \"weight\"\n\nSUBTITLE_SLEEP_TIME = 3\n\nEPISODE_ID = \"id\"\nEPISODE_NAME = \"title\"\nEPISODE_NUMBER = \"episode\"\nEPISODE_RATING = \"rating\"\nSEASON_ID = \"seasonid\"\nSEASON_NUMBER = \"SeasonNumber\"\nDVD_SEASON = \"DVD_season\"\nDVD_EPISODE = \"DVD_episodenumber\"\nSERIES_ID = \"seriesid\"\nEPISODE_GUEST_STARTS = \"GuestStars\"\nSERIES_NAME = \"Series_name\"\nIMDB_ID = \"imdb_id\"\nVIDEO_NAME = \"movie_name\"\nIMDB_RATING = \"imdb_rating\"\nSUBTITLE_PATH = \"subtitle_path\"\nROLES_PATH = \"roles_path\"\nMOVIE_YEAR = \"movie_year\"\nROLES_GRAPH = \"roles_graph\"\nACTORS_GRAPH = \"actors_graph\"\nMAX_YEAR = 2018\ndirname, filename = os.path.split(os.path.abspath(__file__))\nTHE_TVDB_URL = r\"http://thetvdb.com/data/series/%s/all/en.xml\"\n\nIMDB_NAMES_URL = \"https://datasets.imdbws.com/name.basics.tsv.gz\"\nIMDB_TITLES_URL = \"https://datasets.imdbws.com/title.basics.tsv.gz\"\nIMDB_CREW_URL = \"https://datasets.imdbws.com/title.crew.tsv.gz\"\nIMDB_RATING_URL = \"https://datasets.imdbws.com/title.ratings.tsv.gz\"\nIMDB_PRINCIPALS_URL = \"https://datasets.imdbws.com/title.principals.tsv.gz\"\n\nBASE_DIR_NAME = \".subs2net\"\n\nBASEPATH = os.path.expanduser(os.path.join('~', BASE_DIR_NAME))\nif not os.path.exists(BASEPATH):\n os.mkdir(BASEPATH)\n os.mkdir(f\"{BASEPATH}/data\")\n os.mkdir(f\"{BASEPATH}/subtitles\")\n os.mkdir(f\"{BASEPATH}/output\")\n os.mkdir(f\"{BASEPATH}/ner\")\n\nOUTPUT_PATH = f\"{BASEPATH}/output\"\nDATA_PATH = f\"{BASEPATH}/data\"\nSTANFORD_NLP_MODEL = f\"{BASEPATH}/ner/english.all.3class.distsim.crf.ser.gz\"\nSTANFORD_NLP_JAR = f\"{BASEPATH}/ner/stanford-ner.jar\"\nSTANFORD_NLP_JAR_URL = \"https://github.com/data4goodlab/subs2network/raw/master/ner/stanford-ner.jar\"\nSTANFORD_NLP_MODEL_URL = \"https://github.com/data4goodlab/subs2network/raw/master/ner/classifiers/english.all.3class.distsim.crf.ser.gz\"\nDEBUG = True\n\n\ndef set_output_path(output_path):\n global OUTPUT_PATH\n OUTPUT_PATH = output_path\n", "id": "5507849", "language": "Python", "matching_score": 5.586370944976807, "max_stars_count": 1, "path": "subs2network/consts.py" }, { "content": "import os\n\n\n\nIMDB_NAMES_URL = \"https://datasets.imdbws.com/name.basics.tsv.gz\"\nIMDB_TITLES_URL = \"https://datasets.imdbws.com/title.basics.tsv.gz\"\nIMDB_CREW_URL = \"https://datasets.imdbws.com/title.crew.tsv.gz\"\nIMDB_RATING_URL = \"https://datasets.imdbws.com/title.ratings.tsv.gz\"\nIMDB_PRINCIPALS_URL = \"https://datasets.imdbws.com/title.principals.tsv.gz\"\n\nBASE_DIR_NAME = \".imdb\"\n\nBASEPATH = os.path.expanduser(os.path.join('~', BASE_DIR_NAME))\n\n\nOUTPUT_PATH = f\"{BASEPATH}/output\"\n\n\ndef set_output_path(output_path):\n global OUTPUT_PATH\n OUTPUT_PATH = output_path\n", "id": "5723163", "language": "Python", "matching_score": 1.9484227895736694, "max_stars_count": 0, "path": "IMDb_dataset/consts.py" }, { "content": "import turicreate.aggregate as agg\nfrom turicreate import SFrame\n\nfrom IMDb_dataset.consts import IMDB_RATING_URL, OUTPUT_PATH, IMDB_CREW_URL, IMDB_TITLES_URL, IMDB_PRINCIPALS_URL, \\\n IMDB_NAMES_URL, output_path\nfrom IMDb_dataset.utils import download_file\n\n\n\nclass IMDbDataset(object):\n\n def __init__(self, 
verbose=False, output_path=None):\n self._rating = None\n self._crew = None\n self._title = None\n self._actors = None\n self._actors_movies = None\n self._all_actors = None\n self._verbose = verbose\n if output_path is not None:\n set_output_path(output_path)\n\n def get_movie_rating(self, imdb_id):\n try:\n return self.rating[self.rating[\"tconst\"] == f\"tt{imdb_id}\"][\"averageRating\"][0]\n except IndexError:\n return None\n\n\n def get_actor_movies(self, actor):\n try:\n return self.actors_movies[self.actors_movies[\"nconst\"] == actor]\n except IndexError:\n return None\n\n @property\n def popular_actors(self):\n if self._actors is None:\n download_file(IMDB_PRINCIPALS_URL, f\"{OUTPUT_PATH}/title.principals.tsv.gz\", False)\n self._actors = SFrame.read_csv(f\"{OUTPUT_PATH}/title.principals.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n self._actors = self._actors.filter_by([\"actor\", \"actress\"], \"category\")[\"tconst\", \"nconst\"]\n\n self._actors = self._actors.join(\n self.rating[(self.rating[\"titleType\"] == \"movie\") & (self.rating[\"numVotes\"] > 1000)])\n self._actors = self._actors.groupby(\"nconst\", operations={'averageRating': agg.AVG(\"averageRating\"),\n 'count': agg.COUNT()})\n self._actors = self._actors.sort(\"averageRating\", ascending=False)\n names = SFrame.read_csv(f\"{OUTPUT_PATH}/name.basics.tsv.gz\", delimiter=\"\\t\")\n\n self._actors = self._actors.join(names)\n\n return self._actors\n\n @property\n def actors_movies(self):\n if self._actors_movies is None:\n download_file(IMDB_PRINCIPALS_URL, f\"{OUTPUT_PATH}/title.principals.tsv.gz\", False)\n self._actors_movies = SFrame.read_csv(f\"{OUTPUT_PATH}/title.principals.tsv.gz\", delimiter=\"\\t\",\n na_values=[\"\\\\N\"], verbose=self._verbose)\n self._actors_movies = self._actors_movies.filter_by([\"actor\", \"actress\"], \"category\")[\n \"tconst\", \"nconst\", \"characters\"]\n self._actors_movies = self._actors_movies.join(self.title[self.title[\"titleType\"] == \"movie\"])\n self._actors_movies = self._actors_movies.join(self.all_actors)\n return self._actors_movies\n\n @property\n def all_actors(self):\n if self._all_actors is None:\n download_file(IMDB_NAMES_URL, f\"{OUTPUT_PATH}/name.basics.tsv.gz\", False)\n self._all_actors = SFrame.read_csv(f\"{OUTPUT_PATH}/name.basics.tsv.gz\", delimiter=\"\\t\",\n na_values=[\"\\\\N\"], verbose=self._verbose)\n self._all_actors[\"primaryProfession\"] = self._all_actors[\"primaryProfession\"].apply(lambda x: x.split(\",\"))\n self._all_actors = self._all_actors.stack(\"primaryProfession\", \"primaryProfession\")\n self._all_actors = self._all_actors.filter_by([\"actor\", \"actress\"], \"primaryProfession\")\n return self._all_actors\n\n @property\n def rating(self):\n if self._rating is None:\n download_file(IMDB_RATING_URL, f\"{OUTPUT_PATH}/title.ratings.tsv.gz\", False)\n self._rating = SFrame.read_csv(f\"{OUTPUT_PATH}/title.ratings.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n self._rating = self._rating.join(self.title)\n return self._rating\n\n @property\n def crew(self):\n if self._crew is None:\n download_file(IMDB_CREW_URL, f\"{OUTPUT_PATH}/title.crew.tsv.gz\", False)\n self._crew = SFrame.read_csv(f\"{OUTPUT_PATH}/title.crew.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n self._crew[\"directors\"] = self.crew[\"directors\"].apply(lambda c: c.split(\",\"))\n self._crew = self._crew.stack(\"directors\", \"directors\")\n return self._crew\n\n @property\n def 
title(self):\n if self._title is None:\n download_file(IMDB_TITLES_URL, f\"{OUTPUT_PATH}/title.basics.tsv.gz\", False)\n self._title = SFrame.read_csv(f\"{OUTPUT_PATH}/title.basics.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n return self._title\n\n\n def get_movies_data(self):\n rating = self.rating[self.rating[\"numVotes\"] > 1000]\n sf = self.title.join(rating)\n sf = sf[sf[\"titleType\"] == \"movie\"]\n return sf.sort(\"averageRating\", ascending=False)\n\n def get_directors_data(self):\n\n rating = self.rating[self.rating[\"numVotes\"] > 10000]\n\n sf = self.crew.join(rating)\n\n title = self.title[self.title[\"titleType\"] == \"movie\"]\n sf = sf.join(title)\n sf = sf.groupby(key_column_names='directors',\n operations={'averageRating': agg.AVG(\"averageRating\"), 'count': agg.COUNT()})\n\n sf = sf[sf[\"count\"] > 5]\n\n names = SFrame.read_csv(f\"{OUTPUT_PATH}/name.basics.tsv.gz\", delimiter=\"\\t\")\n sf = sf.join(names, {\"directors\": \"nconst\"})\n return sf.sort(\"averageRating\", ascending=False)\n\n def get_movies_by_character(self, character):\n p = self.actors_movies.dropna(\"characters\").stack(\"characters\", new_column_name=\"character\")\n char_movies = p[p[\"character\"].apply(lambda x: 1 if character in x else 0)]\n char_movies = char_movies.join(self.rating)\n char_movies = char_movies[char_movies[\"numVotes\"] > 2000].filter_by(\"movie\", \"titleType\")\n return char_movies[[\"primaryTitle\", \"tconst\", \"startYear\", \"averageRating\", \"numVotes\"]].unique()\n\n def get_movies_by_title(self, title):\n m = self.rating[self.rating[\"primaryTitle\"].apply(lambda x: 1 if title in x else 0)]\n m = m[m[\"numVotes\"] > 2000].filter_by(\"movie\", \"titleType\")\n return m[[\"primaryTitle\", \"tconst\", \"startYear\", \"averageRating\", \"numVotes\"]].unique()\n\n", "id": "6474849", "language": "Python", "matching_score": 6.731004238128662, "max_stars_count": 0, "path": "IMDb_dataset/imdb_dataset.py" }, { "content": "import turicreate.aggregate as agg\nfrom turicreate import SFrame\n\nfrom subs2network.consts import IMDB_RATING_URL, OUTPUT_PATH, IMDB_CREW_URL, IMDB_TITLES_URL, IMDB_PRINCIPALS_URL, \\\n DATA_PATH, IMDB_NAMES_URL\nfrom subs2network.utils import download_file\n\n\ndef get_gender(profession):\n if \"actor\" in profession:\n return \"M\"\n if \"actress\" in profession:\n return \"F\"\n raise IndexError\n\n\nclass IMDbDatasets(object):\n\n def __init__(self, verbose=False):\n self._rating = None\n self._crew = None\n self._title = None\n self._actors = None\n self._actors_movies = None\n self._first_name_gender = None\n self._actors_gender = None\n self._all_actors = None\n self._verbose = verbose\n\n def get_movie_rating(self, imdb_id):\n try:\n return self.rating[self.rating[\"tconst\"] == f\"tt{imdb_id}\"][\"averageRating\"][0]\n except IndexError:\n return None\n\n def get_actor_gender(self, actor):\n try:\n return self.actors_gender[actor]\n except KeyError:\n try:\n first_name = actor.split(\" \")[0].lower()\n return self.first_name_gender[first_name][\"Gender\"][:1]\n except KeyError:\n return \"U\"\n\n @property\n def actors_gender(self):\n if self._actors_gender is None:\n self._actors_gender = self.all_actors[[\"primaryName\", \"gender\"]].unstack([\"primaryName\", \"gender\"])[0][\n \"Dict of primaryName_gender\"]\n return self._actors_gender\n\n def add_actor_gender(self, actor):\n try:\n return get_gender(actor[\"primaryProfession\"])\n except IndexError:\n try:\n return 
self.get_gender_by_name(actor[\"primaryName\"])\n except IndexError:\n return \"U\"\n\n def get_gender_by_name(self, actor):\n try:\n first_name = actor.split(\" \")[0].lower()\n return self.first_name_gender[first_name][\"Gender\"][0]\n except KeyError:\n return None\n except IndexError:\n return None\n\n def get_actor_movies(self, actor):\n try:\n return self.actors_movies[self.actors_movies[\"nconst\"] == actor]\n except IndexError:\n return None\n\n @property\n def popular_actors(self):\n if self._actors is None:\n download_file(IMDB_PRINCIPALS_URL, f\"{OUTPUT_PATH}/title.principals.tsv.gz\", False)\n self._actors = SFrame.read_csv(f\"{OUTPUT_PATH}/title.principals.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n self._actors = self._actors.filter_by([\"actor\", \"actress\"], \"category\")[\"tconst\", \"nconst\"]\n\n self._actors = self._actors.join(\n self.rating[(self.rating[\"titleType\"] == \"movie\") & (self.rating[\"numVotes\"] > 1000)])\n self._actors = self._actors.groupby(\"nconst\", operations={'averageRating': agg.AVG(\"averageRating\"),\n 'count': agg.COUNT()})\n self._actors = self._actors.sort(\"averageRating\", ascending=False)\n names = SFrame.read_csv(f\"{OUTPUT_PATH}/name.basics.tsv.gz\", delimiter=\"\\t\")\n\n self._actors = self._actors.join(names)\n self._actors[\"gender\"] = self._actors.apply(lambda p: self.add_actor_gender(p))\n\n return self._actors\n\n @property\n def actors_movies(self):\n if self._actors_movies is None:\n download_file(IMDB_PRINCIPALS_URL, f\"{OUTPUT_PATH}/title.principals.tsv.gz\", False)\n self._actors_movies = SFrame.read_csv(f\"{OUTPUT_PATH}/title.principals.tsv.gz\", delimiter=\"\\t\",\n na_values=[\"\\\\N\"], verbose=self._verbose)\n self._actors_movies = self._actors_movies.filter_by([\"actor\", \"actress\"], \"category\")[\n \"tconst\", \"nconst\", \"characters\"]\n self._actors_movies = self._actors_movies.join(self.title[self.title[\"titleType\"] == \"movie\"])\n self._actors_movies = self._actors_movies.join(self.all_actors)\n return self._actors_movies\n\n @property\n def all_actors(self):\n if self._all_actors is None:\n download_file(IMDB_NAMES_URL, f\"{OUTPUT_PATH}/name.basics.tsv.gz\", False)\n self._all_actors = SFrame.read_csv(f\"{OUTPUT_PATH}/name.basics.tsv.gz\", delimiter=\"\\t\",\n na_values=[\"\\\\N\"], verbose=self._verbose)\n self._all_actors[\"primaryProfession\"] = self._all_actors[\"primaryProfession\"].apply(lambda x: x.split(\",\"))\n self._all_actors = self._all_actors.stack(\"primaryProfession\", \"primaryProfession\")\n self._all_actors = self._all_actors.filter_by([\"actor\", \"actress\"], \"primaryProfession\")\n self._all_actors[\"gender\"] = self._all_actors.apply(lambda p: self.add_actor_gender(p))\n return self._all_actors\n\n @property\n def rating(self):\n if self._rating is None:\n download_file(IMDB_RATING_URL, f\"{OUTPUT_PATH}/title.ratings.tsv.gz\", False)\n self._rating = SFrame.read_csv(f\"{OUTPUT_PATH}/title.ratings.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n self._rating = self._rating.join(self.title)\n return self._rating\n\n @property\n def crew(self):\n if self._crew is None:\n download_file(IMDB_CREW_URL, f\"{OUTPUT_PATH}/title.crew.tsv.gz\", False)\n self._crew = SFrame.read_csv(f\"{OUTPUT_PATH}/title.crew.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n self._crew[\"directors\"] = self.crew[\"directors\"].apply(lambda c: c.split(\",\"))\n self._crew = self._crew.stack(\"directors\", 
\"directors\")\n return self._crew\n\n @property\n def title(self):\n if self._title is None:\n download_file(IMDB_TITLES_URL, f\"{OUTPUT_PATH}/title.basics.tsv.gz\", False)\n self._title = SFrame.read_csv(f\"{OUTPUT_PATH}/title.basics.tsv.gz\", delimiter=\"\\t\", na_values=[\"\\\\N\"],\n verbose=self._verbose)\n return self._title\n\n @property\n def first_name_gender(self):\n if self._first_name_gender is None:\n self._first_name_gender = SFrame(f\"{DATA_PATH}/first_names_gender.sframe\")\n self._first_name_gender = self._first_name_gender.unstack([\"First Name\", \"Gender Dict\"])[0][\n \"Dict of First Name_Gender Dict\"]\n return self._first_name_gender\n\n def get_movies_data(self):\n rating = self.rating[self.rating[\"numVotes\"] > 1000]\n sf = self.title.join(rating)\n sf = sf[sf[\"titleType\"] == \"movie\"]\n return sf.sort(\"averageRating\", ascending=False)\n\n def get_directors_data(self):\n\n rating = self.rating[self.rating[\"numVotes\"] > 10000]\n\n sf = self.crew.join(rating)\n\n title = self.title[self.title[\"titleType\"] == \"movie\"]\n sf = sf.join(title)\n sf = sf.groupby(key_column_names='directors',\n operations={'averageRating': agg.AVG(\"averageRating\"), 'count': agg.COUNT()})\n\n sf = sf[sf[\"count\"] > 5]\n\n names = SFrame.read_csv(f\"{OUTPUT_PATH}/name.basics.tsv.gz\", delimiter=\"\\t\")\n sf = sf.join(names, {\"directors\": \"nconst\"})\n return sf.sort(\"averageRating\", ascending=False)\n\n def get_movies_by_character(self, character):\n p = self.actors_movies.dropna(\"characters\").stack(\"characters\", new_column_name=\"character\")\n char_movies = p[p[\"character\"].apply(lambda x: 1 if character in x else 0)]\n char_movies = char_movies.join(self.rating)\n char_movies = char_movies[char_movies[\"numVotes\"] > 2000].filter_by(\"movie\", \"titleType\")\n return char_movies[[\"primaryTitle\", \"tconst\", \"startYear\", \"averageRating\", \"numVotes\"]].unique()\n\n def get_movies_by_title(self, title):\n m = self.rating[self.rating[\"primaryTitle\"].apply(lambda x: 1 if title in x else 0)]\n m = m[m[\"numVotes\"] > 2000].filter_by(\"movie\", \"titleType\")\n return m[[\"primaryTitle\", \"tconst\", \"startYear\", \"averageRating\", \"numVotes\"]].unique()\n\n\nimdb_data = IMDbDatasets()\n# imdb_data.actors\n# isf.title\n# import gzip\n#\n# with gzip.open(f\"{TEMP_PATH}/title.basics.tsv.gz\", 'rt') as f:\n# for line in f:\n# print(line)\n", "id": "439404", "language": "Python", "matching_score": 4.034264087677002, "max_stars_count": 1, "path": "subs2network/imdb_dataset.py" }, { "content": "from subs2network.video_sn_analyzer import VideoSnAnalyzer\n\nimport glob\nimport json\nimport logging\nimport os\nimport traceback\nfrom collections import Counter\nfrom distutils.dir_util import copy_tree\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport pandas as pd\nimport turicreate.aggregate as agg\nfrom imdb import IMDb\nfrom networkx.readwrite import json_graph\nfrom nltk.corpus import names\nfrom nltk.corpus import wordnet\nfrom tqdm import tqdm\nfrom turicreate import SFrame\n\nfrom subs2network.consts import EPISODE_NAME, DATA_PATH, EPISODE_RATING, EPISODE_NUMBER, ROLES_GRAPH, SEASON_NUMBER, \\\n ACTORS_GRAPH, OUTPUT_PATH, MOVIE_YEAR, MAX_YEAR, SERIES_NAME, VIDEO_NAME, SRC_ID, DST_ID, WEIGHT, IMDB_RATING, BASEPATH\nfrom subs2network.exceptions import SubtitleNotFound, CastNotFound\nfrom subs2network.imdb_dataset import imdb_data\nfrom subs2network.subtitle_analyzer import SubtitleAnalyzer\nfrom subs2network.subtitle_fetcher import 
SubtitleFetcher\nfrom subs2network.utils import get_movie_obj, get_episode_obj\n\nlogging.basicConfig(level=logging.ERROR)\n\n\ndef get_series_episodes_details(s_id, series_name, subtitles_path):\n \"\"\"\n Returns TV series episodes' details from TheTVDB website\n :param subtitles_path:\n :param s_id: series id in TheTVDB\n :param series_name: series name\n :return: dict with episodes information\n \"\"\"\n series_path = f\"{subtitles_path}/series_info.pkl\"\n if not os.path.exists(series_path):\n logging.info(\"Retreving series data of %s\" % s_id)\n ia = IMDb()\n series = ia.get_movie(s_id)\n try:\n if series['kind'] != \"tv series\":\n raise TypeError(f\"{series_name} not a tv series\")\n except KeyError:\n print(s_id)\n\n ia.update(series, 'episodes')\n if series['episodes']:\n with open(series_path, \"wb\") as f:\n series = pickle.dump(series, f)\n else:\n with open(series_path, \"rb\") as f:\n series = pickle.load(f)\n return series['episodes']\n\n\ndef get_tvseries_graphs(series_name, imdb_id, seasons_set, episodes_set, subtitles_path,\n use_top_k_roles=None, timelaps_seconds=60, graph_type=ROLES_GRAPH, min_weight=2):\n series_details_dict = get_series_episodes_details(imdb_id, series_name, subtitles_path)\n seasons = set(series_details_dict.keys()) & seasons_set\n for seasons_number in seasons:\n for episode_number in episodes_set:\n try:\n episode = series_details_dict[seasons_number][episode_number]\n except KeyError:\n continue\n\n episode_name = episode[EPISODE_NAME]\n episode_rating = episode[EPISODE_RATING]\n epiosde_name = _get_episode_name(series_name, seasons_number, episode_number)\n\n try:\n yield get_episode_graph(epiosde_name, series_name, seasons_number, episode_number,\n episode_name, episode_rating, subtitles_path=subtitles_path,\n use_top_k_roles=use_top_k_roles, timelaps_seconds=timelaps_seconds,\n graph_type=graph_type, min_weight=min_weight, imdb_id=imdb_id)\n except SubtitleNotFound:\n logging.warning(\"Could not fetch %s subtitles\" % episode_name)\n continue\n\n\ndef get_person_movies_graphs(actor_name, filmography, person_type=\"actors\", min_movies_number=None,\n use_top_k_roles=None,\n timelaps_seconds=60, min_weight=4, ignore_roles_names=None):\n graphs_list = []\n for m_id, title, year in get_person_movies(actor_name, filmography):\n if year > MAX_YEAR:\n continue\n if min_movies_number is not None and len(graphs_list) >= min_movies_number:\n break\n title = title.replace('.', '').replace('/', '')\n movie_name = f\"{title} ({year})\"\n # try:\n create_dirs(\"movies\", title)\n\n subtitles_path = f\"{BASEPATH}/subtitles\"\n graph_path = f\"{OUTPUT_PATH}/movies/{title}/\"\n if not os.path.exists(f\"{OUTPUT_PATH}/{person_type}/{actor_name}/json/{title}.json\"):\n if not os.path.exists(f\"{graph_path}/{title}.json\"):\n try:\n g = get_movie_graph(movie_name, title, year, m_id, subtitles_path, use_top_k_roles=use_top_k_roles,\n timelaps_seconds=timelaps_seconds, rating=imdb_data.get_movie_rating(m_id),\n min_weight=min_weight, ignore_roles_names=ignore_roles_names)\n yield g\n except CastNotFound:\n logging.error(f\"{actor_name} - {title}\")\n logging.error(traceback.format_exc())\n except AttributeError:\n logging.error(f\"{actor_name} - {title}\")\n logging.error(traceback.format_exc())\n except SubtitleNotFound:\n logging.error(f\"{actor_name} - {title}\")\n logging.error(traceback.format_exc())\n except UnicodeEncodeError:\n logging.error(f\"{actor_name} - {title}\")\n logging.error(traceback.format_exc())\n except KeyError:\n 
logging.error(f\"{actor_name} - {title}\")\n logging.error(traceback.format_exc())\n else:\n print(f\"Copy: {actor_name} - {title}\")\n copy_tree(f\"{OUTPUT_PATH}/movies/{title}/json\",\n f\"{OUTPUT_PATH}/{person_type}/{actor_name}/json\")\n copy_tree(f\"{OUTPUT_PATH}/movies/{title}/graphs\",\n f\"{OUTPUT_PATH}/{person_type}/{actor_name}/graphs\")\n\n\ndef save_graphs_features(graphs_list, features_path, remove_unintresting_features, sep=\"\\t\"):\n features_dicts_list = []\n for g in graphs_list:\n d = VideoSnAnalyzer.get_features_dict(g, True)\n if d is None:\n continue\n features_dicts_list.append(d)\n all_keys = set()\n for d in features_dicts_list:\n all_keys |= set(d.keys())\n\n if remove_unintresting_features:\n all_keys -= set(get_unintresting_features_names(features_dicts_list))\n\n all_keys = list(all_keys)\n csv_lines = [sep.join(all_keys)]\n for d in features_dicts_list:\n l = []\n for k in all_keys:\n if k in d:\n l.append(str(d[k]))\n else:\n l.append(\"0\")\n csv_lines.append(sep.join(l))\n with open(features_path, \"w\", encoding='utf-8') as f:\n f.write(\"\\n\".join(csv_lines))\n\n\ndef save_graphs_to_csv(graphs_list, csv_folder, sep=\"\\t\"):\n for g in graphs_list:\n csv_path = f\"{csv_folder}/({g.graph[MOVIE_YEAR]}) - {g.graph[VIDEO_NAME]}.csv\"\n\n if SERIES_NAME in g.graph:\n save_episode_graph_to_csv(g, g.graph[SERIES_NAME], g.graph[SEASON_NUMBER], g.graph[EPISODE_NUMBER],\n g.graph[IMDB_RATING],\n csv_path, add_headers=True, sep=sep)\n else:\n save_movie_graph_to_csv(g, g.graph[VIDEO_NAME], g.graph[IMDB_RATING], csv_path, add_headers=True,\n sep=sep)\n\n\ndef save_graphs_to_json(graphs_list, output_dir):\n for g in graphs_list:\n data = json_graph.node_link_data(g)\n json_path = f\"{output_dir}/({g.graph[MOVIE_YEAR]}) - {g.graph[VIDEO_NAME]}.json\"\n with open(json_path, 'w') as fp:\n json.dump(data, fp)\n\n\ndef draw_graphs(graphs_list, figures_path, output_format=\"png\"):\n for g in graphs_list:\n draw_outpath = f\"{figures_path}/({g.graph[MOVIE_YEAR]}) - {g.graph[VIDEO_NAME]}.{output_format}\"\n draw_graph(g, draw_outpath)\n\n\ndef get_movie_graph(name, title, year, imdb_id, subtitles_path, use_top_k_roles=None, timelaps_seconds=60,\n min_weight=2, rating=None, ignore_roles_names=None):\n va = _get_movie_video_sn_analyzer(name, title, year, imdb_id, subtitles_path, use_top_k_roles,\n timelaps_seconds, rating, ignore_roles_names=ignore_roles_names)\n g = va.construct_social_network_graph(ROLES_GRAPH, min_weight)\n g.graph[VIDEO_NAME] = title\n g.graph[MOVIE_YEAR] = year\n g.graph[IMDB_RATING] = va.video_rating\n\n g_r = va.construct_social_network_graph(ACTORS_GRAPH, min_weight)\n g_r.graph[VIDEO_NAME] = f\"{title} - roles\"\n g_r.graph[MOVIE_YEAR] = year\n g_r.graph[IMDB_RATING] = va.video_rating\n\n return g, g_r\n\n\ndef get_episode_graph(epiosde_name, series_name, season_number, episode_number, episode_name, imdb_rating,\n subtitles_path, imdb_id, use_top_k_roles=None, timelaps_seconds=60,\n graph_type=ROLES_GRAPH, min_weight=2):\n va = _get_series_episode_video_sn_analyzer(epiosde_name, series_name, season_number, episode_number, episode_name,\n subtitles_path, use_top_k_roles, timelaps_seconds, imdb_id, imdb_rating)\n\n g = va.construct_social_network_graph(graph_type, min_weight)\n g.graph[IMDB_RATING] = imdb_rating\n g.graph[VIDEO_NAME] = epiosde_name\n g.graph[SERIES_NAME] = series_name\n g.graph[SEASON_NUMBER] = season_number\n g.graph[EPISODE_NUMBER] = episode_number\n return g\n\n\ndef _get_movie_video_sn_analyzer(name, title, year, imdb_id, 
subtitles_path, use_top_k_roles,\n timelaps_seconds, rating=None, ignore_roles_names=None):\n movie = get_movie_obj(name, title, year, imdb_id)\n return _fetch_and_analyze_subtitle(movie, subtitles_path, use_top_k_roles, timelaps_seconds, rating,\n ignore_roles_names=ignore_roles_names)\n\n\ndef _get_series_episode_video_sn_analyzer(epiosde_name, series_name, season_number, episode_number, episode_name,\n subtitle_path, use_top_k_roles, timelaps_seconds, imdb_id, imdb_rating):\n episode = get_episode_obj(epiosde_name, series_name, season_number, episode_number, episode_name, imdb_id)\n sf = SubtitleFetcher(episode)\n d = sf.fetch_subtitle(subtitle_path)\n return analyze_subtitle(epiosde_name, d, use_top_k_roles, timelaps_seconds, imdb_rating)\n\n\ndef analyze_subtitle(name, subs_dict, use_top_k_roles, timelaps_seconds, imdb_rating=None):\n sa = SubtitleAnalyzer(subs_dict, use_top_k_roles=use_top_k_roles)\n e = sa.get_subtitles_entities_links(timelaps_seconds=timelaps_seconds)\n if imdb_rating is None:\n imdb_rating = sa.imdb_rating\n return VideoSnAnalyzer(name, e, imdb_rating)\n\n\ndef _fetch_and_analyze_subtitle(video_obj, subtitle_path, use_top_k_roles, timelaps_seconds, imdb_rating=None,\n ignore_roles_names=None):\n sf = SubtitleFetcher(video_obj)\n d = sf.fetch_subtitle(subtitle_path)\n sa = SubtitleAnalyzer(d, use_top_k_roles=use_top_k_roles, ignore_roles_names=ignore_roles_names)\n e = sa.get_subtitles_entities_links(timelaps_seconds=timelaps_seconds)\n if imdb_rating is None:\n imdb_rating = sa.imdb_rating\n return VideoSnAnalyzer(video_obj.name, e, imdb_rating)\n\n\ndef draw_graph(g, outpath, graph_layout=nx.spring_layout):\n pos = graph_layout(g)\n plt.figure(num=None, figsize=(15, 15), dpi=150)\n plt.axis('off')\n edge_labels = dict([((u, v,), d['weight'])\n for u, v, d in g.edges(data=True)])\n\n nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels)\n\n nx.draw(g, pos, node_size=500, edge_cmap=plt.cm.Reds, with_labels=True)\n plt.savefig(outpath)\n plt.close()\n\n\ndef get_person_movies(person_name, types=None):\n if types is None:\n types = [\"actor\"]\n im = IMDb()\n p = im.search_person(person_name)[0]\n m_list = im.get_person_filmography(p.getID())\n # person_filmography = dict(item for m in m_list['data']['filmography'] for item in m.items())\n person_filmography = m_list['data']['filmography']\n for t in types:\n if t in person_filmography:\n m_list = person_filmography[t]\n for m in m_list:\n if \"Short\" not in m.notes:\n m_id = m.getID()\n year = m.get('year')\n title = m.get('title')\n if year:\n yield m_id, title, year\n\n\ndef _get_episode_name(series_name, season_number, episode_number):\n n = series_name\n n += \"S\"\n if season_number < 10:\n n += \"0\"\n n += str(season_number)\n n += \"E\"\n if episode_number < 10:\n n += \"0\"\n n += str(episode_number)\n return n\n\n\ndef save_episode_graph_to_csv(g, series_name, season_num, episode_num, rating, outpath, add_headers=False,\n sep=\";\", append_to_file=False):\n headers = [SERIES_NAME, SEASON_NUMBER, EPISODE_NUMBER, SRC_ID, DST_ID, WEIGHT, IMDB_RATING]\n csv_lines = []\n if add_headers:\n csv_lines.append(sep.join(headers))\n for v, u in g.edges():\n r = [series_name, str(season_num), str(episode_num), v, u, str(g.adj[v][u][WEIGHT]),\n str(rating)]\n csv_lines.append(sep.join(r))\n if append_to_file:\n with open(outpath, \"a\") as f:\n f.write(\"\\n\".join(csv_lines))\n else:\n with open(outpath, \"w\", encoding='utf-8') as f:\n f.write(\"\\n\".join(csv_lines))\n\n\ndef save_movie_graph_to_csv(g, 
movie_name, rating, outpath, add_headers=False, sep=\";\", append_to_file=False):\n headers = [VIDEO_NAME, SRC_ID, DST_ID, WEIGHT, IMDB_RATING]\n csv_lines = []\n if add_headers:\n csv_lines.append(sep.join(headers))\n for v, u in g.edges():\n r = [movie_name, v, u, str(g.adj[v][u][WEIGHT]), str(rating)]\n csv_lines.append(sep.join(r))\n if append_to_file:\n with open(outpath, \"a\", encoding='utf-8') as f:\n f.write(\"\\n\".join(csv_lines))\n else:\n with open(outpath, \"w\", encoding='utf-8') as f:\n f.write(\"\\n\".join(csv_lines))\n\n\ndef get_unintresting_features_names(features_dicts, min_freq=5):\n features_names = []\n\n for d in features_dicts:\n features_names += d.keys()\n c = Counter(features_names)\n return [k for k in c.keys() if c[k] < min_freq]\n\n\ndef create_dirs(t, name):\n os.makedirs(f\"{OUTPUT_PATH}/{t}/{name}\", exist_ok=True)\n os.makedirs(f\"{OUTPUT_PATH}/{t}/{name}/csv\", exist_ok=True)\n os.makedirs(f\"{OUTPUT_PATH}/{t}/{name}/json\", exist_ok=True)\n os.makedirs(f\"{OUTPUT_PATH}/{t}/{name}/graphs\", exist_ok=True)\n # os.makedirs(f\"{OUTPUT_PATH}/{t}/{name}/subtitles\", exist_ok=True)\n\n\ndef generate_series_graphs(name, s_id, seasons_set, episodes_set):\n create_dirs(\"series\", name)\n graphs = []\n for g in get_tvseries_graphs(name, s_id, seasons_set, episodes_set, f\"{BASEPATH}/subtitles\"):\n save_output([g], \"series\", name)\n graphs.append(g)\n save_graphs_features(graphs, f\"{OUTPUT_PATH}/series/{name}/{name} features.tsv\", True)\n joined_graph = nx.compose_all(graphs)\n joined_graph.graph[VIDEO_NAME] = name\n joined_graph.graph[\"movie_year\"] = MAX_YEAR\n save_output([joined_graph], \"series\", name)\n\n\ndef generate_actor_movies_graphs(name, ignore_roles_names, filmography):\n create_dirs(\"actors\", name)\n graphs = get_person_movies_graphs(name, filmography, \"actors\", min_movies_number=None,\n ignore_roles_names=ignore_roles_names)\n\n for g in graphs:\n save_output(g, \"actors\", name)\n save_output(g, \"movies\", g[0].graph[\"movie_name\"])\n\n\ndef generate_director_movies_graphs(name, ignore_roles_names):\n create_dirs(\"directors\", name)\n graphs = get_person_movies_graphs(name, [\"director\"], \"directors\", min_movies_number=None,\n ignore_roles_names=ignore_roles_names)\n for g in graphs:\n save_output(g, \"directors\", name)\n save_output(g, \"movies\", g[0].graph[\"movie_name\"])\n\n\ndef save_output(graphs, data_type, name):\n save_graphs_to_csv(graphs, f\"{OUTPUT_PATH}/{data_type}/{name}/csv\")\n draw_graphs(graphs, f\"{OUTPUT_PATH}/{data_type}/{name}/graphs\")\n save_graphs_to_json(graphs, f\"{OUTPUT_PATH}/{data_type}/{name}/json\")\n\n\ndef save_graphs_outputs(graphs, name):\n save_graphs_features(graphs, f\"{OUTPUT_PATH}/actors/{name}/{name} features.tsv\", True)\n save_graphs_to_csv(graphs, f\"{OUTPUT_PATH}/actors/{name}/csv\")\n draw_graphs(graphs, f\"{OUTPUT_PATH}/actors/{name}/graphs\")\n\n\ndef get_black_list():\n try:\n with open(f\"{DATA_PATH}/blacklist_roles.csv\", encoding='utf-8') as f:\n return f.read().splitlines()\n except FileNotFoundError:\n return None\n\n\ndef generate_movie_graph(movie_title, year, imdb_id, additional_data=None):\n rating = None\n if additional_data:\n rating = additional_data[\"averageRating\"]\n movie_title = movie_title.replace('.', '').replace('/', '')\n\n create_dirs(\"movies\", movie_title)\n graphs = get_movie_graph(f\"{movie_title} ({year})\", movie_title, year, imdb_id,\n f\"{BASEPATH}/subtitles\", use_top_k_roles=None,\n min_weight=3, rating=rating, 
ignore_roles_names=get_black_list())\n\n save_output(graphs, \"movies\", movie_title)\n\n with open(f\"{OUTPUT_PATH}/movies/{movie_title}/({year}) - {movie_title}.json\", 'w') as fp:\n json.dump(json.dumps(additional_data), fp)\n\n\ndef get_bechdel_movies():\n movies = SFrame.read_csv(f\"{DATA_PATH}/bechdel_imdb.csv\")\n movies = movies.sort(\"year\", False)\n movies = movies.filter_by(\"movie\", \"titleType\")\n generate_movies_graphs(movies)\n\n\ndef get_popular_movies(resume=False):\n movies = imdb_data.get_movies_data()\n generate_movies_graphs(movies, resume=resume)\n\n\ndef get_best_movies():\n movies = imdb_data.get_movies_data().head(1000)\n generate_movies_graphs(movies)\n\n\ndef generate_movies_graphs(movies_sf, overwrite=False, resume=False):\n resume_id = 0\n if resume:\n movies_sf = movies_sf.add_row_number()\n last_m = \\\n sorted(\n [(f, os.path.getmtime(f\"{OUTPUT_PATH}/movies/{f}\")) for f in os.listdir(f\"{OUTPUT_PATH}/movies/\")],\n key=lambda x: x[1])[0][0]\n resume_id = movies_sf[movies_sf[\"primaryTitle\"] == last_m][\"id\"][0]\n for m in movies_sf[resume_id:]:\n movie_name = m['primaryTitle'].replace('.', '').replace('/', '')\n try:\n if not glob.glob(f\"{OUTPUT_PATH}/movies/{movie_name}/json/*{movie_name} - roles.json\") or overwrite:\n generate_movie_graph(movie_name, m[\"startYear\"], m[\"tconst\"].strip(\"t\"), m)\n else:\n print(f\"{movie_name} Already Exists\")\n except UnicodeEncodeError:\n print(m[\"tconst\"])\n except SubtitleNotFound:\n print(f\"{movie_name} Subtitles Not Found\")\n except CastNotFound:\n print(f\"{movie_name} Cast Not Found\")\n\n\ndef get_movies_by_character(character, overwrite=False):\n movies = imdb_data.get_movies_by_character(character)\n generate_movies_graphs(movies, overwrite)\n\n\ndef get_movies_by_title(title, overwrite=False):\n movies = imdb_data.get_movies_by_title(title)\n generate_movies_graphs(movies, overwrite)\n\n\ndef get_worst_movies():\n movies = imdb_data.get_movies_data().tail(1000)\n generate_movies_graphs(movies)\n\n\ndef get_best_directors():\n directors = imdb_data.get_directors_data().head(100)\n ignore_roles_names = get_black_list()\n for d in directors:\n try:\n director_name = d['primaryName'].replace('.', '').replace('/', '')\n if not os.path.exists(f\"{OUTPUT_PATH}/directors/{director_name}/{director_name}.json\"):\n generate_director_movies_graphs(director_name, ignore_roles_names)\n with open(f\"{OUTPUT_PATH}/directors/{director_name}/{director_name}.json\", \"w\",\n encoding='utf-8') as f:\n f.write(json.dumps(d))\n except UnicodeEncodeError:\n pass\n except SubtitleNotFound:\n pass\n\n\ndef generate_actors_file():\n actors = imdb_data.popular_actors\n actors = actors[actors[\"count\"] > 5]\n res = []\n actors = actors.join(imdb_data.actors_movies, \"nconst\")\n for row in tqdm(actors):\n\n title = row[\"primaryTitle\"]\n\n graph_path = f\"{OUTPUT_PATH}/movies/{title}/\"\n try:\n if glob.glob(f\"{graph_path}/*{title}.json\"):\n res.append({**row, **{\"path\": os.path.abspath(graph_path)}})\n except:\n pass\n\n pd.DataFrame(res).to_csv(f\"{OUTPUT_PATH}/actors.csv\")\n\n\ndef get_popular_actors():\n actors = imdb_data.popular_actors\n actors = actors[actors[\"count\"] > 5]\n m_actors = actors[actors['gender'] == \"M\"].head(500)\n f_actors = actors[actors['gender'] == \"F\"].head(500)\n actors = f_actors.append(m_actors)\n ignore_roles_names = get_black_list()\n for a in actors:\n filmography_type = [\"actor\"]\n if \"actress\" in a[\"primaryProfession\"]:\n filmography_type = [\"actress\"]\n\n actor_name 
= a['primaryName'].replace('.', '').replace('/', '')\n\n generate_actor_movies_graphs(actor_name, ignore_roles_names=ignore_roles_names, filmography=filmography_type)\n with open(f\"{OUTPUT_PATH}/actors/{actor_name}/{actor_name}.json\", \"w\", encoding='utf-8') as f:\n f.write(json.dumps(a))\n\n\ndef generate_blacklist_roles():\n firstnames = SFrame.read_csv(f\"{DATA_PATH}/firstnames.csv\", verbose=False)[\"Name\"]\n surenames = SFrame.read_csv(f\"{DATA_PATH}/surenames.csv\", verbose=False)[\"name\"]\n surenames = surenames.apply(lambda n: n.title())\n sf = SFrame.read_csv(f\"{OUTPUT_PATH}/title.principals.tsv.gz\", delimiter=\"\\t\",\n column_type_hints={\"characters\": list},\n na_values=[\"\\\\N\"])\n sf = sf.filter_by([\"actor\", \"actress\"], \"category\")[\"tconst\", \"ordering\", \"characters\", \"nconst\"]\n sf = sf.join(imdb_data.title[imdb_data.title[\"titleType\"] == \"movie\"])\n sf = sf.stack(\"characters\", \"character\")\n sf[\"character\"] = sf[\"character\"].apply(lambda c: c.title())\n sf.export_csv(f\"{TEMP_PATH}/roles3.csv\")\n\n whitelist = sf.groupby(key_column_names=['character', \"nconst\"],\n operations={'count': agg.COUNT()})\n whitelist = whitelist[whitelist[\"count\"] > 1]['character']\n sf = sf.filter_by(whitelist, \"character\", True)\n sf = sf.groupby(key_column_names=['character'],\n operations={'ordering': agg.AVG(\"ordering\"), 'count': agg.COUNT()})\n sf[\"name\"] = sf[\"character\"].apply(lambda c: c.split(\" \")[-1].strip())\n sf = sf.filter_by(names.words(), \"name\", exclude=True)\n sf = sf.filter_by(surenames, \"name\", exclude=True)\n sf = sf.filter_by(firstnames, \"name\", exclude=True)\n sf = sf.sort(\"count\", False)\n sf = sf[sf['ordering'] > 3]\n w = {x.replace(\"_\", \" \").title() for x in wordnet.words()} - set(names.words())\n sf[\"set\"] = sf[\"character\"].apply(lambda x: x.split(\" \"))\n sf[\"set\"] = sf[\"set\"].apply(lambda x: w & set(x))\n sf = sf[sf['count'] > 11].append(sf[(sf['count'] > 1) & (sf['count'] < 10) & (sf[\"set\"] != [])])\n sf[[\"character\"]].export_csv(f\"{OUTPUT_PATH}/blacklist_roles.csv\")\n\n\nif __name__ == \"__main__\":\n # generate_blacklist_roles()\n # get_best_directors()\n # actors = imdb_data.actors\n # f_actors = actors[actors[\"gender\"]==\"F\"]\n # print(len(f_actors[f_actors[\"count\"] > 5]))\n # print(actors.to_dataframe().describe())\n generate_actors_file()\n # test_get_movie(\"A Nightmare on Elm Street\", 2010, \"1179056\", {\"averageRating\": 5.2})\n # try: g\n # # print(get_directors_data().head(100))\n # test_get_movie(\"The Legend of Zorro\", 2005, \"0386140\", {\"averageRating\": 5.9})\n #\n # # test_get_movie(\"Fight Club\", 1999, \"0137523\", {\"averageRating\": 8.8})\n #\n # # get_best_movies()\n # # test_get_movie(\"The Usual Suspects\", 1995, \"0114814\", {\"averageRating\": 8.6})\n # # # test_get_series(\"Friends\", \"0108778\", set(range(1, 11)), set(range(1, 30)))\n # # test_get_director_movies(\"<NAME>\", load_black_list())\n # # # test_get_actor_movies(\"<NAME>\")\n # # # v = VideosSnCreator()\n # # # name = \"Modern Family\"\n # # # v.save_series_graphs(name, \"95011\" ,set(range(1,7)), set(range(1,25)),\"/temp/series/%s/subtitles\" % name,\n # # # \"{TEMP_PATH}/series/%s/csv\" % name, draw_graph_path=\"{TEMP_PATH}/series/%s/graphs\" % name)\n # except Exception as e:\n # if not DEBUG:\n # send_email(\"<EMAIL>\", \"subs2network Code Crashed & Exited\", traceback.format_exc())\n # else:\n # raise e\n # if not DEBUG:\n # send_email(\"<EMAIL>\", \"subs2network Code Finished\", \"Code 
Finished\")\n", "id": "8858374", "language": "Python", "matching_score": 6.750918865203857, "max_stars_count": 1, "path": "subs2network/videos_sn_creator.py" }, { "content": "import traceback\nfrom subs2network.consts import DEBUG\nfrom subs2network.utils import send_email\nfrom subs2network.videos_sn_creator import get_best_directors, get_best_movies, get_worst_movies, generate_movie_graph, \\\n get_popular_movies, get_popular_actors, generate_actor_movies_graphs, get_black_list, generate_actors_file, \\\n get_bechdel_movies, generate_blacklist_roles, get_movies_by_character, get_movies_by_title\nfrom subs2network.consts import set_output_path\nif __name__ == \"__main__\":\n try:\n\n # ignore_roles_names = load_black_list()\n # test_get_actor_movies(\"<NAME>\", ignore_roles_names, [\"actor\"])\n # generate_actors_file()\n # generate_blacklist_roles()\n # generate_actors_file()\n # get_bechdel_movies()\n # get_movies_by_character(\"<NAME>\", True)\n # get_movies_by_title(\"Star Wars\", True)\n generate_movie_graph(\"The Innkeepers\", 2011, \"1594562\", {\"averageRating\": 5.5})\n\n # get_popular_movies(resume=True)\n # get_best_directors()\n except Exception as e:\n if not DEBUG:\n send_email(\"<EMAIL>\", \"subs2network Code Crashed & Exited\", traceback.format_exc())\n else:\n raise e\n if not DEBUG:\n send_email(\"<EMAIL>\", \"subs2network Code Finished\", \"Code Finished\")\n", "id": "8977384", "language": "Python", "matching_score": 0.47380608320236206, "max_stars_count": 1, "path": "main.py" }, { "content": "from turicreate import SFrame\nfrom subs2network.consts import DATA_PATH, OUTPUT_PATH\nfrom turicreate import aggregate as agg\nimport pandas as pd\nimport math\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\nfrom sklearn.metrics import precision_score, recall_score\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.metrics import roc_auc_score\n\nfrom subs2network.imdb_dataset import imdb_data\nimport numpy as np\n\ngenres = {'Action',\n 'Adventure',\n 'Animation',\n 'Biography',\n 'Comedy',\n 'Crime',\n 'Documentary',\n 'Drama',\n 'Family',\n 'Fantasy',\n 'Film-Noir',\n 'History',\n 'Horror',\n 'Music',\n 'Musical',\n 'Mystery',\n 'Romance',\n 'Sci-Fi',\n 'Sport',\n 'Thriller',\n 'War',\n 'Western'}\n\ndrop_cols = [\"X1\", \"genres\", \"imdbid\", \"originalTitle\", 'endYear', 'isAdult', 'tconst',\n 'titleType', 'tconst.1', 'titleType.1', 'originalTitle.1', 'isAdult.1', 'startYear.1', 'endYear.1',\n 'runtimeMinutes.1', 'genres.1', 'primaryTitle', 'X1', 'id', 'imdbid', 'id']\n\n\ndef split_vals(a, n):\n return a[:n].copy(), a[n:].copy()\n\n\nfrom sklearn.metrics import precision_score\n\n\ndef print_score(m, X_train, y_train, X_valid, y_valid):\n res = [\n (y_train, m.predict_proba(X_train)[:, 1]),\n roc_auc_score(y_valid, m.predict_proba(X_valid)[:, 1]),\n precision_score(y_train, m.predict(X_train)), precision_score(y_valid, m.predict(X_valid))]\n if hasattr(m, 'oob_score_'):\n res.append(m.oob_score_)\n print(res)\n\n\ndef calculate_gender_centrality():\n gender_centrality = pd.read_csv(f\"{OUTPUT_PATH}/gender.csv\", index_col=0)\n\n gender_centrality[\"rank_pagerank\"] = gender_centrality.groupby(\"movie_name\")[\"pagerank\"].rank(\n ascending=False).astype(int)\n rank_pagerank = pd.pivot_table(gender_centrality[[\"gender\", \"rank_pagerank\"]], index=\"gender\",\n columns=\"rank_pagerank\", aggfunc=len).T\n rank_pagerank[\"F%\"] = rank_pagerank[\"F\"] / (rank_pagerank[\"F\"] + rank_pagerank[\"M\"])\n rank_pagerank[\"M%\"] = 
rank_pagerank[\"M\"] / (rank_pagerank[\"F\"] + rank_pagerank[\"M\"])\n for gender in set().union(gender_centrality.gender.values):\n gender_centrality[gender] = gender_centrality.apply(lambda _: int(gender in _.gender), axis=1)\n gender_centrality = gender_centrality.sort_values([\"movie_name\", \"rank_pagerank\"])\n return gender_centrality\n\n\ndef get_female_in_top_10_roles():\n gender_centrality = calculate_gender_centrality()\n gender_centrality_movies = gender_centrality[gender_centrality[\"rank_pagerank\"] < 11].groupby(\"movie_name\").agg(\n [\"sum\", \"count\"])\n female_in_top_10 = pd.DataFrame()\n female_in_top_10[\"F_top10\"] = gender_centrality_movies[\"F\"][\"sum\"] / gender_centrality_movies[\"F\"][\"count\"]\n female_in_top_10[\"year\"] = gender_centrality_movies[\"year\"][\"sum\"] / gender_centrality_movies[\"year\"][\"count\"]\n female_in_top_10[\"movie_name\"] = gender_centrality_movies.index.str.replace(\" - roles\", \"\")\n female_in_top_10[\"year\"] = female_in_top_10[\"year\"].astype(int)\n return female_in_top_10\n\n\ndef get_relationship_triangles():\n triangles = SFrame.read_csv(f\"{OUTPUT_PATH}/triangles.csv\", usecols=[\"0\", \"1\", \"2\", \"3\", \"4\"])\n triangles_gender = triangles.apply(\n lambda x: [imdb_data.get_actor_gender(x[\"0\"]), imdb_data.get_actor_gender(x[\"1\"]),\n imdb_data.get_actor_gender(x[\"2\"])])\n triangles_gender = triangles_gender.unpack()\n triangles_gender[\"movie\"] = triangles[\"3\"]\n triangles_gender[\"year\"] = triangles[\"4\"]\n triangles_gender = triangles_gender.dropna()\n triangles_gender = triangles_gender.join(imdb_data.title, {\"movie\": \"primaryTitle\", \"year\": \"startYear\"})\n\n triangles_gender[\"1\"] = triangles_gender[\"X.0\"] == \"M\"\n triangles_gender[\"2\"] = triangles_gender[\"X.1\"] == \"M\"\n triangles_gender[\"3\"] = triangles_gender[\"X.2\"] == \"M\"\n triangles_gender[\"total_men\"] = triangles_gender[\"1\"] + triangles_gender[\"2\"] + triangles_gender[\n \"3\"]\n\n triangles_gender[\"genres\"] = triangles_gender[\"genres\"].apply(lambda x: x.split(\",\"))\n\n return triangles_gender\n\n\ndef count_triangles():\n triangles_gender_bin = get_relationship_triangles()\n\n triangles_df = triangles_gender_bin.to_dataframe()\n triangles_df = triangles_df[triangles_df[\"genres\"].notnull()]\n for genre in set().union(*triangles_df.genres.values):\n triangles_df[genre] = triangles_df.apply(lambda _: int(genre in _.genres), axis=1)\n triangles_df = triangles_df.drop([\"1\", \"2\", \"3\", \"genres\"], axis=1)\n triangles_df = triangles_df.rename(columns={\"total_men\": \"Males in triangle\"})\n piv = pd.pivot_table(triangles_df, columns=\"Males in triangle\", values=genres, aggfunc=np.sum)\n piv[\"total\"] = piv[0] + piv[1] + piv[2] + piv[3]\n for i in range(4):\n piv[i] = piv[i] / piv[\"total\"]\n return piv\n\n\ndef triangles():\n triagles_gender = get_relationship_triangles()\n\n moive_triangle = triagles_gender.groupby([\"movie\", \"year\", \"total\"], operations={'count': agg.COUNT()})\n\n traingles_at_movie = moive_triangle.to_dataframe().pivot_table(index=[\"movie\", \"year\"], values=\"count\",\n columns='total',\n aggfunc=lambda x: x)\n traingles_at_movie = traingles_at_movie.fillna(0)\n\n traingles_at_movie = traingles_at_movie.reset_index()\n return traingles_at_movie\n\n\nclass BechdelClassifier(object):\n\n def __init__(self):\n self.bechdel = SFrame.read_csv(f\"{DATA_PATH}/bechdel.csv\", column_type_hints={\"imdbid\": str})\n self.bechdel.sort(\"year\", False)\n self.bechdel[\"tconst\"] = \"tt\" + 
self.bechdel[\"imdbid\"]\n self.bechdel_imdb = imdb_data.title.join(self.bechdel)\n self.clf = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=5, random_state=1)\n self._graph_features = SFrame()\n\n @property\n def graph_features(self):\n if not self.graph_features:\n try:\n self.graph_features = SFrame.read_csv(f\"{DATA_PATH}/bechdel_features.csv\")\n except:\n t = triangles()\n self.graph_features = SFrame.read_csv(\"../temp/graph_features.csv\")\n\n self.graph_features = self.graph_features.join(SFrame(get_female_in_top_10_roles()),\n on={\"movie_name\": \"movie_name\", \"year\": \"year\"})\n self.graph_features = self.graph_features.join(SFrame(t), on={\"movie_name\": \"movie\", \"year\": \"year\"})\n self.graph_features[\"total_tri\"] = self.graph_features[\"0\"] + self.graph_features[\"1\"] + \\\n self.graph_features[\"2\"] + self.graph_features[\"3\"]\n for i in range(4):\n self.graph_features[f\"{i}%\"] = self.graph_features[str(i)] / self.graph_features[\"total_tri\"]\n\n self.graph_features.save(f\"{DATA_PATH}/bechdel_features.csv\", \"csv\")\n return self.graph_features\n\n @graph_features.setter\n def graph_features(self, value):\n self._graph_features = value\n\n def build_dataset(self):\n\n self.graph_features = imdb_data.title.filter_by(\"movie\", \"titleType\").join(self.graph_features,\n on={\"primaryTitle\": \"movie_name\",\n \"startYear\": \"year\"})\n self.graph_features = self.graph_features[self.graph_features[\"node_number\"] > 5]\n bechdel_ml = self.graph_features.join(self.bechdel_imdb,\n on={\"primaryTitle\": \"primaryTitle\", \"startYear\": \"year\"}, how='left')\n\n bechdel_ml = bechdel_ml[bechdel_ml[\"genres\"] != None]\n bechdel_ml = bechdel_ml.to_dataframe()\n bechdel_ml[\"genres\"] = bechdel_ml.genres.str.split(\",\")\n for genre in set().union(*bechdel_ml.genres.values):\n bechdel_ml[genre] = bechdel_ml.apply(lambda _: int(genre in _.genres), axis=1)\n\n train = bechdel_ml[bechdel_ml[\"rating\"].notnull()]\n val = bechdel_ml[bechdel_ml[\"rating\"].isnull()]\n val = val.fillna(0)\n train = train.fillna(0)\n train[\"rating\"] = train[\"rating\"] == 3\n\n self.val_title = val.pop('title')\n self.X_train = train.drop(drop_cols, axis=1)\n self.val = val.drop(drop_cols, axis=1)\n self.X_train = self.X_train.sort_values(\"startYear\")\n self.title = self.X_train.pop('title')\n self.y = self.X_train.pop(\"rating\")\n\n\n def triangles(self):\n triagles_gender = get_relationship_triangles()\n # triagles_gender[\"1\"] = triagles_gender[\"X.0\"] == \"M\"\n # triagles_gender[\"2\"] = triagles_gender[\"X.1\"] == \"M\"\n # triagles_gender[\"3\"] = triagles_gender[\"X.2\"] == \"M\"\n # triagles_gender[\"total\"] = triagles_gender[\"1\"] + triagles_gender[\"2\"] + triagles_gender[\"3\"]\n\n moive_triangle = triagles_gender.groupby([\"movie\", \"year\", \"total_men\"], operations={'count': agg.COUNT()})\n # type(moive_triangle)\n traingles_at_movie = moive_triangle.to_dataframe().pivot_table(index=[\"movie\", \"year\"], values=\"count\",\n columns='total_men',\n aggfunc=lambda x: x)\n traingles_at_movie = traingles_at_movie.fillna(0)\n\n traingles_at_movie = traingles_at_movie.reset_index()\n # bechdel_triangles = SFrame(traingles_at_movie).join(self.bechdel_imdb, {\"tconst\": \"tconst\"})\n return traingles_at_movie\n\n def train_val(self, additional_metrics={}):\n n_valid = 1000\n y_pred = []\n X_train, X_valid = split_vals(self.X_train, len(self.X_train) - n_valid)\n y_train, y_valid = split_vals(self.y, len(self.X_train) - n_valid)\n self.clf.fit(X_train, 
y_train)\n print_score(self.clf, X_train, y_train, X_valid, y_valid)\n # from sklearn.metrics import f1_score\n for k, m in additional_metrics:\n if not y_pred:\n y_pred = self.clf.predict(X_valid)\n print(f\"{k}: {m(y_valid, y_pred)}\")\n\n def train(self):\n self.clf = RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=5, random_state=1)\n self.clf.fit(self.X_train, self.y)\n return self.clf\n\n def dataset_to_csv(self, path):\n pd.concat([self.X_train, self.y], axis=1).to_csv(path, index=False)\n\n\nif __name__ == \"__main__\":\n # count_triangles()\n b = BechdelClassifier()\n b.build_dataset()\n # print(b.train_test())\n rfc = b.train()\n v = rfc.predict_proba(b.val)[:, 1]\n print(v.mean())\n b.val[\"decade\"] = b.val[\"startYear\"] // 10\n for y, d in b.val.groupby(\"decade\"):\n if len(d) > 10:\n d.pop(\"decade\")\n v = rfc.predict_proba(d)[:, 1]\n print(y, v.mean(), len(d))\n\n for g in genres:\n print(f\"{g}:\")\n x = b.val.iloc[b.val[g].nonzero()]\n for y, d in x.groupby(\"decade\"):\n if len(d) > 10:\n d.pop(\"decade\")\n v = rfc.predict_proba(d)[:, 1]\n print(y, v.mean(), len(d))\n", "id": "8194762", "language": "Python", "matching_score": 2.5756030082702637, "max_stars_count": 1, "path": "subs2network/bechdel_classifier.py" }, { "content": "import turicreate as tc\nimport turicreate.aggregate as agg\n\n\n# data from http://www.ssa.gov/oact/babynames/names.zip\n# and from wikitree\ndef create_ground_truth_names(baby_names_path, wikitree_users_path, ratio=0.9):\n \"\"\"\n Createing SFrame with statistics on first name gender probability using data from WikiTree and SSA\n :param baby_names_path: the file to SSA baby names files\n :param wikitree_users_path: link to file with WikiTree names\n :param ratio: the ratio that above it the name gender is considered male\n :return: SFrame with data regarding first name gender\n :rtype: tc.SFrame\n :note: first names data files can be downloaded from http://www.ssa.gov/oact/babynames/names.zip and\n https://www.wikitree.com/wiki/Help:Database_Dumps\n \"\"\"\n sf = tc.SFrame.read_csv(\"%s/*.txt\" % baby_names_path, header=False)\n sf = sf.rename({'X1': 'First Name', 'X2': 'Gender', 'X3': 'Count'})\n\n w_sf = tc.SFrame.read_csv(wikitree_users_path, delimiter=\"\\t\", header=True)\n w_sf = w_sf[['Preferred Name', 'Gender']]\n w_sf = w_sf.rename({'Preferred Name': 'First Name'})\n w_sf = w_sf[w_sf['Gender'] != 0]\n w_sf['First Name'] = w_sf['First Name'].apply(lambda n: n.split()[0] if len(n) > 0 else '')\n w_sf = w_sf[w_sf['First Name'] != '']\n w_sf['Gender'] = w_sf['Gender'].apply(lambda g: 'M' if g == 1 else 'F')\n w_sf = w_sf.groupby(['First Name', 'Gender'], {'Count': agg.COUNT()})\n\n sf = sf.append(w_sf)\n sf['First Name'] = sf['First Name'].apply(lambda n: n.lower())\n g = sf.groupby(['<NAME>', 'Gender'], agg.SUM('Count'))\n\n g['stat'] = g.apply(lambda r: (r['Gender'], r['Sum of Count']))\n sf = g.groupby('First Name', {'Stats': agg.CONCAT('stat')})\n sf['Total Births'] = sf['Stats'].apply(lambda l: sum([i[1] for i in l]))\n sf['Total Males'] = sf['Stats'].apply(lambda l: sum([i[1] for i in l if i[0] == 'M']))\n sf['Percentage Males'] = sf.apply(lambda r: float(r['Total Males']) / r['Total Births'])\n sf = sf[sf['Total Births'] >= 5]\n\n def get_name_gender(p):\n if p >= ratio:\n return 'Male'\n if p <= (1 - ratio):\n return 'Female'\n return 'Unisex'\n\n sf['Gender'] = sf['Percentage Males'].apply(lambda p: get_name_gender(p))\n sf = sf.remove_column('Stats')\n\n return sf\n", "id": "7898473", "language": "Python", 
"matching_score": 1.5502325296401978, "max_stars_count": 1, "path": "ScienceDynamics/sframe_creators/gender_classifier.py" }, { "content": "import math\n\n\ndef json2nvd3(j):\n d = {}\n for k, v in j.iteritems():\n if k == 'keywords':\n d[k] = keywords_clean(v)\n elif 'gender' in k:\n d[k] = genderdata2nvd3(v)\n else:\n d[k] = data2nvd3(v)\n return d\n\n\ndef data2nvd3(d):\n d = {int(k): v for k, v in d.iteritems() if (isinstance(v, float) or isinstance(v, int)) and not math.isnan(v)}\n keys = d.keys()\n keys.sort()\n l = []\n for k in keys:\n l.append({'x': k, 'y': d[k]})\n\n return l\n\n\ndef genderdata2nvd3(d):\n d = {int(k): v for k, v in d.iteritems()}\n keys = d.keys()\n keys.sort()\n l = []\n for k in keys:\n gender_dict = d[k]\n if \"Female\" not in gender_dict and \"Male\" not in gender_dict:\n continue\n if \"Female\" not in gender_dict:\n gender_dict[\"Female\"] = 0\n if \"Male\" not in gender_dict:\n gender_dict[\"Male\"] = 0\n\n y = float(gender_dict[\"Female\"]) / (gender_dict[\"Female\"] + gender_dict[\"Male\"])\n l.append({'x': k, 'y': y})\n return l\n\n\ndef keywords_clean(d):\n clean_dict = {}\n for k, v in d.iteritems():\n if v == {}:\n continue\n clean_dict[int(k)] = v\n return clean_dict\n", "id": "7842115", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "ScienceDynamics/visualization/parse_data.py" }, { "content": "class AuthorNotFound(Exception):\n pass", "id": "11023161", "language": "Python", "matching_score": 1.8463276624679565, "max_stars_count": 1, "path": "ScienceDynamics/exceptions.py" }, { "content": "\n\nclass SubtitleNotFound(Exception):\n pass\n\n\nclass CastNotFound(Exception):\n pass\n", "id": "12221021", "language": "Python", "matching_score": 0, "max_stars_count": 1, "path": "subs2network/exceptions.py" }, { "content": "import nltk\nnltk.download('names')\nnltk.download('punkt')\n# en_core_web_sm\nnltk.download('words')\n", "id": "7806367", "language": "Python", "matching_score": 1.0691452026367188, "max_stars_count": 1, "path": "subs2network/first_run.py" }, { "content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\nimport codecs, os\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n readme = \"\\n\" + f.read()\n\nrequirements = [\n 'matplotlib==3.0.3',\n 'networkx==2.2',\n 'pysrt',\n 'sendgrid',\n 'babelfish',\n 'numpy',\n 'turicreate',\n 'IMDbPY',\n 'subliminal',\n 'stop-words',\n 'fuzzywuzzy',\n 'spacy',\n 'python-dotenv',\n 'tmdbsimple',\n 'tqdm',\n 'nltk==3.3',\n 'python-Levenshtein',\n 'scikit-learn>=0.20.1'\n]\nsetup_requirements = ['pytest-runner', ]\n\ntest_requirements = ['pytest', ]\n\nsetup(\n author=\"<NAME>\",\n author_email='<EMAIL>',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n\n 'Programming Language :: Python :: 3.6',\n ],\n description=\"Python Boilerplate contains all the boilerplate you need to create a Python package.\",\n install_requires=requirements,\n license=\"Apache Software License 2.0\",\n long_description=readme,\n include_package_data=True,\n keywords='subs2network',\n name='subs2network',\n packages=find_packages(include=['subs2network']),\n setup_requires=setup_requirements,\n test_suite='tests',\n tests_require=test_requirements,\n url='https://github.com/data4goodlab/subs2network/',\n 
version='0.4.0',\n zip_safe=False,\n)\n\nimport nltk\nnltk.download('names')\nnltk.download('punkt')\nnltk.download('words')\n", "id": "2076933", "language": "Python", "matching_score": 1.556907057762146, "max_stars_count": 1, "path": "setup.py" }, { "content": "# -*- coding: utf-8 -*-\n\n\"\"\"Top-level package for subs2network.\"\"\"\n\n__author__ = \"\"\"<NAME>\"\"\"\n__email__ = '<EMAIL>'\n__version__ = '0.4.2'\n", "id": "10779844", "language": "Python", "matching_score": 0.802996814250946, "max_stars_count": 1, "path": "subs2network/__init__.py" }, { "content": "from functools import lru_cache\nfrom ScienceDynamics.exceptions import AuthorNotFound\nfrom ScienceDynamics.config.log_config import logger\n\n\nclass AuthorsFetcher(object):\n def __init__(self, db_client, db_name=\"journals\", authors_features_collection=\"authors_features\"):\n \"\"\"\n Construct an author's feature object\n :param db_client: a MONGO DB client\n :param db_name: database name\n :param authors_features_collection: the name of the MONGO collection which contains the authors features\n\n \"\"\"\n self._db = db_client[db_name]\n self._authors_features_collection = self._db[authors_features_collection]\n\n @lru_cache(maxsize=50000)\n def get_author_data(self, author_id=None, author_name=None):\n \"\"\"\n Returns authors data as dict there are two types of data 'features' that include the author general features,\n and 'features_by_year' that include the author's features in specific year, such as papers.\n :param author_id: the author's id\n :param author_name: the author's name (can be regex)\n :return: dict with the author's data\n :rtype: dict\n \"\"\"\n if author_id is not None:\n return self._get_author_by_id(author_id)\n if author_name is not None:\n return self._get_author_by_name(author_name)\n return None\n\n def get_author_ids_by_name(self, author_name):\n \"\"\"\n Returns a list of ids for authors with the input author name\n :param author_name: author's full name or regex object\n :return: list of author_ids\n :rtype: list<str>\n :note the author_name can be regex object\n \"\"\"\n try:\n return [d[\"Author ID\"] for d in self._authors_features_collection.find({\"Author name\": author_name})]\n\n except StopIteration:\n logger.warning('Failed to find author %s ' % author_name)\n\n def _get_author_by_id(self, author_id):\n \"\"\"\n Return author data\n :param author_id: author's id\n :return: return the author's data for author who match the input id\n :rtype: dict\n \"\"\"\n j = {}\n logger.debug(\"Fetching author %s\" % author_id)\n\n try:\n j = self._authors_features_collection.find({\"Author ID\": author_id}).next()\n\n except StopIteration:\n logger.warning(f'Failed to fetch author {author_id} features')\n raise AuthorNotFound()\n # converting the year keys back to int from string\n for k, d in j.items():\n if \"by Year\" in k and d is not None:\n j[k] = {int(y): v for y, v in d.items()}\n return j\n\n def _get_author_by_name(self, author_name):\n \"\"\"\n Return author data\n :param author_name: author's full name\n :return: return the author's data for the first author who match the input full name\n :rtype: dict\n \"\"\"\n j = {}\n logger.debug(\"Fetching author %s\" % author_name)\n\n try:\n logger.debug(f\"Fetching first author matching name {author_name}\")\n j = self._authors_features_collection.find({\"Author name\": author_name}).next()\n\n except StopIteration:\n logger.warning(f'Failed to fetch author {author_name} features')\n # converting the year keys back to int from string\n for 
k, d in j.items():\n if \"by Year\" in k:\n j[k] = {int(y): v for y, v in d.items()}\n return j\n", "id": "190517", "language": "Python", "matching_score": 2.9951682090759277, "max_stars_count": 1, "path": "ScienceDynamics/fetchers/authors_fetcher.py" }, { "content": "from functools import lru_cache\nfrom ScienceDynamics.config.log_config import logger\n\n\nclass PapersFetcher(object):\n def __init__(self, db_client, db_name=\"journals\", papers_collection=\"papers_features\",\n papers_join_collection=\"aminer_mag_papers\"):\n self._db = db_client[db_name]\n self._papers_features_collection = self._db[papers_collection]\n self._papers_join_collections = self._db[papers_join_collection]\n\n @lru_cache(maxsize=1000000)\n def get_paper_data(self, paper_id):\n j = {}\n #logger.debug(f\"Fetching paper {paper_id}\")\n try:\n j = self._papers_join_collections.find({\"MAG Paper ID\": paper_id}).next()\n\n except StopIteration:\n try:\n j = self._papers_features_collection.find({\"Paper ID\": paper_id}).next()\n except StopIteration:\n return None\n\n return j\n\n def get_journal_papers_data(self, journal_id):\n j = {}\n logger.debug(f\"Fetching journal {journal_id} papers\")\n try:\n c = self._papers_features_collection.find({'Journal ID mapped to venue name': journal_id})\n l = list(c)\n except StopIteration:\n return []\n\n return l\n\n def get_papers_ids_by_issn(self, issn):\n logger.debug(f\"Fetching papers with ISSN {issn}\")\n try:\n return [j[\"MAG Paper ID\"] for j in self._papers_join_collections.find({\"issn\": issn})]\n\n except StopIteration:\n return []\n", "id": "7698865", "language": "Python", "matching_score": 3.1032001972198486, "max_stars_count": 1, "path": "ScienceDynamics/fetchers/papers_fetcher.py" }, { "content": "from functools import lru_cache\nfrom ScienceDynamics.config.configs import VenueType, AMINER_MAG_JOIN_SFRAME, SJR_SFRAME, EXTENDED_PAPERS_SFRAME, STORAGE_PATH\nfrom ScienceDynamics.config.log_config import logger\nimport turicreate as tc\nimport turicreate.aggregate as agg\nimport pathlib\nfrom ScienceDynamics.datasets.microsoft_academic_graph import MicrosoftAcademicGraph\nfrom ScienceDynamics.datasets.sjr import SJR\n\n\nclass VenueFetcher(object):\n def __init__(self, db_client, db_name=\"journals\", papers_collection=\"papers_features\",\n papers_join_collection=\"aminer_mag_papers\", sjr_collection=\"sjr_journals\"):\n \"\"\"\n Construct venue fetcher object whcih retreive data about a venue\n :param db_client: MongoDB client object\n :param db_name: database name\n :param papers_collection: papers collections\n :param papers_join_collection: the collection which contains papers both from the AMiner and MAG datasets\n :param sjr_collection: SJR collection\n\n \"\"\"\n self._db = db_client[db_name]\n self._papers_collection = self._db[papers_collection]\n self._papers_join_collection = self._db[papers_join_collection]\n self._sjr_collection = self._db[sjr_collection]\n \n \n\n\n def _get_papers_ids(self, venue_id, venue_name, venue_type, use_join_col=False):\n col = self._papers_collection\n paper_id_col = 'Paper ID'\n if use_join_col:\n col = self._papers_join_collection\n paper_id_col = 'MAG Paper ID'\n papers_ids = []\n if venue_id is not None:\n if venue_type == VenueType.journal:\n papers_ids += [j[paper_id_col] for j in col.find({\"Journal ID mapped to venue name\": venue_id})]\n if venue_type == VenueType.conference:\n papers_ids += [j[paper_id_col] for j in col.find({\"Conference ID mapped to venue name\": venue_id})]\n\n if venue_id is None and 
venue_name is not None: # get papers by name only if venue id is missing\n papers_ids += [j[paper_id_col] for j in col.find({\"Original venue name\": venue_name})]\n\n return list(set(papers_ids))\n\n @lru_cache(maxsize=100)\n def get_papers_ids_dict(self, venue_id, venue_name, venue_type=VenueType.journal, issn_list=()):\n \"\"\"\n Returns the venue's paper ids both that appear in the MAG paper dataset and in the AMingerMag join dataset\n :param venue_type:\n :param venue_id: the MAG venue id\n :param venue_name: the venue's name\n :param issn_list: ISSNs list\n :return: dict with the venue's papers ids. The dict has two keys 'papers_ids' & 'join_papers_ids'\n :rtyoe: dict\n :note: ISSN format \\d{4}-\\d{4} (with '-')\n \"\"\"\n logger.info(f\"Getting papers id of venue_id={venue_id},venue_name={venue_name}. and issn_list={issn_list}\")\n papers_ids_dict = {'papers_ids': self._get_papers_ids(venue_id, venue_name, venue_type)}\n\n l = self._get_papers_ids(venue_id, venue_name, venue_type, use_join_col=True)\n for issn in issn_list:\n l += [j['MAG Paper ID'] for j in self._papers_join_collection.find({\"issn\": issn})]\n papers_ids_dict['join_papers_ids'] = list(set(l))\n\n return papers_ids_dict\n\n def get_sjr_dict(self, venue_name, issn_list=()):\n \"\"\"\n Get's the venue SJR data from venue name's or ISSN values\n :param venue_name: venue names\n :param issn_list: issn values list (optional)\n :return: list of the matching ISSN journals from the SJR dataset\n :rtype: list<dict>\n :noteo: isssn values in SJR dataset are 8 digits\n \"\"\"\n logger.info(f\"Get SJR data of venue_name={venue_name}, issn_list={issn_list}\")\n sjr_data = {}\n l = [j for j in self._sjr_collection.find({\"Title\": venue_name})]\n for issn in issn_list:\n issn = issn.replace('-', '')\n l += [j for j in self._sjr_collection.find({\"ISSN\": issn})]\n\n for j in l:\n if j[\"ISSN\"] not in sjr_data:\n sjr_data[j[\"ISSN\"]] = []\n sjr_data[j[\"ISSN\"]].append(j)\n return sjr_data\n\n @staticmethod\n def get_valid_venues_papers_ids_sframe(min_ref_number, min_journal_papers_num):\n\n # Criteria I: we use only journals that have paper with valid DOI that appears in both AMiner and MAG datasets\n sf = tc.load_sframe(str(AMINER_MAG_JOIN_SFRAME))\n sf['Original venue name'] = sf['Original venue name'].apply(lambda n: n.lower())\n g = sf.groupby('Journal ID mapped to venue name', {'venue name': agg.CONCAT('Original venue name'),\n 'issn': agg.CONCAT('issn')})\n\n g['issn'] = g['issn'].apply(lambda l: list(set(l)))\n g['venue name'] = g['venue name'].apply(lambda l: list(set(l)))\n\n # Criteria II: the journal as only signle name\n g = g[g['venue name'].apply(lambda l: len(l) == 1)]\n g.materialize()\n g['venue name'] = g['venue name'].apply(lambda l: l[0].strip())\n\n # Criteria III: the journal's name appears in SJR\n sjr_dict = VenueFetcher.get_sjr_journals_dict()\n g = g[g['venue name'].apply(lambda v: v in sjr_dict)]\n\n venues_ids = set(g['Journal ID mapped to venue name'])\n\n # Criteria IV: Each venue need to have at least min_journal_papers_num papers with at\n # least min_ref_number refs in each paper\n dataset_dir = pathlib.Path(STORAGE_PATH)\n mag_path = dataset_dir / \"MAG\"\n mag = MicrosoftAcademicGraph(mag_path)\n \n sf = mag.extended_papers[\n 'Journal ID mapped to venue name', 'Original venue name', 'Paper ID', 'Ref Number']\n sf = sf[sf['Ref Number'] >= min_ref_number]\n sf.materialize()\n sf = sf[sf['Journal ID mapped to venue name'].apply(lambda i: i in venues_ids)]\n sf['Journal name'] = sf['Original 
venue name'].apply(lambda n: n.lower().strip())\n sf.materialize()\n # Notice that with the full Papers SFrmae journal can have several names\n g = sf.groupby(['Journal ID mapped to venue name'],\n {'Count': agg.COUNT(), 'Paper IDs List': agg.CONCAT(\"Paper ID\"),\n 'Journals names': agg.CONCAT('Journal name')})\n g['Journals names'] = g['Journals names'].apply(lambda l: list(set(l)))\n g = g[g['Count'] >= min_journal_papers_num]\n g = g[g['Journals names'].apply(lambda l: len(l) == 1)]\n g['Journals names'] = g['Journals names'].apply(lambda l: l[0])\n g = g.rename({'Journals names': 'Journal name'})\n g.materialize()\n\n return g\n\n @staticmethod\n def get_valid_venues_papers_ids_sframe_from_mag(min_ref_number, min_journal_papers_num):\n \n dataset_dir = pathlib.Path(STORAGE_PATH)\n mag_path = dataset_dir / \"MAG\"\n mag = MicrosoftAcademicGraph(mag_path)\n \n sf = mag.extended_papers[\n 'Journal ID mapped to venue name', 'Original venue name', 'Paper ID', 'Ref Number']\n sf = sf[sf['Ref Number'] >= min_ref_number]\n sf.materialize()\n sf['Journal name'] = sf['Original venue name'].apply(lambda n: n.lower().strip())\n sf.materialize()\n g = sf.groupby(['Journal ID mapped to venue name'],\n {'Count': agg.COUNT(), 'Paper IDs List': agg.CONCAT(\"Paper ID\"),\n 'Journals names': agg.CONCAT('Journal name')})\n g['Journals names'] = g['Journals names'].apply(lambda l: list(set(l)))\n g = g[g['Count'] >= min_journal_papers_num]\n g = g[g['Journals names'].apply(lambda l: len(l) == 1)]\n g['Journals names'] = g['Journals names'].apply(lambda l: l[0])\n g = g.rename({'Journals names': 'Journal name'})\n g.materialize()\n return g\n\n @staticmethod\n def get_sjr_journals_dict():\n \"\"\" Returns a dict in which the keys are the journals names and the values are the journal issns\n \"\"\"\n dataset_dir = pathlib.Path(STORAGE_PATH)\n\n sjr_path = dataset_dir / \"SJR\"\n sjr = SJR(sjr_path)\n d = {}\n sf = sjr.data\n sf = sf[sf['Type'] == 'journal']\n sf.materialize()\n for r in sf:\n t = r['Title'].lower().strip()\n if t not in d:\n d[t] = []\n d[t].append(r['ISSN'])\n d = {k: set(v) for k, v in d.items()}\n return d\n", "id": "765800", "language": "Python", "matching_score": 3.214639663696289, "max_stars_count": 1, "path": "ScienceDynamics/fetchers/venue_fetcher.py" }, { "content": "from pymongo import MongoClient\nimport turicreate as tc\nfrom ScienceDynamics.datasets.mag_authors import AuthorsFeaturesExtractor\n\nfrom ScienceDynamics.config.configs import AUTHROS_FEATURES_SFRAME, EXTENDED_PAPERS_SFRAME, SJR_SFRAME, \\\n AMINER_MAG_JOIN_SFRAME, MONGO_IP\nfrom ScienceDynamics.config.log_config import logger\n\n\nclass MongoDBConnector(object):\n def __init__(self, host=\"localhost\", port=27017):\n \"\"\"\n Create connection to the relevant mongo server\n\n \"\"\"\n self._client = MongoClient(host, port)\n\n def insert_sframe(self, sf, db_name, collection_name, insert_rows_iter=100000, index_cols_list=()):\n \"\"\"\n Insert the input SFrame into the input DB and collection\n :param sf: SFrame object\n :param db_name: DB name\n :param collection_name: collection names\n :param insert_rows_iter: how many rows to insert in each iteration\n :param index_cols_list: list of columns to add index to each element in the list is atuple with the column names\n and if the column is unique\n\n\n \"\"\"\n rows_num = len(sf)\n collection = self._client[db_name][collection_name]\n for i in range(0, rows_num, insert_rows_iter):\n logger.info(\"Inserting rows %s - %s to %s.%s\" % (i, i + insert_rows_iter, db_name, 
collection_name))\n tmp_sf = sf[i: i + insert_rows_iter]\n json_list = [r for r in tmp_sf]\n collection.insert_many(json_list)\n for i in index_cols_list:\n self.create_index(db_name, collection_name, i[0], unique=i[1])\n\n def create_index(self, db_name, collection_name, index_col, unique):\n if index_col is None:\n return\n collection = self._client[db_name][collection_name]\n collection.create_index(index_col, unique=unique)\n\n def get_collection(self, db_name, collection_name):\n \"\"\"\n Get a Mongo collection object\n :param db_name: DB name\n :param collection_name: collection name\n :return: Mongo collection\n \"\"\"\n return self._client[db_name][collection_name]\n\n @property\n def client(self):\n \"\"\"\n Return the mongo client\n :return: return Mongo Client\n :rtype: MongoClient\n \"\"\"\n return self._client\n\n\ndef _convert_sframe_dict_key_to_str(sf, col_names):\n for c in col_names:\n sf[c] = sf[c].apply(lambda d: {str(int(float(k))): [i for i in v if i is not ''] for k, v in d.items()})\n # remove empty lists\n for c in col_names:\n sf[c] = sf[c].apply(lambda d: {k: v for k, v in d.items() if v != []})\n sf.materialize()\n return sf\n\n\ndef load_sframes(mag, sjr, joined):\n # from ScienceDynamics.config.configs import DATASETS_BASE_DIR\n # mag = MicrosoftAcademicGraph(DATASETS_BASE_DIR / \"MicrosoftAcademicGraph.zip\")\n \"\"\"\n Load the journals/authors sframes to Mongo\n \"\"\"\n logger.info(\"Loading authors features\")\n md = MongoDBConnector()\n a = AuthorsFeaturesExtractor(mag)\n\n sf = a.authors_features\n logger.info(\"Converting\")\n\n sf = _convert_sframe_dict_key_to_str(sf, [c for c in sf.column_names() if \"Year\" in c])\n sf['Sequence Number by Year Dict'] = sf['Sequence Number by Year Dict'].apply(\n lambda d: {k: [str(int(float(i))) for i in v] for k, v in d.items()})\n sf.materialize()\n index_list = [('Author ID', True), ('Author name', False)]\n md.insert_sframe(sf, 'journals', 'authors_features', index_cols_list=index_list)\n\n logger.info(\"Loading papers features\")\n sf = mag.extended_papers\n index_list = [('OriginalVenue', False), ('PaperId', True), ('ConferenceSeriesId', False), ('ConferenceInstanceId', False),\n ('JournalId', False)]\n md.insert_sframe(sf, 'journals', 'papers_features', index_cols_list=index_list)\n\n logger.info(\"Loading SJR features\")\n sf = sjr.data\n sf = sf.rename({c: c.replace(\".\", \"\") for c in sf.column_names()})\n sf['Title'] = sf['Title'].apply(lambda t: t.encode('utf-8'))\n index_list = [('Title', False), ('ISSN', False)]\n md.insert_sframe(sf, 'journals', 'sjr_journals', index_cols_list=index_list)\n\n sf = joined.aminer_mag_links_by_doi\n sf = sf.rename({c: c.replace(\".\", \"\") for c in sf.column_names()})\n index_list = [('OriginalVenue', False), ('MAG Paper ID', True), ('Conference ID mapped to venue name', False),\n ('Journal ID mapped to venue name', False), ('issn', False)]\n\n\n md.insert_sframe(sf, 'journals', 'aminer_mag_papers', index_cols_list=index_list)\n", "id": "2242192", "language": "Python", "matching_score": 3.6440699100494385, "max_stars_count": 1, "path": "ScienceDynamics/mongo_connector.py" }, { "content": "import sys\nimport zipfile\n\nfrom ScienceDynamics.config.configs import TMP_DIR, SFRAMES_BASE_DIR\nfrom ScienceDynamics.config.log_config import logger\nimport turicreate as tc\nimport turicreate.aggregate as agg\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom ScienceDynamics.datasets.configs import FIRST_NAMES_SFRAME\nfrom ScienceDynamics.datasets.utils import download_file, 
save_sframe\n\n\ndef _entities_years_list_to_dict(l):\n \"\"\"\n Create a dict of entites by year\n :param l: list which each element is a size two tuple (year, entity_id\n :return: a dict in which each key is a year and each value is a list of entities\n :rtype: dict<int,list>\n \"\"\"\n d = {}\n for y, eid in l:\n y = str(y) # for easier mongo insert key need to be str\n if y not in d:\n d[y] = []\n d[y].append(eid)\n return d\n\n\nclass AuthorsFeaturesExtractor(object):\n def __init__(self, mag, paper_min_ref=5, fields=None):\n \"\"\"\n Consturct and Author Features Extractor object\n :param paper_min_ref: minimum number of references\n \"\"\"\n self._mag = mag\n self._paper_min_ref = paper_min_ref\n self._p_sf = self._get_extended_papers_sframe(paper_min_ref)\n self._paper_authors_years = None\n self._paper_author_affiliation_join_sframe = None\n self._sframe_dir = SFRAMES_BASE_DIR\n if not Path(FIRST_NAMES_SFRAME).exists():\n dataset_zip = str(FIRST_NAMES_SFRAME).replace(\".sframe\",\".zip\")\n with zipfile.ZipFile(Path(dataset_zip), 'r') as f:\n f.extractall(SFRAMES_BASE_DIR)\n\n def _get_extended_papers_sframe(self, paper_min_ref, fields=None):\n \"\"\"\n Return SFrame with Extended Papers titles only for papers with the minimial input references number\n :param paper_min_ref: minimum number of references\n :return: SFrame with Papers data\n :rtype: tc.SFrame\n \"\"\"\n extended = self._mag.extended_papers\n if paper_min_ref is not None:\n sf_path = f\"{TMP_DIR}/extended_paper_min_ref_{paper_min_ref}.sfrmae\"\n if Path(sf_path).is_dir():\n return tc.load_sframe(sf_path)\n \n extended = extended[extended['Ref Number'] >= paper_min_ref]\n extended = extended[extended[\"Authors Number\"]<500]\n if fields is not None:\n fos = self._mag.fields_of_study.filter_by(fields, \"NormalizedName\")[\"FieldOfStudyId\"]\n papers = self._mag.paper_fields_of_study.filter_by(fos, \"FieldOfStudyId\")[\"PaperId\"]\n extended = extended.filter_by(papers, \"PaperId\")\n extended.save(sf_path)\n return extended\n\n @property\n @save_sframe(sframe=\"paper_authors_years.sframe\")\n def paper_authors_years(self):\n \"\"\"\n Return an SFrame in which each row consists of PaperId, AuthorId, Year\n :return: SFrame with Author and Paper by publication year data\n :rtype: tc.SFrame()\n \"\"\"\n if self._paper_authors_years is None:\n p_sf = self._p_sf[\"PaperId\", \"Year\"]\n self._paper_authors_years = self._mag.paper_author_affiliations[\n \"AuthorId\", \"PaperId\"] # 337000127 for all papers\n self._paper_authors_years = self._paper_authors_years.join(p_sf, on=\"PaperId\")\n return self._paper_authors_years\n \n @save_sframe(sframe=\"authors_papers_dict_sframe.sframe\")\n def get_authors_papers_dict_sframe(self):\n \"\"\"\n Create SFrame in which each row contains an AuthorId and a dict with the author's publication by year dict\n :return: SFrame with Authors ID and Papers by Years Dict columns\n :rtype: tc.SFrame\n \"\"\"\n logger.info(\"Calcualting authors' papers by year\")\n a_sf = self.paper_authors_years\n a_sf['Paper Year'] = a_sf.apply(lambda r: (r[\"Year\"], r[\"PaperId\"]))\n g = a_sf.groupby(\"AuthorId\", {\"Papers List\": agg.CONCAT(\"Paper Year\")})\n g['Papers by Years Dict'] = g[\"Papers List\"].apply(lambda l: _entities_years_list_to_dict(l))\n g = g.remove_column(\"Papers List\")\n return g\n \n @save_sframe(sframe=\"co_authors_dict_sframe.sframe\")\n def get_co_authors_dict_sframe(self):\n \"\"\"\n Create SFrame with each author's coauthors by year\n :return: SFrame with AuthorId and Coauthors 
by Years Dict\n :note: the function can take considerable amount of time to execute\n \"\"\"\n logger.info(\"Calcualting authors' coauthors by year\")\n sf = self.paper_authors_years\n sf = sf.join(sf, on='PaperId')\n sf = sf[sf['AuthorId'] != sf['AuthorId.1']]\n sf = sf.remove_column('Year.1')\n sf = sf.groupby(['AuthorId', 'Year'], {'Coauthors List': agg.CONCAT('AuthorId.1')})\n sf['Coauthors Year'] = sf.apply(lambda r: (r['Year'], r['Coauthors List']))\n sf = sf.groupby(\"AuthorId\", {'Coauthors list': agg.CONCAT('Coauthors Year')})\n sf['Coauthors by Years Dict'] = sf['Coauthors list'].apply(lambda l: {y: coa_list for y, coa_list in l})\n sf = sf.remove_column('Coauthors list')\n return sf\n\n def _get_author_feature_by_year_sframe(self, feature_name, feature_col_name):\n \"\"\"\n Create a SFrame with AuthorId and a dict with the author's input feature (feature_name) over the years values\n :param feature_name: input feature name\n :param feature_col_name: the Sframe column name which contains dict with the author feature_name values over the years\n :return: SFrame with AuthorId and feature_col_name columns\n :rtype: tc.SFrame\n \"\"\"\n logger.info(\"Calcualting authors feature %s by year\" % feature_name)\n a_sf = self.paper_author_affiliation_sframe['AuthorId', 'Year', feature_name]\n a_sf['Feature Year'] = a_sf.apply(lambda r: (int(r[\"Year\"]), r[feature_name]))\n g = a_sf.groupby(\"AuthorId\", {\"Feature List\": agg.CONCAT(\"Feature Year\")})\n g[feature_col_name] = g[\"Feature List\"].apply(lambda l: _entities_years_list_to_dict(l))\n g = g.remove_column(\"Feature List\")\n\n return g\n\n @property\n @save_sframe(sframe=\"paper_author_affiliation_join.sframe\")\n def paper_author_affiliation_sframe(self):\n \"\"\"\n Returns SFrame in whcih each row contains the AuthorId, PaperId, Year, ConferenceSeriesId, JournalId,\n OriginalVenue\n :return: SFrame with Authors and Papers Data\n :rtype: tc.SFrame\n \"\"\"\n if self._paper_author_affiliation_join_sframe is None:\n p_sf = self._p_sf[\n ['PaperId', 'Year', \"ConferenceSeriesId\", \"JournalId\",\n \"OriginalVenue\"]]\n a_sf = self._mag.paper_author_affiliations\n self._paper_author_affiliation_join_sframe = a_sf.join(p_sf, on=\"PaperId\")\n return self._paper_author_affiliation_join_sframe\n\n @property\n @save_sframe(sframe=\"authors_features.sframe\")\n def authors_features(self):\n \"\"\"\n Create Authors SFrame in which each row is unique AuthorId and the author's various features\n :return: SFrame with Authors features\n :rtype: tc. 
SFrame\n \"\"\"\n p_sf = self._p_sf[['PaperId']] # 22082741\n a_sf = self._mag.paper_author_affiliations[\"AuthorId\", \"PaperId\"]\n a_sf = a_sf.join(p_sf, on=\"PaperId\")\n a_sf = a_sf[[\"AuthorId\"]].unique()\n g = self.get_authors_papers_dict_sframe()\n a_sf = a_sf.join(g, on=\"AuthorId\", how=\"left\") # 22443094 rows\n g = self.get_co_authors_dict_sframe()\n a_sf = a_sf.join(g, on=\"AuthorId\", how='left')\n author_names = self._mag.author_names\n author_names[\"First Name\"] = author_names[\"NormalizedName\"].apply(lambda x: x.split(\" \")[0])\n a_sf = a_sf.join(author_names, on=\"AuthorId\", how=\"left\")\n g_sf = tc.load_sframe(str(FIRST_NAMES_SFRAME))\n a_sf = a_sf.join(g_sf, on={\"First Name\": \"First Name\"}, how=\"left\")\n\n feature_names = [(\"AffiliationId\", \"Affilation by Year Dict\"),\n ('AuthorSequenceNumber', 'Sequence Number by Year Dict'),\n (\"ConferenceSeriesId\", \"Conference ID by Year Dict\"),\n (\"JournalId\", \"Journal ID by Year Dict\"),\n (\"OriginalVenue\", \"Venue by Year Dict\")]\n for fname, col_name in tqdm(feature_names):\n f_sf = self._get_author_feature_by_year_sframe(fname, col_name)\n a_sf = a_sf.join(f_sf, on=\"AuthorId\", how='left')\n\n return a_sf\n", "id": "980200", "language": "Python", "matching_score": 3.708763837814331, "max_stars_count": 1, "path": "ScienceDynamics/datasets/mag_authors.py" }, { "content": "import turicreate as gl\nimport turicreate.aggregate as agg\nimport re\n# working on papers with at most 5 citations\nr_sf = gl.load_sframe('./PaperReferences.sframe')\n\nr_sf = r_sf.groupby('Paper ID', {'Ref Count': agg.COUNT()}) # There are 30058322 in the list\nr_sf.save('/data/sframes/PapersRefCount.sframe')\nr_sf = r_sf[r_sf['Ref Count'] >= 5] # left with 22,083,058\n\np_sf = gl.load_sframe(\"./Papers.sframe/\") # 126,903,970 rows\np_sf = r_sf.join(p_sf) # 22,082,741\np_sf.save('./PapersMin5Ref.sframe')\n\np_sf = gl.load_sframe('./PapersMin5Ref.sframe')\na_sf = gl.load_sframe('./PaperAuthorAffiliations.sframe/') # 337000127\nsf = p_sf[['Paper ID']].join(a_sf) # 86,561,861 rows\nsf = sf.join(p_sf, on=\"Paper ID\")\nsf.groupby(\"Author ID\", {'Papers Count': agg.COUNT_DISTINCT('Paper ID'),\n 'start_year': agg.MIN('Paper publish year'), 'last_year': agg.MAX('Paper publish year'),\n 'mean_ref_count': agg.AVG('Ref Count'), 'papers_list': agg.CONCAT('Paper ID'),\n 'journals_list': agg.CONCAT('Journal ID mapped to venue name'),\n 'conference_list': agg.CONCAT('Conference ID mapped to venue name'),\n 'affilation_list': agg.CONCAT('Affiliation ID')\n })\n\nsf = gl.SFrame()\nr = re.compile(r\"\\d{4}\")\nfor i in l:\n try:\n y = r.findall(i)[0]\n x = gl.SFrame.read_csv(\"%s/%s\" % (p, i))\n x['Year'] = y\n x['Total Docs'] = x['Total Docs. 
(%s)' % y]\n x = x['Title', 'H index', 'SJR Best Quartile', 'SJR', 'Type', 'Rank', 'Year', 'Total Docs']\n sf = sf.append(x)\n except:\n continue\n", "id": "7860919", "language": "Python", "matching_score": 1.3959391117095947, "max_stars_count": 1, "path": "ScienceDynamics/test.py" }, { "content": "#from ScienceDynamics.author import Author\nfrom collections import Counter\nimport numpy as np\nfrom functools import lru_cache\n\n\nclass AuthorsListAnalyzer(object):\n def __init__(self, authors_list):\n \"\"\"\n :param authors_list: authors list\n :type [Author]\n \"\"\"\n self._authors_list = []\n if authors_list is not None:\n self._authors_list = authors_list # type: list[Author]\n\n @lru_cache(maxsize=100)\n def get_author_academic_ages_list(self, year):\n \"\"\"\n Returns the authors academic age list in a year\n :param year: year\n :return: list of author academic ages\n :rtype: list of int\n \"\"\"\n return [a.get_academic_age(year) for a in self._authors_list]\n\n @lru_cache(maxsize=100)\n def get_publications_number_list(self, start_year, end_year):\n \"\"\"\n Returns the a list with the number of authors publications\n :param start_year: start year\n :param end_year: end year\n :return: list with the number of publication of each author\n :rtype: list of int\n \"\"\"\n return [a.number_of_papers(start_year, end_year) for a in self._authors_list]\n\n def get_average_age(self, year):\n \"\"\"\n Authors average academic age in specific year\n :param year: year\n :return: Average authors academic age\n :rtype: float\n \"\"\"\n return np.average(self.get_author_academic_ages_list(year))\n\n def get_median_age(self, year):\n \"\"\"\n Authors median academic age in specific year\n :param year: year\n :return: Medan authors academic age\n :rtype: float\n \"\"\"\n return np.median(self.get_author_academic_ages_list(year))\n\n def get_average_publication_number(self, start_year, end_year):\n \"\"\"\n Return authors average publications number\n :param start_year: start year\n :param end_year: end year\n :return: the authors number of publication between years\n :rtype: float\n \"\"\"\n return np.average(self.get_publications_number_list(start_year, end_year))\n\n def get_median_publication_number(self, start_year, end_year):\n \"\"\"\n Return authors median publications number\n :param start_year: start year\n :param end_year: end year\n :return: the authors number of publication between years\n :rtype: float\n \"\"\"\n return np.median(self.get_publications_number_list(start_year, end_year))\n\n def get_gender_stats(self):\n \"\"\"\n Dict return authors' gender with the number of authors in each gender\n :return: Counter object with the number of authors in each gender\n :rtype: Counter\n \"\"\"\n return Counter([a.gender for a in self._authors_list])\n\n def get_female_probabilities(self, remove_nulls=True):\n \"\"\"\n Returns a list if the probability of each author's first name to be of a female\n :param remove_nulls: if True remove None values from list\n :return: list of of probability of the author to be female\n :rtype: list<float>\n \"\"\"\n if remove_nulls:\n return [a.female_probability for a in self._authors_list if a.female_probability is not None]\n return [a.female_probability for a in self._authors_list]\n\n def get_avg_female_probabilities(self):\n \"\"\"\n Returns the authors average female probability based on the authors' first names\n :return: return the authors average female probability\n :rtype: float\n \"\"\"\n return 
np.average(self.get_female_probabilities(remove_nulls=True))\n", "id": "6053750", "language": "Python", "matching_score": 1.7138700485229492, "max_stars_count": 1, "path": "ScienceDynamics/authors_list_analyzer.py" }, { "content": "import altair as alt\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n\ndef filter_sframe_by_years(sf, start_year, end_year):\n \"\"\"\n Filter the sframe row to include only rows between the start & end years\n :param sf: SFrame which include \"Year\" column\n :param start_year: start year\n :param end_year: end year\n :return: return SFrame with row with \"Year\" column between the start & end year\n :rtype: tc.SFrame\n \"\"\"\n if start_year is not None:\n sf = sf[sf['Year'] >= start_year]\n if end_year is not None:\n sf = sf[sf['Year'] <= end_year]\n return sf\n\n\ndef draw_feature_yearly_func_value(sf, col_name, var_name, start_year, end_year, func_name=\"agg.AVG\", title=None):\n \"\"\"\n Return a Altair chart of the input feature between the start & end year using Turicreate aggregation function\n :param sf: SFrame object\n :param col_name: column name\n :param var_name: the new aggregate varilable name (this also will be the chart's Y-axis\n :param start_year: start year\n :param end_year: end year\n :param func_name: Turicreate aggregation function name, such as agg.AVG, agg,MAX, and etc\n :param title: chart title\n :return: chart with the with the yearly aggregated values of the input column\n :rtyoe: alt.Chart\n \"\"\"\n sf = filter_sframe_by_years(sf, start_year, end_year)\n g = sf.groupby(\"Year\", {var_name: eval(\"%s('%s')\" % (func_name, col_name))})\n g = g.sort(\"Year\")\n df = g.to_dataframe()\n if title is not None:\n chart = alt.Chart(df, title=title)\n else:\n chart = alt.Chart(df)\n chart = chart.mark_line().encode(\n alt.X('Year:Q', axis=alt.Axis(format='d'), scale=alt.Scale(zero=False)),\n alt.Y('%s:Q' % var_name, scale=alt.Scale(zero=False)),\n\n )\n return chart\n\n\ndef draw_features_yearly_chart(sf, y_col, start_year, end_year, title=None):\n \"\"\"\n Returns a chart with the yearly values between start and end input years\n :param sf: SFrame object\n :param y_col: the name of the column with the Y values\n :param start_year: start year\n :param end_year: end year\n :param title: chart title\n :return: chart with the yearly values of the SFrame columns that will be used as categories\n :rtype: alt.CHart\n \"\"\"\n sf = filter_sframe_by_years(sf, start_year, end_year)\n df = sf.to_dataframe()\n df = df.fillna(0)\n df = df.sort_values(by=[\"Year\"])\n if title is not None:\n chart = alt.Chart(df, title=title)\n else:\n chart = alt.Chart(df)\n chart = chart.mark_line().encode(\n alt.X('Year:Q', axis=alt.Axis(format='d'), scale=alt.Scale(zero=False)),\n alt.Y('%s:Q' % y_col, scale=alt.Scale(zero=False)),\n\n )\n\n return chart\n\n\ndef draw_features_yearly_chart_multi_lines(sf, var_name, value_name, start_year, end_year, title=None):\n \"\"\"\n Returns a chart with the yearly values between start and end input years, where the column names are the chart\n categories and the column values are the graph values in each category\n :param sf: SFrame object\n :param var_name: The name of categories column (will also be use in the chart legend)\n :param value_name: The name of the new values column (will also be used as Y-axis lable)\n :param start_year: start year\n :param end_year: end year\n :param title: chart title\n :return: chart with the yearly values of the SFrame columns that will be used as 
categories\n :rtype: alt.CHart\n \"\"\"\n sf = filter_sframe_by_years(sf, start_year, end_year)\n df = sf.to_dataframe()\n df = df.fillna(0)\n df = pd.melt(df, id_vars=[\"Year\"],\n var_name=var_name, value_name=value_name)\n df = df.sort_values(by=['Year'])\n if title is not None:\n chart = alt.Chart(df, title=title)\n else:\n chart = alt.Chart(df)\n\n chart = chart.mark_line().encode(\n alt.X('Year:Q', axis=alt.Axis(format='d'), scale=alt.Scale(zero=False)),\n alt.Y('%s:Q' % value_name, scale=alt.Scale(zero=False)),\n color=var_name\n )\n return chart\n\n\ndef draw_features_decade_dist(sf, var_name, start_year, end_year, col_warp=4, sharex=False, sharey=False):\n \"\"\"\n Return chart using seaborn package that draw the SFrame input var name distribution over the decades\n :param sharey:\n :param sf: input sframe\n :param var_name: input varname\n :param start_year: start year\n :param end_year: end year\n :param col_warp: number of columns in each row\n :param sharex: if True share X-axis otherwise each subplot will have it's own X-axis\n :param sharex: if True share Y-axis otherwise each subplot will have it's own Y-axis\n :return: chart with subplots which each subplot contains the var_name distrubtion in specific decade\n \"\"\"\n sf = filter_sframe_by_years(sf, start_year, end_year)\n sf[\"Decade\"] = sf[\"Year\"].apply(lambda y: y - y % 10)\n df = sf[\"Decade\", var_name].to_dataframe()\n\n df = df.sort_values(by=['Decade'])\n g = sns.FacetGrid(df, col=\"Decade\", col_wrap=col_warp, aspect=3, sharex=sharex, sharey=sharey)\n g = g.map(sns.distplot, var_name)\n return g\n\n\ndef draw_layered_hist(sf, feature_col_name, decades_list, start_year, end_year, xlim=None, xlabel=None, ylabel=None):\n n = len(decades_list)\n sf = filter_sframe_by_years(sf, start_year, end_year)\n s = set(decades_list)\n sf[\"Decade\"] = sf[\"Year\"].apply(lambda y: y - y % 10)\n sf = sf[\"Decade\", feature_col_name]\n df = sf[sf['Decade'].apply(lambda decade: decade in s)].to_dataframe()\n colors = sns.color_palette(\"hls\", n)\n\n for i in range(n):\n d = decades_list[i]\n c = colors[i]\n sns.distplot(df[df[\"Decade\"] == d][feature_col_name], color=c, label=\"%ss\" % d)\n plt.legend()\n if xlim is not None:\n plt.xlim(*xlim)\n if xlabel is not None:\n plt.xlabel(xlabel)\n\n if ylabel is not None:\n plt.ylabel(ylabel)\n", "id": "2796824", "language": "Python", "matching_score": 2.5433521270751953, "max_stars_count": 1, "path": "ScienceDynamics/visualization/visual_utils.py" }, { "content": "import turicreate.aggregate as agg\nimport itertools\nfrom collections import Counter\nimport pycld2 as cld2\n\n\ndef get_column_count_sframe(sf, col_name, sort_by_count=True):\n g = sf.groupby(col_name, {'Count': agg.COUNT()})\n if sort_by_count:\n g = g.sort(\"Count\")\n return g\n\n\ndef filter_sframe_by_func(sf, filter_func):\n \"\"\"\n Filter Sframe using a filter function\n :param sf: SFrame object\n :param filter_func: a function that return 1 for the lines we want to keep and 0 for the lines we want to filter out\n :return: filtered SFrame\n :rtype: gl.SFrame\n \"\"\"\n if filter_func is None:\n return sf\n\n return sf[sf.apply(lambda r: filter_func(r))]\n\n\ndef join_all_lists(list_of_lists):\n return list(itertools.chain.from_iterable(list_of_lists))\n\n\ndef count_value_in_dict_values_lists(d, value):\n \"\"\"\n Given dict with list as values the function count how many times a value appear in these value lists\n :param d: dict with lists as value\n :param value: value to count\n :return: the number of times a 
value appears in the dict values\n :rtype: int\n \"\"\"\n l = join_all_lists(d.values())\n c = Counter(l)\n if value in c:\n return c[value]\n return 0\n\n\ndef grouper(n, iterable, fillvalue=None):\n args = [iter(iterable)] * n\n return itertools.zip_longest(fillvalue=fillvalue, *args)\n\n\ndef detect_lang(t):\n \"\"\"\n Detect the input text language using cld2 package.\n :param t: input text\n :return: return string with the name of the detected language or None if no language was detected\n :rtype: str\n \"\"\"\n try:\n return cld2.detect(t, bestEffort=True)[2][0][0].lower()\n except:\n return None\n", "id": "10143738", "language": "Python", "matching_score": 0.7108759880065918, "max_stars_count": 1, "path": "ScienceDynamics/utils.py" }, { "content": "import pubmed_parser as pp\nfrom pathlib import Path\nimport json\nfrom tqdm import tqdm \nimport sys\nimport argparse\nfrom itertools import islice\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', required=True,\n help='Path to the pubmed data folder')\n parser.add_argument('-d', default=\"\"\n help='Output directory')\n parser.add_argument('-l', type=int, default=0,\n help='Limit the number of files to read')\n \n args = vars(parser.parse_args())\n res = []\n pubmed_files = Path(args[\"i\"]).rglob(\"*.xml.gz\")\n\n if args[\"l\"]:\n pubmed_files = islice(pubmed_files,0,args[\"l\"])\n for xml_path in tqdm(pubmed_files):\n res+= pp.parse_medline_xml(str(xml_path))\n \n output_path = Path(args['d']) / \"pubmed.json\"\n with output_path.open(\"w\") as f:\n f.write(json.dumps(res))\n", "id": "6404113", "language": "Python", "matching_score": 0.11421545594930649, "max_stars_count": 1, "path": "examples/Coronavirus/pubmed_2_json.py" }, { "content": "import wptools\nfrom collections import defaultdict\nimport re \nimport wikipedia\nimport geonamescache\nimport concurrent\nimport reverse_geocode\nimport tldextract\nfrom geonamescache.mappers import country\nfrom tqdm import tqdm\nimport wikipedia\n\n\ndef get_wikidata(term, c=0):\n try:\n p = wptools.page(term, silent=True).get_wikidata()\n return p.data[\"wikidata\"]\n except LookupError:\n if not c:\n page_name = wikipedia.page(page_name).original_title\n return get_wikidata(page_name, c+1)\n return None\n\n\ndef get_wikidata_from_url(wp):\n try:\n page_name = wp.split(\"wiki/\")[1]\n return get_wikidata(page_name)\n except:\n return None\n \n\ndef chunks(l, n):\n # For item i in a range that is a length of l,\n for i in range(0, len(l), n):\n # Create an index range for l of n items:\n yield l[i:i + n]\n \ndef get_wikidata_from_url_parallel(wiki_pages, max_workers=2, fields=None):\n res = []\n with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:\n for wd in tqdm(executor.map(get_wikidata_from_url, wiki_pages), total=len(wiki_pages)):\n if fields is None:\n res.append(wd)\n else:\n if wd:\n wikidata = defaultdict(str,wd)\n res.append( {k: wikidata[k] for k in fields})\n else:\n res.append(None)\n return res\n\n\ndef get_wikipedia_cordinates(wp):\n try:\n page_name = wp.split(\"wiki/\")[1]\n p = wikipedia.page(page_name)\n return p.coordinates\n except:\n pass\n return None\n\ndef get_wikipedia_cordinates_parallel(wiki_pages, max_workers = 2):\n res = []\n with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:\n for i, e in tqdm(enumerate(executor.map(get_wikipedia_cordinates, wiki_pages)), total=len(wiki_pages)):\n if e is not None:\n res.append(e)\n else:\n res.append({})\n\n return 
res\n\n \n\nclass WikiLocationFetcher(object):\n \n def __init__(self, mag_affilations, max_worker=2):\n self.aff = mag_affilations\n self.gc = geonamescache.GeonamesCache()\n cities = self.gc.get_cities()\n countries = self.gc.get_countries()\n self.cities = {v['name'] for k, v in cities.items()}\n self.countries = {v['name'] for k, v in countries.items()}\n self._max_workers = max_worker\n self.mapper = country(from_key='iso', to_key='name')\n\n\n\n\n def normalize_wiki_location_data(self):\n features = [\"coordinate location (P625)\", \"country (P17)\",\"located at street address (P6375)\", \"located in the administrative territorial entity (P131)\", 'headquarters location (P159)', 'location (P276)']\n res = get_wikidata_from_url_parallel(self.aff['WikiPage'], self._max_workers, features)\n self.aff[\"geo\"] = res\n self.aff = self.aff.unpack(\"geo\", column_name_prefix=\"\")\n self.aff = self.aff.rename({\"country (P17)\": \"Country\", \"headquarters location (P159)\": \"Headquarters Location\", 'located at street address (P6375)': \"Street Address\", \"location (P276)\":\"Location\", 'located in the administrative territorial entity (P131)':\"Administrative Territorial Entity\"})\n cord = [{\"latitude\": l['coordinate location (P625)'][\"latitude\"], \"longitude\": l['coordinate location (P625)'][\"longitude\"] } if (l and 'coordinate location (P625)' in l and l['coordinate location (P625)']!=\"\" and type(l['coordinate location (P625)']) ==dict) else {\"latitude\":\"\", \"longitude\":\"\"} for l in res ]\n self.aff['coordinate location (P625)'] = cord\n self.aff = self.aff.unpack('coordinate location (P625)', column_name_prefix=\"\")\n self.aff[\"Country\"] = self.aff[\"Country\"].apply(lambda x: x.split(\"(\")[0])\n\n def extract_data_from_location(self):\n self.aff[\"Location\"] = self.aff[\"Location\"].apply(lambda x: x.split(\"(\")[0].strip())\n self.aff[\"City_Temp\"] = self.aff[\"Location\"].apply(lambda x: x if x in self.cities else \"\")\n self.aff[\"Country_Temp\"] = self.aff[\"Location\"].apply(lambda x: x if x in self.countries else \"\")\n self.aff[\"City\"] = self.aff.apply(lambda x: x[\"City\"] if x[\"City\"]!=\"\" else x[\"City_Temp\"] )\n self.aff[\"Country\"] = self.aff.apply(lambda x: x[\"Country\"] if x[\"Country\"]!=\"\" else x[\"Country_Temp\"] )\n self.aff[\"City\"] = self.aff.apply(lambda x: x[\"City_Temp\"] if x[\"City\"]!= x[\"City_Temp\"] and x[\"City_Temp\"]!=\"\" else x[\"City\"])\n self.aff= self.aff.remove_columns([\"Country_Temp\",\"City_Temp\"])\n\n\n def extract_data_from_address(self):\n self.aff[\"Temp\"] = self.aff[\"Street Address\"].apply(lambda x: x.split(\",\"))\n self.aff[\"City_Temp\"] = self.aff[\"Temp\"].apply(lambda x: [c for c in x if c.strip() in self.cities])\n self.aff[\"City_Temp\"] = self.aff[\"City_Temp\"].apply(lambda x: x[0] if len(x) else \"\")\n self.aff[\"City\"] = self.aff.apply(lambda x: x[\"City\"] if x[\"City\"]!=\"\" else x[\"City_Temp\"] )\n\n self.aff[\"Country_Temp\"] = self.aff[\"Temp\"].apply(lambda x: [c for c in x if c.strip() in self.countries])\n self.aff[\"Country_Temp\"] = self.aff[\"Country_Temp\"].apply(lambda x: x[0] if len(x) else \"\")\n self.aff[\"Country\"] = self.aff.apply(lambda x: x[\"Country\"] if x[\"Country\"]!=\"\" else x[\"Country_Temp\"] )\n self.aff= self.aff.remove_columns([\"Country_Temp\",\"City_Temp\"])\n\n\n def extract_data_from_cord(self):\n temp = []\n for x in self.aff:\n if x[\"latitude\"]:\n temp.append(reverse_geocode.search([(x[\"latitude\"], x[\"longitude\"])])[0])\n else:\n 
temp.append({})\n self.aff[\"Temp\"] = temp\n self.aff[\"Country\"] = self.aff.apply(lambda x: x[\"Country\"] if x[\"Country\"]!=\"\" and x[\"Country\"] is not None else x[\"Temp\"][\"country\"] if x[\"Temp\"]!={} and 'country' in x[\"Temp\"] else \"\" )\n self.aff[\"City\"] = self.aff.apply(lambda x: x[\"Temp\"][\"city\"] if x[\"Temp\"]!={} and 'country' in x[\"Temp\"] else \"\" )\n self.aff= self.aff.remove_columns([\"Temp\"])\n\n def extract_data_from_atw(self):\n self.aff[\"Temp\"] =self.aff[\"Administrative Territorial Entity\"].apply(lambda x: x.split(\"(\")[0].strip())\n self.aff[\"Temp\"] =self.aff[\"Temp\"].apply(lambda x: x if x in self.cities else \"\")\n self.aff[\"City\"] = self.aff.apply(lambda x: x[\"City\"] if x[\"City\"]!=\"\" else x[\"Temp\"] )\n self.aff= self.aff.remove_columns([\"Temp\"])\n\n\n def extract_data_from_headquarters(self):\n self.aff[\"Temp\"] =self.aff[\"Headquarters Location\"].apply(lambda x: x.split(\"(\")[0].strip())\n self.aff[\"Temp\"] =self.aff[\"Temp\"].apply(lambda x: x if x in self.cities else \"\")\n self.aff[\"City\"] = self.aff.apply(lambda x: x[\"City\"] if x[\"City\"]!=\"\" else x[\"Temp\"] )\n self.aff= self.aff.remove_columns([\"Temp\"])\n\n\n def extract_country_from_city(self):\n self.aff[\"Country_Temp\"] = self.aff.apply(lambda x: self.gc.get_cities_by_name(x[\"City\"]) if x[\"City\"] and not x[\"Country\"] else [] )\n self.aff[\"Country_Temp\"] = self.aff[\"Country_Temp\"].apply(lambda x: [l.popitem()[1] for l in x] )\n self.aff[\"Country\"] = self.aff.apply(lambda x: x[\"Country\"] if x[\"Country\"]!=\"\" else self.mapper(x[\"Country_Temp\"][0][\"countrycode\"]) if len(x[\"Country_Temp\"])==1 else \"\" )\n self.aff= self.aff.remove_columns([\"Country_Temp\"])\n\n \n\n def extract_wiki_cordinates(self):\n wiki_pages = self.aff[(self.aff[\"latitude\"]== None) | (self.aff[\"latitude\"]==\"\")][['AffiliationId','WikiPage']]\n \n cords = get_wikipedia_cordinates_parallel(wiki_pages['WikiPage'], self._max_workers)\n res_geo = [reverse_geocode.search([line])[0] if line!={} else {} for line in cords]\n cords = [[str(line[0]), str(line[1])] if line!={} else [] for line in cords ]\n wiki_pages[\"geo\"] = res_geo\n wiki_pages[\"geo2\"] = cords\n\n self.aff = self.aff.join(wiki_pages)\n self.aff = self.aff.fillna('geo', {})\n self.aff[\"Country\"] = self.aff.apply(lambda x: x[\"Country\"] if x[\"Country\"]!=\"\" and x[\"Country\"] is not None else x[\"geo\"][\"country\"] if x[\"geo\"]!={} and 'country' in x[\"geo\"] else \"\" )\n self.aff[\"City\"] = self.aff.apply(lambda x: x[\"City\"] if x[\"City\"]!=\"\" and x[\"City\"] is not None else x[\"geo\"][\"city\"] if x[\"geo\"]!={} and 'city' in x[\"geo\"] else \"\" )\n\n self.aff[\"latitude\"] = self.aff.apply(lambda x: x[\"geo2\"][0] if( x[\"latitude\"]==\"\" or x[\"latitude\"] is None) and x[\"geo2\"] else x[\"latitude\"])\n self.aff[\"longitude\"] = self.aff.apply(lambda x: x[\"geo2\"][1] if( x[\"longitude\"]==\"\" or x[\"longitude\"] is None) and x[\"geo2\"] else x[\"longitude\"])\n\n self.aff= self.aff.remove_columns([\"geo\", \"geo2\"])\n\n\n\n def extract_country_from_url(self):\n self.aff[\"Country_Web\"] = self.aff[\"OfficialPage\"].apply(lambda x: self.mapper( tldextract.extract(x).suffix.split(\".\")[-1].upper()))\n self.aff[\"Country\"] = self.aff.apply(lambda x: x[\"Country\"] if x[\"Country\"]!=\"\" else x[\"Country_Web\"] if x[\"Country_Web\"] is not None else \"\" )\n self.aff[self.aff[\"Country\"].apply(lambda x: 1 if \"china\" in x.lower() else 0)]\n self.aff= 
self.aff.remove_columns([\"Country_Web\"])\n\n\n def standrtize_names(self):\n country_norm = [(\"United States\",\"United States of America\"),(\"China\" ,\"People's Republic of China\"),(\"China\" ,\"Hong Kong\"),(\"Japan\" ,\"Empire of Japan\"),(\"Iran\",\"Iran, Islamic Republic of\"),(\"Netherlands\" ,'Kingdom of the Netherlands'),(\"State of Palestine\",'Palestinian Territory'),\n (\"State of Palestine\",'Palestinian territories'),(\"United States\", \"Illinois\"),(\"Mexico\", 'State of Mexico'),(\"Russia\", 'Russian Empire'),(\"United States\", 'Virgin Islands, U.S.'),(\"Russia\", 'Russian Empire'),(\"Germany\" ,'German Reich'),(\"Germany\",'Weimar Republic'),\n (\"Taiwan\",\"Republic of China\")]\n country_norm = { orig:rename for rename, orig in country_norm}\n self.aff[\"Country\"] = self.aff[\"Country\"].apply(lambda x: x.strip().strip(\"[\").strip('\"'))\n self.aff[\"City\"] = self.aff[\"City\"].apply(lambda x: x.strip().strip(\"[\").strip('\"'))\n\n self.aff[\"Country\"] = self.aff[\"Country\"].apply(lambda x: country_norm[x] if x in country_norm else x)\n\n \n def add_location_data(self):\n self.normalize_wiki_location_data()\n self.extract_data_from_cord()\n self.extract_data_from_location()\n self.extract_data_from_atw()\n self.extract_data_from_headquarters()\n self.extract_data_from_address()\n self.extract_country_from_city()\n self.extract_wiki_cordinates()\n self.extract_country_from_url()\n self.standrtize_names()\n \n self.aff = self.aff.remove_columns([\"Temp\",\"Location\",\"Administrative Territorial Entity\", \"Street Address\",\"Headquarters Location\"])\n for col in self.aff.column_names():\n self.aff[col] = self.aff[col].apply(lambda x: None if x==\"\" else x)\n", "id": "1946279", "language": "Python", "matching_score": 1.043606162071228, "max_stars_count": 1, "path": "ScienceDynamics/fetchers/wikipedia_fetcher.py" }, { "content": "import turicreate as tc\nfrom functools import lru_cache\n\nfrom ScienceDynamics.config.configs import FIELD_OF_STUDY_PAPERS_ID_SFRAME\nfrom ScienceDynamics.datasets.microsoft_academic_graph import MicrosoftAcademicGraph\nfrom ScienceDynamics.config.configs import DATASETS_BASE_DIR\n\n\nclass FieldsOfStudyFetcher(object):\n def __init__(self):\n mag = MicrosoftAcademicGraph(DATASETS_BASE_DIR)\n self._sf = mag.fields_of_study_papers_ids()\n self._id_name_dict = None\n\n def _get_id_to_name_dict(self):\n \"\"\"\n Return a dict with all the fields ids as keys and their corresponding names as values\n :return: dict with all the fields ids as keys and fields' names as values\n :rtype: dict<str,str>\n \"\"\"\n if self._id_name_dict is None:\n self._id_name_dict = {r['Field of study ID']: r['Field of study name'] for r in self._sf}\n return self._id_name_dict\n\n def _get_field_data_value(self, field_id, key_name):\n \"\"\"\n Returns a dict with the data of the field id if it exists or None otherwise\n :param field_id: field id\n :param key_name: the requested attribute name\n :return: returns the field id input attribute value if one exists, or None otherwise\n \"\"\"\n d = self._get_field_data_dict(field_id)\n return d.get(key_name, None)\n\n @lru_cache(maxsize=100)\n def _get_field_data_dict(self, field_id):\n \"\"\"\n Returns the input field id data as a dict\n :param field_id: input field id\n :return: dict with the field data if it exists None otherwise\n :rtype: dict\n \"\"\"\n sf = self._sf[self._sf['Field of study ID'] == field_id]\n if len(sf) == 0:\n return None\n return sf[0]\n\n def get_field_name(self, field_id):\n \"\"\"\n 
Given a field of study id the function will return the field of study name\n :param field_id: field of study id\n :return: the field of study name if it exists or None otherwise\n :rtype: str\n \"\"\"\n return self.field_id_to_name_dict.get(field_id, None)\n\n def get_field_paper_ids(self, field_id):\n \"\"\"\n Returns the field of study papers ids list\n :param field_id: field of study id\n :return: a list of the field of study paperids\n :rtype: list<str>\n \"\"\"\n return self._get_field_data_value(field_id, \"Paper IDs\")\n\n def get_field_level(self, field_id):\n \"\"\"\n Return the field of study level\n :param field_id: field id\n :return: the field of study level value\n :rtype: int\n \"\"\"\n return self._get_field_data_value(field_id, \"Level\")\n\n def get_field_papers_number(self, field_id):\n \"\"\"\n Return the input field of study paper number\n :param field_id: field of study id\n :return: the fields of study number of papers\n :rtype: int\n \"\"\"\n return self._get_field_data_value(field_id, \"Number of Paper\")\n\n def get_field_ids_by_level(self, level):\n \"\"\"\n Returns a list of field of study ids in the input level\n :param level: field of study level\n :return: a list of field of study ids in the input level\n :rtyoe: list<str>\n \"\"\"\n sf = self._sf[self._sf['Level'] == level]\n return list(sf['Field of study ID'])\n\n @property\n def field_id_to_name_dict(self):\n \"\"\"\n Return a dict in which each key is a field id and each name is the field's name\n :return: dict with the fields ids and their names\n :rtype: dict<str, str>\n :note: while the field id is unqiue fields of study with the same name can have several ids\n \"\"\"\n if self._id_name_dict is None:\n self._id_name_dict = self._get_id_to_name_dict()\n return self._id_name_dict\n\n def get_field_ids_by_name(self, name_regex):\n \"\"\"\n The function returns fields id that match the input regex\n :param name_regex: a name regex\n :type name_regex:\n :return: the function returns dict with the fields id and name of the fields which match the input name_regex\n :rtyoe: dict<str,str>\n \"\"\"\n d = {k: v for k, v in self._get_id_to_name_dict().items() if name_regex.match(v) is not None}\n return d\n", "id": "2050068", "language": "Python", "matching_score": 3.337756395339966, "max_stars_count": 1, "path": "ScienceDynamics/fetchers/fields_of_study_fetcher.py" }, { "content": "from ScienceDynamics.datasets.microsoft_academic_graph import MicrosoftAcademicGraph\nfrom ScienceDynamics.datasets.sjr import SJR\nfrom ScienceDynamics.datasets.aminer import Aminer\n", "id": "852983", "language": "Python", "matching_score": 0.7005524039268494, "max_stars_count": 1, "path": "ScienceDynamics/datasets/__init__.py" }, { "content": "import pathlib\nfrom turicreate import SFrame\nfrom ScienceDynamics.datasets.configs import AMINER_URLS\nfrom ScienceDynamics.datasets.utils import download_file, save_sframe\nfrom ScienceDynamics.config import DATASETS_AMINER_DIR\n\n\nimport zipfile\n\n\nclass Aminer(object):\n def __init__(self, dataset_dir=None):\n if dataset_dir is None:\n dataset_dir = DATASETS_AMINER_DIR\n self._dataset_dir = pathlib.Path(dataset_dir)\n self._dataset_dir.mkdir(exist_ok=True)\n self._sframe_dir = self._dataset_dir / \"sframes\"\n self._sframe_dir.mkdir(exist_ok=True)\n for i, url in enumerate(AMINER_URLS):\n aminer_file = self._dataset_dir / f'aminer_papers_{i}.zip'\n if not pathlib.Path(aminer_file).exists():\n download_file(url, aminer_file)\n with zipfile.ZipFile(aminer_file, 'r') as f:\n 
f.extractall(self._dataset_dir)\n\n @property\n @save_sframe(sframe=\"PapersAMiner.sframe\")\n def data(self):\n \"\"\"\n Create AMiner Papers sFrame from the AMiner text files.\n After creating the SFrame, it is saved to the disk\n \"\"\"\n\n return SFrame.read_json(self._dataset_dir.joinpath(\"AMiner/*.txt\"), orient='lines')\n", "id": "2138672", "language": "Python", "matching_score": 1.4080421924591064, "max_stars_count": 1, "path": "ScienceDynamics/datasets/aminer.py" }, { "content": "import logging\nfrom logging.handlers import RotatingFileHandler\n\nfrom ScienceDynamics.config.configs import TMP_DIR\n\n\n\nlogFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\nlogger = logging.getLogger()\nfileHandler = RotatingFileHandler(TMP_DIR/'complex_network.log', mode='a', maxBytes=5 * 1024 * 1024,\n backupCount=2, encoding=None, delay=0)\nfileHandler.setFormatter(logFormatter)\nlogger.addHandler(fileHandler)\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logFormatter)\nlogger.addHandler(consoleHandler)\nlogger.setLevel(logging.DEBUG)\n", "id": "7406434", "language": "Python", "matching_score": 1.8899000883102417, "max_stars_count": 1, "path": "ScienceDynamics/config/log_config.py" }, { "content": "from ScienceDynamics.config.configs import *", "id": "12628023", "language": "Python", "matching_score": 0.1432071328163147, "max_stars_count": 1, "path": "ScienceDynamics/config/__init__.py" }, { "content": "from unittest import TestCase\nimport sys\n\n# hack to include configs.py\nsys.path.extend([\"..\"])\n\nfrom ScienceDynamics.author import Author\nfrom ScienceDynamics.config.fetch_config import AUTHORS_FETCHER\nimport re\nfrom collections import Counter\n\n\nclass TestAuthor(TestCase):\n\n def testAuthorFeatures(self):\n r = re.compile('T.*Berners.*Lee$', re.IGNORECASE)\n l = AUTHORS_FETCHER.get_author_ids_by_name(r)\n self.assertEqual(len(l), 2)\n author_list = [Author(author_id=i) for i in l]\n author_list = sorted(author_list, key=lambda a: a.papers_number)\n a = author_list[-1]\n self.assertEqual(a.fullname, u'<NAME>')\n self.assertEqual(a.papers_number, 20)\n self.assertEqual(a.gender, 'Male')\n coauthors = a.get_coauthors_list(None, None)\n self.assertEqual(len(coauthors), 90)\n c = Counter(coauthors)\n a2 = Author(author_id=c.most_common(1)[0])\n self.assertEqual(a2.fullname, '<NAME>')\n self.assertEqual(a2.papers_number, 43)\n", "id": "372360", "language": "Python", "matching_score": 2.9950780868530273, "max_stars_count": 1, "path": "tests/test_author.py" }, { "content": "from unittest import TestCase\nimport sys\n\nfrom ScienceDynamics.paper import Paper\n\nsys.path.extend([\"..\"])\n\n\nclass TestPaper(TestCase):\n def testPaperFeatures(self):\n p = Paper('75508021')\n self.assertEqual(p.paper_id, '75508021')\n self.assertEqual(p.references_count, 8)\n self.assertEqual(p.venue_name, 'Nature')\n self.assertEqual(p.total_number_of_times_authors_published_in_venue, 2)\n self.assertEqual(p.title, u'Cell biology: The checkpoint brake relieved')\n self.assertEqual(p.publish_year, 2007)\n", "id": "9160261", "language": "Python", "matching_score": 0.8889673352241516, "max_stars_count": 1, "path": "tests/test_paper.py" }, { "content": "import turicreate as gl\nfrom pymongo import MongoClient\n\nfrom ScienceDynamics.fetchers.authors_fetcher import AuthorsFetcher\nfrom ScienceDynamics.paper import Paper\n\n#\n# def create_venues_sframe(v1, v2):\n# client = MongoClient('mongodb://%s:%[email protected]' % ('myAdmin', 
'<PASSWORD>!%'))\n# a = AuthorsFetcher(client)\n# sf = gl.load_sframe('/data/sframes/papers_features_2015.sframe')\n# v1_sf = sf[sf[\"Journal ID mapped to venue name\"] == v1]\n# v2_sf = sf[sf[\"Journal ID mapped to venue name\"] == v2]\n# set_size = min(len(v1_sf), len(v2_sf))\n# sf = v1_sf[:set_size].append(v2_sf[:set_size])\n# l = []\n# for r in sf:\n# l.append(Paper(r, a).get_paper_features([v1, v2]))\n# sf = gl.load_sframe(l)\n# sf = sf.unpack(\"X1\", column_name_prefix='')\n# sf = sf.fillna('Keywords', [])\n# for feature in ['last_author_number_of_papers', 'first_author_number_of_papers', 'author_max_number_of_papers']:\n# sf = sf.fillna(feature, 0)\n#\n# stop_keywords = {'nature', 'physical sciences'}\n# sf['Keywords'] = sf['Keywords'].apply(lambda l: [i for i in l if i.lower() not in stop_keywords])\n#\n# return sf\n\n\ndef evaluate_single_feature_contribution(sf):\n # venue_id = '003B355D' #08364228 - Nature 003B355D - Science 077EDC2F -PNAS 0C101982 - PLOSONE 0BB9EF81- Scientific Reports\n train, test = sf.random_split(0.8)\n d = {}\n l = ['Authors Number', 'Ref Count',\n 'author_max_number_of_papers', 'first_author_academic_birthday', 'first_author_number_of_papers',\n 'get_authors_avg_academic_birthday', 'get_authors_median_academic_birthday',\n 'last_author_academic_birthday', 'last_author_number_of_papers',\n ]\n l += [c for c in sf.column_names() if 'in_venue' in c]\n for i in l:\n model = gl.classifier.create(train, target='Venue ID', features=[i])\n classification = model.classify(test)\n d[i] = gl.evaluation.precision(test['Venue ID'], classification['class'])\n\n for i in ['Keywords', 'Title Bag of Words']:\n model = gl.classifier.boosted_trees_classifier.create(train, target='Venue ID', max_iterations=1000,\n features=[i])\n classification = model.classify(test)\n d[i] = gl.evaluation.precision(test['Venue ID'], classification['class'])\n return d\n\n\nif __name__ == \"__main__\":\n v1 = '0C101982'\n v2 = '0BB9EF81'\n import itertools\n\n d = {}\n l = ['08364228', '003B355D', '077EDC2F', '0C101982', '0BB9EF81']\n for v1, v2 in itertools.combinations(l, 2):\n sf = create_venues_sframe(v1, v2)\n d[(v1, v2)] = evaluate_single_feature_contribution(sf)\n", "id": "11292958", "language": "Python", "matching_score": 2.336575984954834, "max_stars_count": 1, "path": "ScienceDynamics/prediction/evaluate_paper_features.py" }, { "content": "from ScienceDynamics.fetchers.authors_fetcher import AuthorsFetcher\nfrom ScienceDynamics.fetchers.fields_of_study_fetcher import FieldsOfStudyFetcher\nfrom ScienceDynamics.fetchers.papers_fetcher import PapersFetcher\nfrom ScienceDynamics.fetchers.venue_fetcher import VenueFetcher\nfrom ScienceDynamics.mongo_connector import MongoDBConnector\n\nHOST = \"localhost\"\nPORT = 27017\nMD = MongoDBConnector(HOST, PORT)\nAUTHORS_FETCHER = AuthorsFetcher(MD._client)\nPAPERS_FETCHER = PapersFetcher(MD._client)\nVENUE_FETCHER = VenueFetcher(MD._client)\nFIELDS_OF_STUDY_FETCHER = FieldsOfStudyFetcher()", "id": "954154", "language": "Python", "matching_score": 0.9138953685760498, "max_stars_count": 1, "path": "ScienceDynamics/config/fetch_config.py" } ]
2.543352
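The author-feature builders in the record above all follow the same turicreate idiom: pack a (Year, value) pair per row with `apply`, collect the pairs per author with `agg.CONCAT`, then fold the resulting list into a dict keyed by year. The sketch below shows that idiom in isolation on toy data; the column names and values are illustrative only and are not part of the MAG schema.

```python
# Minimal sketch of the groupby + CONCAT -> per-year dict idiom used above.
# Toy data: column names and values are illustrative, not the MAG schema.
import turicreate as tc
import turicreate.aggregate as agg

sf = tc.SFrame({'AuthorId': [1, 1, 1, 2],
                'Year': [2010, 2010, 2011, 2011],
                'PaperId': [10, 11, 12, 13]})

# 1) Pack each row into a (Year, PaperId) pair.
sf['Year Paper'] = sf.apply(lambda r: (r['Year'], r['PaperId']))

# 2) Collect all pairs belonging to the same author.
g = sf.groupby('AuthorId', {'Pairs': agg.CONCAT('Year Paper')})

# 3) Fold the list of pairs into a {year: [values]} dict.
def pairs_to_dict(pairs):
    d = {}
    for year, value in pairs:
        d.setdefault(year, []).append(value)
    return d

g['Papers by Year'] = g['Pairs'].apply(pairs_to_dict)
g = g.remove_column('Pairs')
print(g)
```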
nra4ever
[ { "content": "from shutil import copy2\nimport glob\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--dir')\n parser.add_argument('-n', '--newdisks')\n parser.add_argument('-c', '--copyfrom')\n args = parser.parse_args()\n\ndef findzero(index):\n if index <= 99:\n zero = \"00\"\n if index > 99:\n zero = \"0\"\n return zero\n\n\nif args.dir:\n fileDir = args.dir\nelse:\n fileDir = input(\"Location of images: \")\nend = len(glob.glob1(fileDir, \"*.HFE\"))\nprint(str(end) + \" images currently on drive.\")\nendCurrent = end - 1\n\n\nif args.newdisks:\n floppies = int(args.newdisks)\nelse:\n floppies = int(input(\"How Many Disks Would you like to make?: \"))\nif args.copyfrom:\n cpnum = int(args.copyfrom)\nelse:\n cpnum = False\n\n\nif not cpnum:\n cpnum = endCurrent\n\n\nhighfile = \"DSKA\" + findzero(int(cpnum)) + str(cpnum) + \".HFE\"\n\n\nif floppies + endCurrent > 999:\n raise ValueError\n\nfor i in range(floppies + endCurrent + 1):\n if i > endCurrent:\n print(\"Writing DSKA\" + findzero(i) + str(i) + \".HFE\")\n copy2(fileDir + \"\\\\\" + highfile, fileDir + \"\\\\\" + \"DSKA\" + findzero(i) + str(i) + \".HFE\")\n\nprint(\"Operation Complete\")\n", "id": "1724342", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "HFECopy.py" } ]
0
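As a side note on HFECopy.py in the record above: each new image name is built from a "DSKA" prefix, a zero-padding string from findzero(), and the index. A more compact way to get fixed-width names is sketched below; it assumes a uniform four-digit index (e.g. DSKA0007.HFE), whereas the original helper yields three digits for single-digit indices, so treat this as an illustration rather than a drop-in replacement.

```python
# Illustrative only: fixed-width HFE image names via format padding.
# Assumes a uniform four-digit index; the original findzero() helper
# produces three digits for single-digit indices (e.g. DSKA005.HFE).
def image_name(index: int) -> str:
    if not 0 <= index <= 9999:
        raise ValueError(f"index out of range: {index}")
    return f"DSKA{index:04d}.HFE"

print(image_name(7))    # DSKA0007.HFE
print(image_name(150))  # DSKA0150.HFE
```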
mpt-bootcamp
[ { "content": "@task\ndef routetable(c):\n c.run(\"hostname\")\n c.run(\"netstat -r\")\n\n@task\ndef vmstat(c):\n c.run(\"hostname\")\n c.run(\"vmstat -t 1 5\")\n\n@task\ndef ulimit(c):\n c.run(\"hostname\")\n c.run(\"ulimit -a\") \n\n@task\ndef ps(c):\n c.run(\"hostname\")\n c.run(\"ps -Ao '%U, %p, %t, %a'\")\n\n@task\ndef memory(c):\n c.run(\"hostname\")\n c.run(\"cat /proc/meminfo\")\n\n@task\ndef tcpconnect(c):\n c.run(\"hostname\")\n c.run(\"netstat -at\")\n```\n\n### Exercise 3 - Installing Apache web server\n\nAdd a function to deploy Apache server. Add the following line of code to the *fabfile.py* file.\n\n```\n@task\ndef deploy_apache(c):\n c.sudo(\"apt-get update\")\n c.sudo(\"apt-get install -q -y apache2\")\n\n", "id": "10509418", "language": "Python", "matching_score": 2.7054901123046875, "max_stars_count": 0, "path": "fabfile.py" }, { "content": "from fabric import task\n\n@task\ndef hello(c):\n print(\"Hello!\")\n\n@task\ndef uptime(c):\n c.run(\"hostname\")\n c.run(\"uptime\")\n\n@task\ndef diskfree(c):\n uname = c.run('uname -s', hide=True)\n command = \"df -h / | tail -n1 | awk '{print $5}'\"\n result = c.run(command, hide=True).stdout.strip()\n print(result)\n\n@task\ndef deployapache(c):\n c.sudo(\"apt-get update\")\n c.sudo(\"apt-get install -q -y apache2\")\n \n@task\ndef routetable(c):\n c.run(\"hostname\")\n c.run(\"netstat -r\")\n\n@task\ndef vmstat(c):\n c.run(\"hostname\")\n c.run(\"vmstat -t 1 5\")\n\n@task\ndef ulimit(c):\n c.run(\"hostname\")\n c.run(\"ulimit -a\") \n\n@task\ndef ps(c):\n c.run(\"hostname\")\n c.run(\"ps -Ao '%p, %U, %t, %a'\")\n\n@task\ndef memory(c):\n c.run(\"hostname\")\n c.run(\"cat /proc/meminfo\")\n\n@task\ndef tcpconnect(c):\n c.run(\"hostname\")\n c.run(\"netstat -atp\")\n \n@task\ndef createhwinfo(c):\n c.run(\"hostname\")\n c.sudo(\"mkdir -p /var/www/html\")\n c.sudo(\"lshw -html | sudo tee /var/www/html/sysinfo.html > /dev/null\")\n\n@task\ndef apacheindex(c):\n c.run(\"hostname\")\n upload = c.put(\"docs/landing.html\", \"landing.html\")\n c.sudo(\"cp landing.html /var/www/html/index.html\")\n\n@task\ndef apachelog(c):\n c.run(\"hostname\")\n c.prompt()\n c.sudo(\"cp /var/log/apache2/access.log apache-access.log\")\n c.sudo(\"chown $USER: apache-access.log\")\n dload = c.get(\"apache-access.log\", \"data/apache-access.log\")\n \n@task\ndef reboot(c):\n c.run(\"hostname\")\n c.reboot()\n\n", "id": "2785404", "language": "Python", "matching_score": 0.8814429044723511, "max_stars_count": 0, "path": "fabfile-ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport subprocess\n\n# return the OS release\nsubprocess.call([\"uname\", \"-a\"])", "id": "2962138", "language": "Python", "matching_score": 1.632294774055481, "max_stars_count": 0, "path": "scripts/uname.py" }, { "content": "#! /usr/bin/env python3\n\nimport subprocess\nimport sys\n\nprint(\"Executing \", sys.argv[0])\nsubprocess.call([sys.argv[1], sys.argv[2]])\n\n", "id": "10600423", "language": "Python", "matching_score": 0.8503115177154541, "max_stars_count": 0, "path": "lab1-ex6-ans.py" }, { "content": "#! 
/usr/bin/env python3\n\nimport subprocess\n\n# return the free memory in GB\nsubprocess.call([\"free\", \"-g\"])", "id": "6747651", "language": "Python", "matching_score": 1.1753236055374146, "max_stars_count": 0, "path": "scripts/freemem.py" }, { "content": "import os\nimport subprocess\nimport sys\n\nsubprocess.call([\"ls\", \"-lha\"])\n", "id": "6646658", "language": "Python", "matching_score": 1.087514877319336, "max_stars_count": 0, "path": "tests/ls1.py" }, { "content": "#! /usr/bin/env python3\n\nimport subprocess\n\nsubprocess.call([\"ps\", \"aux\"])\n\n", "id": "2685059", "language": "Python", "matching_score": 1.7297903299331665, "max_stars_count": 0, "path": "lab1-ex5-ans.py" }, { "content": "import os\nimport subprocess\nimport sys\n\nsubprocess.call([\"ps\", \"aux\"])\n", "id": "4533755", "language": "Python", "matching_score": 0.4813552796840668, "max_stars_count": 0, "path": "tests/ps1.py" }, { "content": "import os\nimport subprocess\nimport sys\n\n# create two files to hold the output and errors, respectively\nwith open('out.txt','w+') as fout:\n with open('err.txt','w+') as ferr:\n out=subprocess.call([\"ls\",'-lha'],stdout=fout,stderr=ferr)\n # reset file to read from it\n fout.seek(0)\n # save output (if any) in variable\n output=fout.read()\n\n # reset file to read from it\n ferr.seek(0) \n # save errors (if any) in variable\n errors = ferr.read()\n\nprint(output)\nprint(errors)\n\n\n\n", "id": "6022585", "language": "Python", "matching_score": 2.000683546066284, "max_stars_count": 0, "path": "tests/ls2.py" }, { "content": "import subprocess\nimport sys\n\n# Show Python version\nprint('version is', sys.version)\n\n# Execute system commands\nsubprocess.call(\"ls\")\nsubprocess.call([\"ls\", \"-lha\"])\nsubprocess.call([\"ls\", \"-l\", \"/etc/resolv.conf\"])\nsubprocess.call([\"ps\", \"aux\"])\n\n# Store stdout and stderr to variables\np = subprocess.Popen(\"date\", stdout=subprocess.PIPE)\n\n# Obtain the return tuple\n(output, err) = p.communicate()\n\n# Show the output\nprint(output)\n\n", "id": "566005", "language": "Python", "matching_score": 2.040004014968872, "max_stars_count": 0, "path": "tests/linux-commands.py" }, { "content": "import os\nimport subprocess\nimport sys\n\nrc = subprocess.run([\"ls\",\"-lha\"], universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\nrc.stdout\nrc.stderr\nrc.returncode\n\nprint(rc.stdout)\nprint(rc.stderr)\nprint(rc.returncode)\n\n\n", "id": "5283385", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "tests/ls3.py" }, { "content": "#! 
/usr/bin/env python3\n\nimport os\nimport shutil\nimport glob\nfrom datetime import datetime\n\n# copyfile\nsrcfile = \"data/auth.log\"\ndstfile = \"temp/auth.log\"\n\nshutil.copyfile(srcfile, dstfile)\n\n# copytree\nif not os.path.exists(\"temp/data\"):\n shutil.copytree(\"data\", \"temp/data\", ignore=shutil.ignore_patterns('*.csv', '*.js'))\n\nif not os.path.exists(\"temp/tests\"):\n shutil.copytree(\"tests\", \"temp/tests\", ignore=shutil.ignore_patterns('*.csv', '*.js'))\n\n# move a file from test to the temp directory\nif os.path.exists(\"temp/tests/ls1.py\"):\n shutil.move(\"temp/tests/ls1.py\", \"temp\")\n\n# get disk usage of a directory\nprint(\"Disk usage of temp:\")\nprint(shutil.disk_usage(\"temp\"))\n\n# archive \nprint(\"Archive tests folder\")\nshutil.make_archive(\"temp/tests\", \"gztar\", \"temp/tests\")\nshutil.make_archive(\"temp/tests\", \"zip\", \"temp/tests\")\n\n# unpack\nprint(\"Unpack tests archive package\")\nshutil.unpack_archive(\"temp/tests.zip\", \"temp/test2\")\nprint(shutil.disk_usage(\"temp/test2\"))\n\n# rmtree\nprint(\"Delete directory tree - temp/test2\")\nif os.path.exists(\"temp/test2\"):\n shutil.rmtree(\"temp/test2\")\n", "id": "3846922", "language": "Python", "matching_score": 1.7841819524765015, "max_stars_count": 0, "path": "lab3-ex5-ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport shutil\nimport pathlib\nfrom datetime import datetime\n\n# Create a temp folder and change into it.\ndirpath = \"temp\"\nprint(\"The current direocty is: {}\".format(os.getcwd()))\n\nprint(\"Creating directory - {}\".format(dirpath))\ntry: \n os.mkdir(dirpath) \nexcept OSError as error: \n print(\"{} is already created\".format(dirpath))\n\nprint(\"Change to directory - {}\".format(dirpath))\nos.chdir(dirpath)\nprint(\"Changing to directory {}\".format(os.getcwd()))\n\n# Creating a directory tree using yyyy/mm\nyear_month_path = str(datetime.now().year) + \"/\" + str(datetime.now().month)\nprint(\"Creating a directory tree {} if not exist\".format(year_month_path))\nif not os.path.exists(year_month_path):\n os.makedirs(year_month_path)\n\n# Get the current path, basename, and dirname\nprint(\"Current path: {}\".format(os.getcwd()))\nprint(\"Basename is: {}\".format(os.path.basename(os.getcwd())))\nprint(\"Dirname is: {}\".format(os.path.dirname(os.getcwd())))\n", "id": "376210", "language": "Python", "matching_score": 0.8980948328971863, "max_stars_count": 0, "path": "lab3-ex4-ans.py" }, { "content": "#! 
/usr/bin/env python3\n#\n# This is a main Python template script.\n#\n\nimport os\nimport sys\nimport subprocess\nimport argparse\nimport logging\nimport datetime\n\ndef init_logger():\n # Create a custom logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # Add error file handler\n error_handler = logging.FileHandler('error.log')\n # error_handler.setLevel(logging.ERROR)\n error_handler.setFormatter(logger_formatter)\n logger.addHandler(error_handler)\n\n # Add console log handler\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logger_formatter)\n logger.addHandler(console_handler)\n\n\ndef main():\n # Enable logging\n init_logger()\n logger = logging.getLogger(__file__)\n logger.info(\"Started\")\n \n (filename, fileext) = os.path.splitext(os.path.basename(__file__))\n filedir = (os.path.dirname(os.path.realpath(__file__)))\n logger.info(\"%s %s\", filename, filedir)\n\n# Main entry\n#-----------------------------------------------------------------------------#\nif __name__ == \"__main__\":\n main()", "id": "6081395", "language": "Python", "matching_score": 1.3198798894882202, "max_stars_count": 0, "path": "main.template.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport shutil\nimport pathlib\n\nfiledir = 'data/'\n\nprint(\"Listing files in {}\".format(filedir))\nwith os.scandir(filedir) as entries:\n for entry in entries:\n if not entry.is_dir():\n print(entry.name)\n \n", "id": "7115611", "language": "Python", "matching_score": 1.9689598083496094, "max_stars_count": 0, "path": "lab3-ex2-ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport shutil\nimport pathlib\n\nfrom datetime import datetime\n\ndef convert_date(timestamp):\n d = datetime.utcfromtimestamp(timestamp)\n formated_date = d.strftime('%d %b %Y')\n return formated_date\n\nfiledir = 'data/'\n\nprint(\"Listing files in {}\".format(filedir))\nwith os.scandir(filedir) as entries:\n for entry in entries:\n if not entry.is_dir():\n print(entry.name)\n\n # Show the file stat of the last file\n info = entry.stat()\n print(f'{entry.name}\\t Last Modified: {convert_date(info.st_mtime)}') \n print(f\"Is directory: {entry.is_dir()}\")\n print(f\"Is file: {entry.is_file()}\")\n print(f\"Is symlink: {entry.is_symlink()}\")\n\n# Without using the scandir, you can use os.stat() to get the file status below\nfilepath = 'data/lab3-ex1.txt'\nstatus = os.stat(filepath) \nprint(status)\n", "id": "10292189", "language": "Python", "matching_score": 1.671778917312622, "max_stars_count": 0, "path": "lab3-ex3-ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport shutil\nimport pathlib\n\nfilepath = 'data/lab3-ex1.txt'\n\nprint(f\"Creating a file {filepath}\")\nwith open(filepath, 'w') as f:\n line = \"This is an example to create a file.\\nThen read it back line by line\\n\"\n f.write(line)\n f.write(\"Done\\n\")\n\nprint(f\"Reading a file {filepath}\")\nwith open(filepath, 'r') as f:\n line = f.readline()\n i = 1\n while line:\n print(\"Line {}: {}\".format(i, line.strip()))\n line = f.readline()\n i += 1\n\n", "id": "9109395", "language": "Python", "matching_score": 0.7666174173355103, "max_stars_count": 0, "path": "lab3-ex1-ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport subprocess\nimport sys\nimport requests\n\n# capture result in the Response object. 
\nres = requests.get(\n 'https://api.github.com/search/repositories',\n params = {'q': 'user:mpt-bootcamp'}\n)\n\n# Extract the results\njson_data = res.json()\nrepos = json_data['items']\n#print(repos[0]['name'])\nfor repo in repos:\n print(repo['name'])\n", "id": "7378112", "language": "Python", "matching_score": 1.0205515623092651, "max_stars_count": 0, "path": "lab4-ex6c-ans.py" }, { "content": "#! /usr/bin/env python3\n\nfrom ansible.module_utils.basic import *\n\ndef main():\n\tmodule = AnsibleModule(argument_spec={})\n\tresponse = {\"My\": \"Router\"}\n\tmodule.exit_json(changed=False, meta=response)\n\n\nif __name__ == '__main__':\n main()\n", "id": "4451081", "language": "Python", "matching_score": 0.14231257140636444, "max_stars_count": 0, "path": "library/my_router_ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport subprocess\nimport sys\n\n# create three sets\ncode = { \"GitHub\"}\nbuild = { \"Jenkins\", \"Gradle\", \"Maven\"}\ndeploy = { \"Jenkins\", \"Ansible\"}\n\n# create an union set\nall_tools = code.union(build).union(deploy)\n\n\nprint(\"Union:\", all_tools)\nprint(\"Intersection: \", build.intersection(deploy))\nprint(\"Difference: \", build.difference(deploy))\nprint(\"Difference: \", deploy.difference(build))\n\nprint(\"Is code and build disjoint:\", code.isdisjoint(build))\nprint(\"Is deploy and build disjoint:\", deploy.isdisjoint(build))\n\nx = {1, 2}\ny = {1, 3, 2, 4, 5}\n\nprint(\"is x a subset of y:\", x.issubset(y))\nprint(\"is y a subset of x:\", y.issubset(x))\nprint(\"is y a superset of x:\", y.issuperset(x))\n\ny.discard(1)\nprint(\"After discard 1 from y, is y a superset of x:\", y.issuperset(x))\n\nfor z in x.intersection(y):\n print(z)\n", "id": "11381133", "language": "Python", "matching_score": 0.22247445583343506, "max_stars_count": 0, "path": "lab2-ex4-ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport subprocess\nimport sys\nimport requests\n\n# capture result in the Response object. \nres = requests.get('https://api.github.com')\nprint(\"Return Code: {}\".format(res.status_code))\nprint(\"Content:\")\nprint(res.content)\n\nprint(\"Content in JSON format:\")\nprint(res.json())\n\n\nprint(\"Respond Headers:\")\nprint(res.headers)", "id": "8610206", "language": "Python", "matching_score": 2.555351972579956, "max_stars_count": 0, "path": "lab4-ex6b-ans.py" }, { "content": "#! /usr/bin/env python3\n\nimport os\nimport subprocess\nimport sys\nimport requests\n\n# capture result in the Response object. \nres = requests.get('https://api.github.com')\nprint(res.status_code)\n\n", "id": "9382105", "language": "Python", "matching_score": 2.461329221725464, "max_stars_count": 0, "path": "lab4-ex6a-ans.py" } ]
1.247602
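The first fabfile.py in the record above will not import as written: it lacks the `from fabric import task` import its decorators need, and the lab-handout Markdown pasted at the end ("### Exercise 3 ..." and the stray code fences) is not valid Python. A minimal runnable version of just the Apache-deployment piece, using the same Fabric 2 task API as the accompanying fabfile-ans.py, might look like this:

```python
# Minimal working sketch of the Apache-deployment task from the exercise.
# Same Fabric 2 API as fabfile-ans.py in the record above.
from fabric import task

@task
def deploy_apache(c):
    # c.sudo() runs the command with sudo on the remote host.
    c.sudo("apt-get update")
    c.sudo("apt-get install -q -y apache2")
```

Fabric 2 exposes the task under a dashed name, so it would typically be invoked as `fab -H user@host deploy-apache`.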
ManihtraA
[ { "content": "import sys\r\nimport file1\r\nnum = int(sys.argv[1])\r\nprint(file1.myfunc(num))\r\n", "id": "4147428", "language": "Python", "matching_score": 1.2055931091308594, "max_stars_count": 0, "path": "file3.py" }, { "content": "def myfunc(x):\r\n return [num for num in range(x) if num%2==0]\r\nlist1 = myfunc(21)\r\n", "id": "10031867", "language": "Python", "matching_score": 0.6785694360733032, "max_stars_count": 0, "path": "file4.py" }, { "content": "import file1\r\nfile1.list1.append(100)\r\nprint(file1.list1)\r\n", "id": "8395913", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "file2.py" }, { "content": "\r\n# from colorama import init\r\n# init()\r\nfrom colorama import Fore\r\nprint(Fore.BLUE + \"hi this is blue text\")\r\nprint(Fore.GREEN + \"hi green text\")\r\n", "id": "5642685", "language": "Python", "matching_score": 0, "max_stars_count": 0, "path": "test.py" }, { "content": "import fileTest\r\nprint(\"Top level in function\")\r\nfileTest.func()\r\nif __name__ == \"__main__\":\r\n\tprint(\"2 execute directly\")\r\nelse:\r\n\tprint(\"2 imported into another\")", "id": "1345041", "language": "Python", "matching_score": 2.6457512378692627, "max_stars_count": 0, "path": "fileTest2.py" }, { "content": "def func():\r\n\tprint(\"testRunning\")\r\nprint(\"top level printing\")\r\n\r\nif __name__==\"__main__\":\r\n\tprint(\"1 run directly\")\r\nelse:\r\n\tprint(\"1 run imported to another one\")", "id": "7458723", "language": "Python", "matching_score": 2.2551779747009277, "max_stars_count": 0, "path": "fileTest.py" } ]
0.942081
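The fileTest.py / fileTest2.py pair above is a small demonstration of how the `__name__ == "__main__"` guard separates direct execution from import. A compact, hypothetical restatement of the same idea (the module name below is made up, not taken from the record):

```python
# mymodule.py -- hypothetical name, used only to illustrate the guard.
def func():
    print("func running")

print("top level of mymodule")        # executes on import and on direct run

if __name__ == "__main__":
    print("mymodule run directly")    # only when run as: python mymodule.py
else:
    print("mymodule imported")        # only when another script imports it
```

Running `python fileTest2.py` therefore prints fileTest.py's top-level line and its "imported" branch before fileTest2.py's own "execute directly" branch — the same pattern the two files above exercise.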
ExQA
[ { "content": "from django.urls import path\n\nfrom . import views\n\n\nurlpatterns = [\n path('signup/', views.SignUp.as_view(), name='signup'),\n path('calculator', views.calculator),\n path('weatherapp', views.index)\n\n]\n", "id": "1900597", "language": "Python", "matching_score": 1.3051033020019531, "max_stars_count": 0, "path": "accounts/urls.py" }, { "content": "from django.contrib import admin\nfrom django.urls import path, include\nfrom django.views.generic.base import TemplateView\nfrom accounts import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('accounts/', include('accounts.urls')),\n path('accounts/', include('django.contrib.auth.urls')),\n path('', TemplateView.as_view(template_name='home.html'), name='home'),\n path('calculator/', views.calculator, name='calculator'),\n path('weatherapp/', views.index, name='weather')\n\n]\n", "id": "7005624", "language": "Python", "matching_score": 1.630423665046692, "max_stars_count": 0, "path": "my_project/urls.py" }, { "content": "from django.apps import AppConfig\n\nclass AccountsConfig(AppConfig):\n name = 'accounts'\n\nclass WeatherConfig(AppConfig):\n name = 'weather'\n", "id": "10349844", "language": "Python", "matching_score": 0.0380246601998806, "max_stars_count": 0, "path": "accounts/apps.py" }, { "content": "from .models import City\nfrom django.forms import ModelForm, TextInput\n\n\nclass CityForm(ModelForm):\n class Meta:\n model = City\n fields = ['name']\n widgets = {'name': TextInput(attrs={\n 'class': 'form-control',\n 'name': 'city',\n 'id': 'city',\n 'placeholder': 'Enter your city'})}\n\n\n", "id": "7332319", "language": "Python", "matching_score": 1.3991281986236572, "max_stars_count": 0, "path": "accounts/forms.py" }, { "content": "from django.db import models\n\nclass City(models.Model):\n name = models.CharField(max_length=30)\n\n def __srt__(self):\n return self.name\n\n\n# Create your models here.\n", "id": "11972524", "language": "Python", "matching_score": 1.2647693157196045, "max_stars_count": 0, "path": "accounts/models.py" }, { "content": "from django.contrib import admin\nfrom .models import City\n\nadmin.site.register(City)\n\n# Register your models here.\n", "id": "6490495", "language": "Python", "matching_score": 0.04157961532473564, "max_stars_count": 0, "path": "accounts/admin.py" }, { "content": "from django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views import generic\nfrom my_project.calc_operations import calc_object\n\n\nclass SignUp(generic.CreateView):\n form_class = UserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'signup.html'\n\ndef calculator(request):\n ctx = {}\n ctx['operations'] = calc_object.keys()\n\n if request.method == 'GET':\n print(calc_object.keys())\n elif request.method == 'POST':\n try:\n first_num = float(request.POST.get('first_num').strip())\n operation = request.POST.get('operation')\n second_num = float(request.POST.get('second_num'))\n result = calc_object[operation](first_num, second_num)\n ctx['result'] = result\n except(ValueError, ZeroDivisionError, ) as e:\n ctx['msg'] = e\n return render(request, 'calculator.html', ctx)\n\n\ndef calculator_non_auth(request):\n ctx = {}\n ctx['operations'] = calc_object.keys()\n\n if request.method == 'GET':\n print(calc_object.keys())\n elif request.method == 'POST':\n try:\n first_num = float(request.POST.get('first_num').strip())\n operation = request.POST.get('operation')\n second_num = 
float(request.POST.get('second_num'))\n result = calc_object[operation](first_num, second_num)\n ctx['result'] = result\n except(ValueError, ZeroDivisionError, ) as e:\n ctx['msg'] = e\n return render(request, 'calculator.html', ctx)\n", "id": "10786024", "language": "Python", "matching_score": 1.7910245656967163, "max_stars_count": 0, "path": "accounts/views.py" }, { "content": "import math\n\ndef add(a, b):\n return a + b\n\ndef minus(a, b):\n return a - b\n\ndef umn(a, b):\n return a * b\n\ndef dell(a, b):\n return a / b\n\ndef expt(a, b):\n if b == 0:\n return 1\n return a*expt(a, b-1)\n\ndef sin(a, b):\n return math.sin(a)\ndef cos(a, b):\n return math.cos(a)\n\ncalc_object = {\n '+': add,\n '-': minus,\n '*': umn,\n '/': dell,\n '^': expt,\n 'sin': sin,\n 'cos': cos,\n}\n\n\ncalc_object_non_auth = {\n '+': add,\n '-': minus,\n '*': umn,\n '/': dell,\n '^': expt\n}\n\n\n\n", "id": "8049646", "language": "Python", "matching_score": 1.6545928716659546, "max_stars_count": 0, "path": "my_project/calc_operations.py" } ]
1.352116
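For the last record: the Django calculator view resolves the submitted operator through the `calc_object` dict in calc_operations.py, so each key maps straight to a two-argument callable. A short usage sketch of that dispatch follows; the commented output values are what the functions above return, and the import assumes the code runs inside the my_project package as views.py does.

```python
# Illustrative use of the operator-dispatch dict from calc_operations.py.
from my_project.calc_operations import calc_object

print(sorted(calc_object))           # ['*', '+', '-', '/', '^', 'cos', 'sin']
print(calc_object['+'](2.0, 3.0))    # 5.0
print(calc_object['^'](2.0, 3.0))    # 8.0 -- recursive expt(); terminates only
                                     #        for non-negative whole exponents
print(calc_object['sin'](0.0, 0.0))  # 0.0 -- second argument is ignored
```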